[NOD-1503] Pruning Manager (#994)

* Update pruningmanager interface

* Add a ProtoUTXOSet to hashserialization

* Update miningmanager with all the necessary stores and managers

* Implement mining manager

* Prune P.AC not in V.Past

* PruningManager fix all review comments
Elichai Turkel 2020-11-04 10:29:45 +02:00 committed by GitHub
parent f06dc7ea90
commit 6a46cb2be6
12 changed files with 537 additions and 34 deletions

View File

@@ -118,3 +118,11 @@ func (ps *pruningStore) deserializeUTXOSetBytes(dbPruningPointUTXOSetBytes []byt
return dbPruningPointUTXOSet.Bytes, nil
}
func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader) (bool, error) {
if ps.pruningPointStaging != nil {
return true, nil
}
return dbContext.Has(pruningBlockHashKey)
}

View File

@@ -80,12 +80,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
dagTopologyManager,
ghostdagDataStore,
ghostdagManager)
pruningManager := pruningmanager.New(
dagTraversalManager,
dagTopologyManager,
pruningStore,
blockStatusStore,
consensusStateStore)
pastMedianTimeManager := pastmediantimemanager.New(
dagParams.TimestampDeviationTolerance,
dbManager,
@@ -102,7 +96,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
ghostdagDataStore,
acceptanceDataStore)
headerTipsManager := headertipsmanager.New(dbManager, dagTopologyManager, headerTipsStore)
genesisHash := externalapi.DomainHash(*dagParams.GenesisHash)
genesisHash := (*externalapi.DomainHash)(dagParams.GenesisHash)
mergeDepthManager := mergedepthmanager.New(
dagParams.FinalityDepth(),
dbManager,
@@ -112,7 +106,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
blockValidator := blockvalidator.New(
dagParams.PowMax,
false,
&genesisHash,
genesisHash,
dagParams.EnableNonNativeSubnetworks,
dagParams.DisableDifficultyAdjustment,
dagParams.DifficultyAdjustmentWindowSize,
@@ -138,7 +132,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
ghostdagManager,
dagTopologyManager,
dagTraversalManager,
pruningManager,
pastMedianTimeManager,
transactionValidator,
blockValidator,
@@ -160,9 +153,26 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
return nil, err
}
pruningManager := pruningmanager.New(
dbManager,
dagTraversalManager,
dagTopologyManager,
consensusStateManager,
consensusStateStore,
ghostdagDataStore,
pruningStore,
blockStatusStore,
multisetStore,
acceptanceDataStore,
blockStore,
utxoDiffStore,
genesisHash,
dagParams.FinalityDepth(),
dagParams.PruningDepth())
syncManager := syncmanager.New(
dbManager,
&genesisHash,
genesisHash,
dagTraversalManager,
dagTopologyManager,
ghostdagDataStore,

View File

@@ -8,5 +8,6 @@ type PruningStore interface {
Stage(pruningPointBlockHash *externalapi.DomainHash, pruningPointUTXOSetBytes []byte)
IsStaged() bool
PruningPoint(dbContext DBReader) (*externalapi.DomainHash, error)
HasPruningPoint(dbContext DBReader) (bool, error)
PruningPointSerializedUTXOSet(dbContext DBReader) ([]byte, error)
}

View File

@@ -4,7 +4,6 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// PruningManager resolves and manages the current pruning point
type PruningManager interface {
FindNextPruningPoint(blockHash *externalapi.DomainHash) error
FindNextPruningPoint() error
PruningPoint() (*externalapi.DomainHash, error)
SerializedUTXOSet() ([]byte, error)
}
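
The interface drops the blockHash parameter: the manager now derives the candidate pruning point from the virtual block on its own. A minimal, hypothetical sketch of how a caller might drive the interface (the package and helper names are illustrative, not part of this change; only the model and externalapi packages shown in this diff are assumed):

package pruningexample // hypothetical package

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// pruneAndExport is a hypothetical helper: it triggers pruning after a block
// was inserted and then reads back the current pruning point and its
// serialized UTXO set, mirroring how blockProcessor uses the manager below.
func pruneAndExport(pm model.PruningManager) (*externalapi.DomainHash, []byte, error) {
	// Checks whether the pruning point advanced and deletes pruned data if it did.
	if err := pm.FindNextPruningPoint(); err != nil {
		return nil, nil, err
	}
	pruningPoint, err := pm.PruningPoint()
	if err != nil {
		return nil, nil, err
	}
	serializedUTXOSet, err := pm.SerializedUTXOSet()
	if err != nil {
		return nil, nil, err
	}
	return pruningPoint, serializedUTXOSet, nil
}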

View File

@@ -81,6 +81,12 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
bp.headerTipsStore.Stage(tips)
}
// Trigger pruning, which will check if the pruning point changed and delete the data if it did.
err = bp.pruningManager.FindNextPruningPoint()
if err != nil {
return err
}
return bp.commitAllChanges()
}
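
Since FindNextPruningPoint is now part of block insertion, a test of this call site can use a stub that satisfies model.PruningManager. A minimal, hypothetical test double (names are made up; only the interface defined above is assumed):

package blockprocessor_test // hypothetical package

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// pruningManagerStub is a hypothetical test double that records whether
// FindNextPruningPoint was invoked during block insertion.
type pruningManagerStub struct {
	findNextCalled bool
}

func (s *pruningManagerStub) FindNextPruningPoint() error {
	s.findNextCalled = true
	return nil
}

func (s *pruningManagerStub) PruningPoint() (*externalapi.DomainHash, error) {
	return nil, nil
}

func (s *pruningManagerStub) SerializedUTXOSet() ([]byte, error) {
	return nil, nil
}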

View File

@@ -13,7 +13,6 @@ type consensusStateManager struct {
ghostdagManager model.GHOSTDAGManager
dagTopologyManager model.DAGTopologyManager
dagTraversalManager model.DAGTraversalManager
pruningManager model.PruningManager
pastMedianTimeManager model.PastMedianTimeManager
transactionValidator model.TransactionValidator
blockValidator model.BlockValidator
@@ -42,7 +41,6 @@ func New(
ghostdagManager model.GHOSTDAGManager,
dagTopologyManager model.DAGTopologyManager,
dagTraversalManager model.DAGTraversalManager,
pruningManager model.PruningManager,
pastMedianTimeManager model.PastMedianTimeManager,
transactionValidator model.TransactionValidator,
blockValidator model.BlockValidator,
@@ -68,7 +66,6 @@ func New(
ghostdagManager: ghostdagManager,
dagTopologyManager: dagTopologyManager,
dagTraversalManager: dagTraversalManager,
pruningManager: pruningManager,
pastMedianTimeManager: pastMedianTimeManager,
transactionValidator: transactionValidator,
blockValidator: blockValidator,

View File

@@ -1,51 +1,249 @@
package pruningmanager
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashserialization"
)
// pruningManager resolves and manages the current pruning point
type pruningManager struct {
dagTraversalManager model.DAGTraversalManager
dagTopologyManager model.DAGTopologyManager
databaseContext model.DBReader
pruningStore model.PruningStore
blockStatusStore model.BlockStatusStore
consensusStateStore model.ConsensusStateStore
dagTraversalManager model.DAGTraversalManager
dagTopologyManager model.DAGTopologyManager
consensusStateManager model.ConsensusStateManager
consensusStateStore model.ConsensusStateStore
ghostdagDataStore model.GHOSTDAGDataStore
pruningStore model.PruningStore
blockStatusStore model.BlockStatusStore
multiSetStore model.MultisetStore
acceptanceDataStore model.AcceptanceDataStore
blocksStore model.BlockStore
utxoDiffStore model.UTXODiffStore
genesisHash *externalapi.DomainHash
finalityInterval uint64
pruningDepth uint64
}
// New instantiates a new PruningManager
func New(
databaseContext model.DBReader,
dagTraversalManager model.DAGTraversalManager,
dagTopologyManager model.DAGTopologyManager,
consensusStateManager model.ConsensusStateManager,
consensusStateStore model.ConsensusStateStore,
ghostdagDataStore model.GHOSTDAGDataStore,
pruningStore model.PruningStore,
blockStatusStore model.BlockStatusStore,
consensusStateStore model.ConsensusStateStore) model.PruningManager {
multiSetStore model.MultisetStore,
acceptanceDataStore model.AcceptanceDataStore,
blocksStore model.BlockStore,
utxoDiffStore model.UTXODiffStore,
genesisHash *externalapi.DomainHash,
finalityInterval uint64,
pruningDepth uint64,
) model.PruningManager {
return &pruningManager{
dagTraversalManager: dagTraversalManager,
dagTopologyManager: dagTopologyManager,
pruningStore: pruningStore,
blockStatusStore: blockStatusStore,
consensusStateStore: consensusStateStore,
databaseContext: databaseContext,
dagTraversalManager: dagTraversalManager,
dagTopologyManager: dagTopologyManager,
consensusStateManager: consensusStateManager,
consensusStateStore: consensusStateStore,
ghostdagDataStore: ghostdagDataStore,
pruningStore: pruningStore,
blockStatusStore: blockStatusStore,
multiSetStore: multiSetStore,
acceptanceDataStore: acceptanceDataStore,
blocksStore: blocksStore,
utxoDiffStore: utxoDiffStore,
genesisHash: genesisHash,
pruningDepth: pruningDepth,
finalityInterval: finalityInterval,
}
}
// FindNextPruningPoint finds the next pruning point from the
// given blockHash
func (pm *pruningManager) FindNextPruningPoint(blockHash *externalapi.DomainHash) error {
return nil
func (pm *pruningManager) FindNextPruningPoint() error {
virtual, err := pm.ghostdagDataStore.Get(pm.databaseContext, model.VirtualBlockHash)
if err != nil {
return err
}
currentP, err := pm.PruningPoint()
if err != nil {
return err
}
currentPGhost, err := pm.ghostdagDataStore.Get(pm.databaseContext, currentP)
if err != nil {
return err
}
currentPBlueScore := currentPGhost.BlueScore
// Because the pruning point changes at most once per finality interval, there's no need to check for a change if a finality interval hasn't passed since the current pruning point.
if virtual.BlueScore <= currentPBlueScore+pm.finalityInterval {
return nil
}
// This means the pruning point is still genesis.
if virtual.BlueScore <= pm.pruningDepth+pm.finalityInterval {
return nil
}
// get Virtual(pruningDepth)
candidatePHash, err := pm.dagTraversalManager.HighestChainBlockBelowBlueScore(model.VirtualBlockHash, pm.pruningDepth)
if err != nil {
return err
}
candidatePGhost, err := pm.ghostdagDataStore.Get(pm.databaseContext, candidatePHash)
if err != nil {
return err
}
// Actually check if the pruning point changed
if (currentPBlueScore / pm.finalityInterval) < (candidatePGhost.BlueScore / pm.finalityInterval) {
err = pm.savePruningPoint(candidatePHash)
if err != nil {
return err
}
return pm.deletePastBlocks(candidatePHash)
}
return pm.deletePastBlocks(currentP)
}
// PruningPoint returns the hash of the current pruning point
func (pm *pruningManager) PruningPoint() (*externalapi.DomainHash, error) {
return nil, nil
hasPruningPoint, err := pm.pruningStore.HasPruningPoint(pm.databaseContext)
if err != nil {
return nil, err
}
if hasPruningPoint {
return pm.pruningStore.PruningPoint(pm.databaseContext)
}
// If there's no pruning point yet, set genesis as the pruning point.
// It must be genesis, because this means `FindNextPruningPoint()` has never been called before;
// if this is part of the first `FindNextPruningPoint()` call, that call may then move the pruning point forward.
err = pm.savePruningPoint(pm.genesisHash)
if err != nil {
return nil, err
}
return pm.genesisHash, nil
}
// SerializedUTXOSet returns the serialized UTXO set of the
// current pruning point
func (pm *pruningManager) SerializedUTXOSet() ([]byte, error) {
return nil, nil
func (pm *pruningManager) deletePastBlocks(pruningPoint *externalapi.DomainHash) error {
// Go over all of P.Past and the blocks in P.AC that are not in V.Past
queue := pm.dagTraversalManager.NewDownHeap()
// Find the blocks in P.AC that are not in V.Past
dagTips, err := pm.consensusStateStore.Tips(pm.databaseContext)
if err != nil {
return err
}
for _, tip := range dagTips {
hasPruningPointInPast, err := pm.dagTopologyManager.IsAncestorOf(pruningPoint, tip)
if err != nil {
return err
}
if !hasPruningPointInPast {
isInVirtualPast, err := pm.dagTopologyManager.IsAncestorOf(model.VirtualBlockHash, tip)
if err != nil {
return err
}
if !isInVirtualPast {
// Add them to the queue so they and their past will be pruned
err := queue.Push(tip)
if err != nil {
return err
}
}
}
}
// Add P.Parents
parents, err := pm.dagTopologyManager.Parents(pruningPoint)
if err != nil {
return err
}
for _, parent := range parents {
err = queue.Push(parent)
if err != nil {
return err
}
}
visited := map[externalapi.DomainHash]struct{}{}
// Prune everything in the queue including its past
for queue.Len() > 0 {
current := queue.Pop()
if _, ok := visited[*current]; ok {
continue
}
visited[*current] = struct{}{}
alreadyPruned, err := pm.deleteBlock(current)
if err != nil {
return err
}
if !alreadyPruned {
parents, err := pm.dagTopologyManager.Parents(current)
if err != nil {
return err
}
for _, parent := range parents {
err = queue.Push(parent)
if err != nil {
return err
}
}
}
}
return nil
}
func (pm *pruningManager) savePruningPoint(blockHash *externalapi.DomainHash) error {
utxoIter, err := pm.consensusStateManager.RestorePastUTXOSetIterator(blockHash)
if err != nil {
return err
}
serializedUtxo, err := serializeUTXOSetIterator(utxoIter)
if err != nil {
return err
}
pm.pruningStore.Stage(blockHash, serializedUtxo)
return nil
}
func (pm *pruningManager) deleteBlock(blockHash *externalapi.DomainHash) (alreadyPruned bool, err error) {
status, err := pm.blockStatusStore.Get(pm.databaseContext, blockHash)
if err != nil {
return false, err
}
if status == externalapi.StatusHeaderOnly {
return true, nil
}
pm.multiSetStore.Delete(blockHash)
pm.acceptanceDataStore.Delete(blockHash)
pm.blocksStore.Delete(blockHash)
pm.utxoDiffStore.Delete(blockHash)
pm.blockStatusStore.Stage(blockHash, externalapi.StatusHeaderOnly)
return false, nil
}
func serializeUTXOSetIterator(iter model.ReadOnlyUTXOSetIterator) ([]byte, error) {
serializedUtxo, err := hashserialization.ReadOnlyUTXOSetToProtoUTXOSet(iter)
if err != nil {
return nil, err
}
return proto.Marshal(serializedUtxo)
}
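
The advancement rule above compares finality windows rather than raw blue scores: the candidate found at pruningDepth below the virtual becomes the new pruning point only when it falls in a later finality window than the current one. A self-contained sketch of that arithmetic with made-up numbers (finalityInterval = 100 is illustrative, not a network parameter):

package main

import "fmt"

// shouldMovePruningPoint reproduces the window comparison from FindNextPruningPoint:
// integer division by the finality interval assigns each blue score to a finality window.
func shouldMovePruningPoint(currentBlueScore, candidateBlueScore, finalityInterval uint64) bool {
	return (currentBlueScore / finalityInterval) < (candidateBlueScore / finalityInterval)
}

func main() {
	const finalityInterval = uint64(100) // illustrative value only
	fmt.Println(shouldMovePruningPoint(150, 199, finalityInterval)) // false: both blue scores are in window 1
	fmt.Println(shouldMovePruningPoint(150, 250, finalityInterval)) // true: window 1 -> window 2, the pruning point moves
}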

View File

@@ -0,0 +1,3 @@
//go:generate protoc --go_out=. --go-grpc_out=. --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative utxo.proto
package hashserialization

View File

@@ -3,6 +3,7 @@ package hashserialization
import (
"bytes"
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/model"
"io"
"github.com/pkg/errors"
@@ -15,6 +16,41 @@ import (
// keys will be iterated in an ascending order by the outpoint index.
var outpointIndexByteOrder = binary.BigEndian
const (
outpointLen = externalapi.DomainHashSize + 4
entryMinLen = 8 + 1 + 8 + 8
averageScriptPubKeySize = 20
)
// ReadOnlyUTXOSetToProtoUTXOSet converts ReadOnlyUTXOSetIterator to ProtoUTXOSet
func ReadOnlyUTXOSetToProtoUTXOSet(iter model.ReadOnlyUTXOSetIterator) (*ProtoUTXOSet, error) {
protoUTXOSet := &ProtoUTXOSet{
Utxos: []*ProtoUTXO{},
}
for iter.Next() {
outpoint, entry := iter.Get()
serializedOutpoint := bytes.NewBuffer(make([]byte, 0, outpointLen))
err := serializeOutpoint(serializedOutpoint, outpoint)
if err != nil {
return nil, err
}
serializedEntry := bytes.NewBuffer(make([]byte, 0, entryMinLen+averageScriptPubKeySize))
err = serializeUTXOEntry(serializedEntry, entry)
if err != nil {
return nil, err
}
protoUTXOSet.Utxos = append(protoUTXOSet.Utxos, &ProtoUTXO{
Entry: serializedEntry.Bytes(),
Outpoint: serializedOutpoint.Bytes(),
})
}
return protoUTXOSet, nil
}
// SerializeUTXO returns the byte-slice representation for given UTXOEntry
func SerializeUTXO(entry *externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint) ([]byte, error) {
w := &bytes.Buffer{}

View File

@@ -0,0 +1,226 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.4
// source: utxo.proto
package hashserialization
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type ProtoUTXO struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Entry []byte `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
Outpoint []byte `protobuf:"bytes,2,opt,name=outpoint,proto3" json:"outpoint,omitempty"`
}
func (x *ProtoUTXO) Reset() {
*x = ProtoUTXO{}
if protoimpl.UnsafeEnabled {
mi := &file_utxo_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProtoUTXO) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProtoUTXO) ProtoMessage() {}
func (x *ProtoUTXO) ProtoReflect() protoreflect.Message {
mi := &file_utxo_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProtoUTXO.ProtoReflect.Descriptor instead.
func (*ProtoUTXO) Descriptor() ([]byte, []int) {
return file_utxo_proto_rawDescGZIP(), []int{0}
}
func (x *ProtoUTXO) GetEntry() []byte {
if x != nil {
return x.Entry
}
return nil
}
func (x *ProtoUTXO) GetOutpoint() []byte {
if x != nil {
return x.Outpoint
}
return nil
}
type ProtoUTXOSet struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Utxos []*ProtoUTXO `protobuf:"bytes,1,rep,name=utxos,proto3" json:"utxos,omitempty"`
}
func (x *ProtoUTXOSet) Reset() {
*x = ProtoUTXOSet{}
if protoimpl.UnsafeEnabled {
mi := &file_utxo_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProtoUTXOSet) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProtoUTXOSet) ProtoMessage() {}
func (x *ProtoUTXOSet) ProtoReflect() protoreflect.Message {
mi := &file_utxo_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProtoUTXOSet.ProtoReflect.Descriptor instead.
func (*ProtoUTXOSet) Descriptor() ([]byte, []int) {
return file_utxo_proto_rawDescGZIP(), []int{1}
}
func (x *ProtoUTXOSet) GetUtxos() []*ProtoUTXO {
if x != nil {
return x.Utxos
}
return nil
}
var File_utxo_proto protoreflect.FileDescriptor
var file_utxo_proto_rawDesc = []byte{
0x0a, 0x0a, 0x75, 0x74, 0x78, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x68, 0x61,
0x73, 0x68, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
0x3d, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x55, 0x54, 0x58, 0x4f, 0x12, 0x14, 0x0a, 0x05,
0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x65, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x42,
0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x12, 0x32,
0x0a, 0x05, 0x75, 0x74, 0x78, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
0x68, 0x61, 0x73, 0x68, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x55, 0x54, 0x58, 0x4f, 0x52, 0x05, 0x75, 0x74, 0x78,
0x6f, 0x73, 0x42, 0x45, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64,
0x2f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75,
0x73, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x73, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x73, 0x65, 0x72, 0x69,
0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_utxo_proto_rawDescOnce sync.Once
file_utxo_proto_rawDescData = file_utxo_proto_rawDesc
)
func file_utxo_proto_rawDescGZIP() []byte {
file_utxo_proto_rawDescOnce.Do(func() {
file_utxo_proto_rawDescData = protoimpl.X.CompressGZIP(file_utxo_proto_rawDescData)
})
return file_utxo_proto_rawDescData
}
var file_utxo_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_utxo_proto_goTypes = []interface{}{
(*ProtoUTXO)(nil), // 0: hashserialization.ProtoUTXO
(*ProtoUTXOSet)(nil), // 1: hashserialization.ProtoUTXOSet
}
var file_utxo_proto_depIdxs = []int32{
0, // 0: hashserialization.ProtoUTXOSet.utxos:type_name -> hashserialization.ProtoUTXO
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_utxo_proto_init() }
func file_utxo_proto_init() {
if File_utxo_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_utxo_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ProtoUTXO); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_utxo_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ProtoUTXOSet); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_utxo_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_utxo_proto_goTypes,
DependencyIndexes: file_utxo_proto_depIdxs,
MessageInfos: file_utxo_proto_msgTypes,
}.Build()
File_utxo_proto = out.File
file_utxo_proto_rawDesc = nil
file_utxo_proto_goTypes = nil
file_utxo_proto_depIdxs = nil
}

View File

@@ -0,0 +1,13 @@
syntax = "proto3";
package hashserialization;
option go_package = "github.com/kaspanet/kaspad/domain/consensus/utils/hashserialization";
message ProtoUTXO {
bytes entry = 1;
bytes outpoint = 2;
}
message ProtoUTXOSet {
repeated ProtoUTXO utxos = 1;
}
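
A short, hypothetical round trip through the generated types, showing that the bytes staged by savePruningPoint can be decoded back into a ProtoUTXOSet (the function name and the outpoint/entry bytes are placeholders, not real serialized values):

package utxoserialization_example // hypothetical package

import (
	"github.com/golang/protobuf/proto"

	"github.com/kaspanet/kaspad/domain/consensus/utils/hashserialization"
)

// roundTrip marshals a ProtoUTXOSet and unmarshals it again, the same way the
// pruning manager stores the pruning point UTXO set and a reader would decode it.
func roundTrip() (*hashserialization.ProtoUTXOSet, error) {
	utxoSet := &hashserialization.ProtoUTXOSet{
		Utxos: []*hashserialization.ProtoUTXO{
			{
				Outpoint: []byte{0x01, 0x02}, // placeholder serialized outpoint
				Entry:    []byte{0x03, 0x04}, // placeholder serialized UTXO entry
			},
		},
	}
	serialized, err := proto.Marshal(utxoSet)
	if err != nil {
		return nil, err
	}
	deserialized := &hashserialization.ProtoUTXOSet{}
	if err := proto.Unmarshal(serialized, deserialized); err != nil {
		return nil, err
	}
	return deserialized, nil
}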

View File

@@ -5,6 +5,7 @@
package dagconfig
import (
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"math/big"
"time"
@@ -157,6 +158,11 @@ func (p *Params) FinalityDepth() uint64 {
return uint64(p.FinalityDuration / p.TargetTimePerBlock)
}
// PruningDepth returns the pruning depth in blocks
func (p *Params) PruningDepth() uint64 {
return 2*p.FinalityDepth() + 4*constants.MergeSetSizeLimit*uint64(p.K) + 2*uint64(p.K) + 2
}
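
The formula combines two finality depths with additional terms in the merge-set size limit and the GHOSTDAG parameter K. A quick sanity check of the arithmetic with purely illustrative numbers (these are not the real network constants):

package main

import "fmt"

func main() {
	// Illustrative numbers only; not the actual mainnet parameters.
	const (
		finalityDepth     = uint64(1000)
		mergeSetSizeLimit = uint64(10)
		k                 = uint64(18)
	)
	// Mirrors Params.PruningDepth: 2*FinalityDepth() + 4*MergeSetSizeLimit*K + 2*K + 2
	pruningDepth := 2*finalityDepth + 4*mergeSetSizeLimit*k + 2*k + 2
	fmt.Println(pruningDepth) // 2*1000 + 4*10*18 + 2*18 + 2 = 2758
}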
// MainnetParams defines the network parameters for the main Kaspa network.
var MainnetParams = Params{
K: ghostdagK,