[NOD-1567] Add clone methods to data stores types (#1149)

* [NOD-1567] Add clone methods to data stores types

* [NOD-1567] Fix comments

* [NOD-1567] Fix test
Ori Newman 2020-11-24 07:56:18 -08:00 committed by GitHub
parent afc634d871
commit 45d9b63572
55 changed files with 392 additions and 357 deletions
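
For orientation, here is a minimal, self-contained Go sketch of the pattern this commit applies across the data stores. It is illustrative only and uses hypothetical, simplified stand-ins (DomainHash, BlockRelations, blockRelationStore) rather than the kaspad sources: each model type gains a nil-safe deep Clone method, so a store's Stage can copy its argument directly instead of round-tripping through serialize/deserialize, and it no longer needs to return an error.

// Sketch only: simplified stand-ins for the real kaspad types.
package main

import "fmt"

// DomainHash stands in for externalapi.DomainHash (hypothetical, simplified).
type DomainHash [32]byte

// BlockRelations stands in for model.BlockRelations (simplified).
type BlockRelations struct {
	Parents  []*DomainHash
	Children []*DomainHash
}

// CloneHashes deep-copies a slice of hash pointers, mirroring the role of externalapi.CloneHashes.
func CloneHashes(hashes []*DomainHash) []*DomainHash {
	clone := make([]*DomainHash, len(hashes))
	for i, hash := range hashes {
		if hash != nil {
			hashClone := *hash
			clone[i] = &hashClone
		}
	}
	return clone
}

// Clone is nil-safe and returns a deep copy, so callers can stage the result freely.
func (br *BlockRelations) Clone() *BlockRelations {
	if br == nil {
		return nil
	}
	return &BlockRelations{
		Parents:  CloneHashes(br.Parents),
		Children: CloneHashes(br.Children),
	}
}

// blockRelationStore sketches the store side: Stage clones its input and cannot fail,
// so the error-return plumbing in every caller goes away.
type blockRelationStore struct {
	staging map[DomainHash]*BlockRelations
}

func (brs *blockRelationStore) StageBlockRelation(blockHash *DomainHash, relations *BlockRelations) {
	brs.staging[*blockHash] = relations.Clone()
}

func main() {
	store := &blockRelationStore{staging: make(map[DomainHash]*BlockRelations)}
	parent := &DomainHash{1}
	blockHash := &DomainHash{2}
	store.StageBlockRelation(blockHash, &BlockRelations{Parents: []*DomainHash{parent}})

	// Mutating the caller's value does not affect the staged clone.
	*parent = DomainHash{9}
	fmt.Println(store.staging[*blockHash].Parents[0][0]) // prints 1
}

Because the clone is deep, later mutation of the caller's value cannot leak into the staging map, which is what previously justified the serialize/deserialize round trip.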

View File

@@ -25,14 +25,8 @@ func New() model.AcceptanceDataStore {
}

// Stage stages the given acceptanceData for the given blockHash
-func (ads *acceptanceDataStore) Stage(blockHash *externalapi.DomainHash, acceptanceData model.AcceptanceData) error {
-	clone, err := ads.cloneAcceptanceData(acceptanceData)
-	if err != nil {
-		return err
-	}
-	ads.staging[*blockHash] = clone
-	return nil
+func (ads *acceptanceDataStore) Stage(blockHash *externalapi.DomainHash, acceptanceData model.AcceptanceData) {
+	ads.staging[*blockHash] = acceptanceData.Clone()
}

func (ads *acceptanceDataStore) IsStaged() bool {
@@ -107,12 +101,3 @@ func (ads *acceptanceDataStore) deserializeAcceptanceData(acceptanceDataBytes []
func (ads *acceptanceDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return bucket.Key(hash[:])
}
-
-func (ads *acceptanceDataStore) cloneAcceptanceData(acceptanceData model.AcceptanceData) (model.AcceptanceData, error) {
-	serialized, err := ads.serializeAcceptanceData(acceptanceData)
-	if err != nil {
-		return nil, err
-	}
-	return ads.deserializeAcceptanceData(serialized)
-}

View File

@@ -54,14 +54,8 @@ func (bhs *blockHeaderStore) initializeCount(dbContext model.DBReader) error {
}

// Stage stages the given block header for the given blockHash
-func (bhs *blockHeaderStore) Stage(blockHash *externalapi.DomainHash, blockHeader *externalapi.DomainBlockHeader) error {
-	clone, err := bhs.cloneHeader(blockHeader)
-	if err != nil {
-		return err
-	}
-	bhs.staging[*blockHash] = clone
-	return nil
+func (bhs *blockHeaderStore) Stage(blockHash *externalapi.DomainHash, blockHeader *externalapi.DomainBlockHeader) {
+	bhs.staging[*blockHash] = blockHeader.Clone()
}

func (bhs *blockHeaderStore) IsStaged() bool {
@@ -169,15 +163,6 @@ func (bhs *blockHeaderStore) deserializeHeader(headerBytes []byte) (*externalapi
	return serialization.DbBlockHeaderToDomainBlockHeader(dbBlockHeader)
}
-
-func (bhs *blockHeaderStore) cloneHeader(header *externalapi.DomainBlockHeader) (*externalapi.DomainBlockHeader, error) {
-	serialized, err := bhs.serializeHeader(header)
-	if err != nil {
-		return nil, err
-	}
-	return bhs.deserializeHeader(serialized)
-}

func (bhs *blockHeaderStore) Count() uint64 {
	return bhs.count + uint64(len(bhs.staging)) - uint64(len(bhs.toDelete))
}

View File

@@ -22,14 +22,8 @@ func New() model.BlockRelationStore {
	}
}

-func (brs *blockRelationStore) StageBlockRelation(blockHash *externalapi.DomainHash, blockRelations *model.BlockRelations) error {
-	clone, err := brs.clone(blockRelations)
-	if err != nil {
-		return err
-	}
-	brs.staging[*blockHash] = clone
-	return nil
+func (brs *blockRelationStore) StageBlockRelation(blockHash *externalapi.DomainHash, blockRelations *model.BlockRelations) {
+	brs.staging[*blockHash] = blockRelations.Clone()
}

func (brs *blockRelationStore) IsStaged() bool {
@@ -94,12 +88,3 @@ func (brs *blockRelationStore) deserializeBlockRelations(blockRelationsBytes []b
	}
	return serialization.DbBlockRelationsToDomainBlockRelations(dbBlockRelations)
}
-
-func (brs *blockRelationStore) clone(blockRelations *model.BlockRelations) (*model.BlockRelations, error) {
-	serialized, err := brs.serializeBlockRelations(blockRelations)
-	if err != nil {
-		return nil, err
-	}
-	return brs.deserializeBlockRelations(serialized)
-}

View File

@@ -24,7 +24,7 @@ func New() model.BlockStatusStore {
// Stage stages the given blockStatus for the given blockHash
func (bss *blockStatusStore) Stage(blockHash *externalapi.DomainHash, blockStatus externalapi.BlockStatus) {
-	bss.staging[*blockHash] = blockStatus
+	bss.staging[*blockHash] = blockStatus.Clone()
}

func (bss *blockStatusStore) IsStaged() bool {

View File

@@ -54,14 +54,8 @@ func (bs *blockStore) initializeCount(dbContext model.DBReader) error {
}

// Stage stages the given block for the given blockHash
-func (bs *blockStore) Stage(blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) error {
-	clone, err := bs.clone(block)
-	if err != nil {
-		return err
-	}
-	bs.staging[*blockHash] = clone
-	return nil
+func (bs *blockStore) Stage(blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) {
+	bs.staging[*blockHash] = block.Clone()
}

func (bs *blockStore) IsStaged() bool {
@@ -169,15 +163,6 @@ func (bs *blockStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return bucket.Key(hash[:])
}
-
-func (bs *blockStore) clone(block *externalapi.DomainBlock) (*externalapi.DomainBlock, error) {
-	serialized, err := bs.serializeBlock(block)
-	if err != nil {
-		return nil, err
-	}
-	return bs.deserializeBlock(serialized)
-}

func (bs *blockStore) Count() uint64 {
	return bs.count + uint64(len(bs.staging)) - uint64(len(bs.toDelete))
}

View File

@@ -23,14 +23,8 @@ func (c *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.Dom
	return c.deserializeTips(tipsBytes)
}

-func (c *consensusStateStore) StageTips(tipHashes []*externalapi.DomainHash) error {
-	clone, err := c.cloneTips(tipHashes)
-	if err != nil {
-		return err
-	}
-	c.stagedTips = clone
-	return nil
+func (c *consensusStateStore) StageTips(tipHashes []*externalapi.DomainHash) {
+	c.stagedTips = externalapi.CloneHashes(tipHashes)
}

func (c *consensusStateStore) commitTips(dbTx model.DBTransaction) error {
@@ -67,14 +61,3 @@ func (c *consensusStateStore) deserializeTips(tipsBytes []byte) ([]*externalapi.
	return serialization.DBTipsToTips(dbTips)
}
-
-func (c *consensusStateStore) cloneTips(tips []*externalapi.DomainHash,
-) ([]*externalapi.DomainHash, error) {
-	serialized, err := c.serializeTips(tips)
-	if err != nil {
-		return nil, err
-	}
-	return c.deserializeTips(serialized)
-}

View File

@@ -23,7 +23,7 @@ func (c *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff *model.UTXODi
		return errors.New("cannot stage virtual UTXO diff while virtual UTXO set is staged")
	}
-	c.stagedVirtualUTXODiff = c.cloneUTXODiff(virtualUTXODiff)
+	c.stagedVirtualUTXODiff = virtualUTXODiff.Clone()
	return nil
}
@@ -227,35 +227,3 @@ func (c *consensusStateStore) StageVirtualUTXOSet(virtualUTXOSetIterator model.R
	return nil
}
-
-func (c *consensusStateStore) cloneUTXODiff(diff *model.UTXODiff) *model.UTXODiff {
-	utxoDiffCopy := &model.UTXODiff{
-		ToAdd:    make(model.UTXOCollection, len(diff.ToAdd)),
-		ToRemove: make(model.UTXOCollection, len(diff.ToRemove)),
-	}
-	for outpoint, entry := range diff.ToAdd {
-		scriptPublicKeyCopy := make([]byte, len(entry.ScriptPublicKey))
-		copy(scriptPublicKeyCopy, entry.ScriptPublicKey)
-		utxoDiffCopy.ToAdd[outpoint] = cloneUTXOEntry(entry)
-	}
-	for outpoint, entry := range diff.ToRemove {
-		scriptPublicKeyCopy := make([]byte, len(entry.ScriptPublicKey))
-		copy(scriptPublicKeyCopy, entry.ScriptPublicKey)
-		utxoDiffCopy.ToRemove[outpoint] = cloneUTXOEntry(entry)
-	}
-	return diff
-}
-
-func cloneUTXOEntry(entry *externalapi.UTXOEntry) *externalapi.UTXOEntry {
-	scriptPublicKeyCopy := make([]byte, len(entry.ScriptPublicKey))
-	copy(scriptPublicKeyCopy, entry.ScriptPublicKey)
-	return &externalapi.UTXOEntry{
-		Amount:          entry.Amount,
-		ScriptPublicKey: scriptPublicKeyCopy,
-		BlockBlueScore:  entry.BlockBlueScore,
-		IsCoinbase:      entry.IsCoinbase,
-	}
-}

View File

@@ -23,14 +23,8 @@ func (c *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*e
	return c.deserializeVirtualDiffParents(virtualDiffParentsBytes)
}

-func (c *consensusStateStore) StageVirtualDiffParents(tipHashes []*externalapi.DomainHash) error {
-	clone, err := c.cloneVirtualDiffParents(tipHashes)
-	if err != nil {
-		return err
-	}
-	c.stagedVirtualDiffParents = clone
-	return nil
+func (c *consensusStateStore) StageVirtualDiffParents(tipHashes []*externalapi.DomainHash) {
+	c.stagedVirtualDiffParents = externalapi.CloneHashes(tipHashes)
}

func (c *consensusStateStore) commitVirtualDiffParents(dbTx model.DBTransaction) error {
@@ -67,14 +61,3 @@ func (c *consensusStateStore) deserializeVirtualDiffParents(virtualDiffParentsBy
	return serialization.DBVirtualDiffParentsToVirtualDiffParents(dbVirtualDiffParents)
}
-
-func (c *consensusStateStore) cloneVirtualDiffParents(virtualDiffParents []*externalapi.DomainHash,
-) ([]*externalapi.DomainHash, error) {
-	serialized, err := c.serializeVirtualDiffParents(virtualDiffParents)
-	if err != nil {
-		return nil, err
-	}
-	return c.deserializeVirtualDiffParents(serialized)
-}

View File

@@ -23,14 +23,8 @@ func New() model.GHOSTDAGDataStore {
}

// Stage stages the given blockGHOSTDAGData for the given blockHash
-func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) error {
-	clone, err := gds.clone(blockGHOSTDAGData)
-	if err != nil {
-		return err
-	}
-	gds.staging[*blockHash] = clone
-	return nil
+func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) {
+	gds.staging[*blockHash] = blockGHOSTDAGData.Clone()
}

func (gds *ghostdagDataStore) IsStaged() bool {
@@ -89,12 +83,3 @@ func (gds *ghostdagDataStore) deserializeBlockGHOSTDAGData(blockGHOSTDAGDataByte
	return serialization.DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData)
}
-
-func (gds *ghostdagDataStore) clone(blockGHOSTDAGData *model.BlockGHOSTDAGData) (*model.BlockGHOSTDAGData, error) {
-	serialized, err := gds.serializeBlockGHOSTDAGData(blockGHOSTDAGData)
-	if err != nil {
-		return nil, err
-	}
-	return gds.deserializeBlockGHOSTDAGData(serialized)
-}

View File

@@ -45,14 +45,8 @@ func (h *headerTipsStore) Commit(dbTx model.DBTransaction) error {
	return nil
}

-func (h *headerTipsStore) Stage(tips []*externalapi.DomainHash) error {
-	clone, err := h.clone(tips)
-	if err != nil {
-		return err
-	}
-	h.staging = clone
-	return nil
+func (h *headerTipsStore) Stage(tips []*externalapi.DomainHash) {
+	h.staging = externalapi.CloneHashes(tips)
}

func (h *headerTipsStore) IsStaged() bool {
@@ -87,17 +81,6 @@ func (h *headerTipsStore) deserializeTips(tipsBytes []byte) ([]*externalapi.Doma
	return serialization.DBHeaderTipsToHeaderTips(dbTips)
}
-
-func (h *headerTipsStore) clone(tips []*externalapi.DomainHash,
-) ([]*externalapi.DomainHash, error) {
-	serialized, err := h.serializeTips(tips)
-	if err != nil {
-		return nil, err
-	}
-	return h.deserializeTips(serialized)
-}

// New instantiates a new HeaderTipsStore
func New() model.HeaderTipsStore {
	return &headerTipsStore{}

View File

@@ -26,7 +26,7 @@ func New() model.MultisetStore {
// Stage stages the given multiset for the given blockHash
func (ms *multisetStore) Stage(blockHash *externalapi.DomainHash, multiset model.Multiset) {
-	ms.staging[*blockHash] = multiset
+	ms.staging[*blockHash] = multiset.Clone()
}

func (ms *multisetStore) IsStaged() bool {
@@ -65,7 +65,7 @@ func (ms *multisetStore) Commit(dbTx model.DBTransaction) error {
// Get gets the multiset associated with the given blockHash
func (ms *multisetStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.Multiset, error) {
	if multiset, ok := ms.staging[*blockHash]; ok {
-		return multiset.Clone()
+		return multiset.Clone(), nil
	}

	multisetBytes, err := dbContext.Get(ms.hashAsKey(blockHash))

View File

@@ -27,7 +27,7 @@ func New() model.PruningStore {
// Stage stages the pruning state
func (ps *pruningStore) Stage(pruningPointBlockHash *externalapi.DomainHash, pruningPointUTXOSetBytes []byte) {
-	ps.pruningPointStaging = &(*pruningPointBlockHash)
+	ps.pruningPointStaging = pruningPointBlockHash.Clone()
	ps.serializedUTXOSetStaging = pruningPointUTXOSetBytes
}

View File

@@ -27,14 +27,8 @@ func New() model.ReachabilityDataStore {
// StageReachabilityData stages the given reachabilityData for the given blockHash
func (rds *reachabilityDataStore) StageReachabilityData(blockHash *externalapi.DomainHash,
-	reachabilityData *model.ReachabilityData) error {
-	clone, err := rds.cloneReachabilityData(reachabilityData)
-	if err != nil {
-		return err
-	}
-	rds.reachabilityDataStaging[*blockHash] = clone
-	return nil
+	reachabilityData *model.ReachabilityData) {
+	rds.reachabilityDataStaging[*blockHash] = reachabilityData.Clone()
}

// StageReachabilityReindexRoot stages the given reachabilityReindexRoot
@@ -151,12 +145,3 @@ func (rds *reachabilityDataStore) deserializeReachabilityReindexRoot(reachabilit
	return serialization.DbHashToDomainHash(dbHash)
}
-
-func (rds *reachabilityDataStore) cloneReachabilityData(reachabilityData *model.ReachabilityData) (*model.ReachabilityData, error) {
-	serialized, err := rds.serializeReachabilityData(reachabilityData)
-	if err != nil {
-		return nil, err
-	}
-	return rds.deserializeReachabilityData(serialized)
-}

View File

@@ -29,18 +29,12 @@ func New() model.UTXODiffStore {
}

// Stage stages the given utxoDiff for the given blockHash
-func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff *model.UTXODiff, utxoDiffChild *externalapi.DomainHash) error {
-	utxoDiffClone, err := uds.cloneUTXODiff(utxoDiff)
-	if err != nil {
-		return err
-	}
-	uds.utxoDiffStaging[*blockHash] = utxoDiffClone
+func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff *model.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
+	uds.utxoDiffStaging[*blockHash] = utxoDiff.Clone()
	if utxoDiffChild != nil {
-		utxoDiffChildClone := uds.cloneUTXODiffChild(utxoDiffChild)
-		uds.utxoDiffChildStaging[*blockHash] = utxoDiffChildClone
+		uds.utxoDiffChildStaging[*blockHash] = utxoDiffChild.Clone()
	}
-	return nil
}

func (uds *utxoDiffStore) IsStaged() bool {
@@ -203,16 +197,3 @@ func (uds *utxoDiffStore) deserializeUTXODiffChild(utxoDiffChildBytes []byte) (*
	return serialization.DbHashToDomainHash(dbHash)
}
-
-func (uds *utxoDiffStore) cloneUTXODiff(diff *model.UTXODiff) (*model.UTXODiff, error) {
-	serialized, err := uds.serializeUTXODiff(diff)
-	if err != nil {
-		return nil, err
-	}
-	return uds.deserializeUTXODiff(serialized)
-}
-
-func (uds *utxoDiffStore) cloneUTXODiffChild(diffChild *externalapi.DomainHash) *externalapi.DomainHash {
-	return diffChild.Clone()
-}

View File

@@ -6,12 +6,39 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// It's ordered in the same way as the block merge set blues.
type AcceptanceData []*BlockAcceptanceData

+// Clone clones the AcceptanceData
+func (ad AcceptanceData) Clone() AcceptanceData {
+	if ad == nil {
+		return nil
+	}
+	clone := make(AcceptanceData, len(ad))
+	for i, blockAcceptanceData := range ad {
+		clone[i] = blockAcceptanceData.Clone()
+	}
+	return clone
+}
+
// BlockAcceptanceData stores all transactions in a block with an indication
// if they were accepted or not by some other block
type BlockAcceptanceData struct {
	TransactionAcceptanceData []*TransactionAcceptanceData
}

+// Clone returns a clone of BlockAcceptanceData
+func (bad *BlockAcceptanceData) Clone() *BlockAcceptanceData {
+	if bad == nil {
+		return nil
+	}
+	clone := &BlockAcceptanceData{TransactionAcceptanceData: make([]*TransactionAcceptanceData, len(bad.TransactionAcceptanceData))}
+	for i, acceptanceData := range bad.TransactionAcceptanceData {
+		clone.TransactionAcceptanceData[i] = acceptanceData.Clone()
+	}
+	return clone
+}
+
// TransactionAcceptanceData stores a transaction together with an indication
// if it was accepted or not by some block
type TransactionAcceptanceData struct {
@@ -19,3 +46,16 @@ type TransactionAcceptanceData struct {
	Fee uint64
	IsAccepted bool
}
+
+// Clone returns a clone of TransactionAcceptanceData
+func (tad *TransactionAcceptanceData) Clone() *TransactionAcceptanceData {
+	if tad == nil {
+		return nil
+	}
+	return &TransactionAcceptanceData{
+		Transaction: tad.Transaction.Clone(),
+		Fee: tad.Fee,
+		IsAccepted: tad.IsAccepted,
+	}
+}

View File

@@ -7,3 +7,15 @@ type BlockRelations struct {
	Parents []*externalapi.DomainHash
	Children []*externalapi.DomainHash
}
+
+// Clone returns a clone of BlockRelations
+func (br *BlockRelations) Clone() *BlockRelations {
+	if br == nil {
+		return nil
+	}
+	return &BlockRelations{
+		Parents: externalapi.CloneHashes(br.Parents),
+		Children: externalapi.CloneHashes(br.Children),
+	}
+}

View File

@@ -6,6 +6,23 @@ type DomainBlock struct {
	Transactions []*DomainTransaction
}

+// Clone returns a clone of DomainBlock
+func (block *DomainBlock) Clone() *DomainBlock {
+	if block == nil {
+		return nil
+	}
+	transactionClone := make([]*DomainTransaction, len(block.Transactions))
+	for i, tx := range block.Transactions {
+		transactionClone[i] = tx.Clone()
+	}
+	return &DomainBlock{
+		Header: block.Header.Clone(),
+		Transactions: transactionClone,
+	}
+}
+
// DomainBlockHeader represents the header part of a Kaspa block
type DomainBlockHeader struct {
	Version int32
@@ -17,3 +34,21 @@ type DomainBlockHeader struct {
	Bits uint32
	Nonce uint64
}
+
+// Clone returns a clone of DomainBlockHeader
+func (header *DomainBlockHeader) Clone() *DomainBlockHeader {
+	if header == nil {
+		return nil
+	}
+	return &DomainBlockHeader{
+		Version: header.Version,
+		ParentHashes: CloneHashes(header.ParentHashes),
+		HashMerkleRoot: *header.HashMerkleRoot.Clone(),
+		AcceptedIDMerkleRoot: *header.AcceptedIDMerkleRoot.Clone(),
+		UTXOCommitment: *header.UTXOCommitment.Clone(),
+		TimeInMilliseconds: header.TimeInMilliseconds,
+		Bits: header.Bits,
+		Nonce: header.Nonce,
+	}
+}

View File

@@ -3,6 +3,11 @@ package externalapi
// BlockStatus represents the validation state of the block.
type BlockStatus byte

+// Clone returns a clone of BlockStatus
+func (bs BlockStatus) Clone() BlockStatus {
+	return bs
+}
+
const (
	// StatusInvalid indicates that the block is invalid.
	StatusInvalid BlockStatus = iota

View File

@@ -22,7 +22,18 @@ func (hash *DomainHash) Clone() *DomainHash {
	if hash == nil {
		return nil
	}
-	return &*hash
+	hashClone := *hash
+	return &hashClone
+}
+
+// CloneHashes returns a clone of the given hashes slice
+func CloneHashes(hashes []*DomainHash) []*DomainHash {
+	clone := make([]*DomainHash, len(hashes))
+	for i, hash := range hashes {
+		clone[i] = hash.Clone()
+	}
+	return clone
}

// DomainHashesToStrings returns a slice of strings representing the hashes in the given slice of hashes

View File

@@ -15,3 +15,13 @@ func (id DomainSubnetworkID) String() string {
	}
	return hex.EncodeToString(id[:])
}
+
+// Clone returns a clone of DomainSubnetworkID
+func (id *DomainSubnetworkID) Clone() *DomainSubnetworkID {
+	if id == nil {
+		return nil
+	}
+	idClone := *id
+	return &idClone
+}

View File

@@ -19,6 +19,39 @@ type DomainTransaction struct {
	Mass uint64
}

+// Clone returns a clone of DomainTransaction
+func (tx *DomainTransaction) Clone() *DomainTransaction {
+	if tx == nil {
+		return nil
+	}
+	payloadClone := make([]byte, len(tx.Payload))
+	copy(payloadClone, tx.Payload)
+	inputsClone := make([]*DomainTransactionInput, len(tx.Inputs))
+	for i, input := range tx.Inputs {
+		inputsClone[i] = input.Clone()
+	}
+	outputsClone := make([]*DomainTransactionOutput, len(tx.Outputs))
+	for i, output := range tx.Outputs {
+		outputsClone[i] = output.Clone()
+	}
+	return &DomainTransaction{
+		Version: tx.Version,
+		Inputs: inputsClone,
+		Outputs: outputsClone,
+		LockTime: tx.LockTime,
+		SubnetworkID: *tx.SubnetworkID.Clone(),
+		Gas: tx.Gas,
+		PayloadHash: *tx.PayloadHash.Clone(),
+		Payload: payloadClone,
+		Fee: tx.Fee,
+		Mass: tx.Mass,
+	}
+}
+
// DomainTransactionInput represents a Kaspa transaction input
type DomainTransactionInput struct {
	PreviousOutpoint DomainOutpoint
@@ -28,12 +61,41 @@ type DomainTransactionInput struct {
	UTXOEntry *UTXOEntry
}

+// Clone returns a clone of DomainTransactionInput
+func (input *DomainTransactionInput) Clone() *DomainTransactionInput {
+	if input == nil {
+		return nil
+	}
+	signatureScriptClone := make([]byte, len(input.SignatureScript))
+	copy(signatureScriptClone, input.SignatureScript)
+	return &DomainTransactionInput{
+		PreviousOutpoint: *input.PreviousOutpoint.Clone(),
+		SignatureScript: signatureScriptClone,
+		Sequence: input.Sequence,
+		UTXOEntry: input.UTXOEntry.Clone(),
+	}
+}
+
// DomainOutpoint represents a Kaspa transaction outpoint
type DomainOutpoint struct {
	TransactionID DomainTransactionID
	Index uint32
}

+// Clone returns a clone of DomainOutpoint
+func (op *DomainOutpoint) Clone() *DomainOutpoint {
+	if op == nil {
+		return nil
+	}
+	return &DomainOutpoint{
+		TransactionID: *op.TransactionID.Clone(),
+		Index: op.Index,
+	}
+}
+
// String stringifies an outpoint.
func (op DomainOutpoint) String() string {
	return fmt.Sprintf("(%s: %d)", op.TransactionID, op.Index)
@@ -53,6 +115,21 @@ type DomainTransactionOutput struct {
	ScriptPublicKey []byte
}

+// Clone returns a clone of DomainTransactionOutput
+func (output *DomainTransactionOutput) Clone() *DomainTransactionOutput {
+	if output == nil {
+		return nil
+	}
+	scriptPublicKeyClone := make([]byte, len(output.ScriptPublicKey))
+	copy(scriptPublicKeyClone, output.ScriptPublicKey)
+	return &DomainTransactionOutput{
+		Value: output.Value,
+		ScriptPublicKey: scriptPublicKeyClone,
+	}
+}
+
// DomainTransactionID represents the ID of a Kaspa transaction
type DomainTransactionID DomainHash
@@ -60,3 +137,13 @@ type DomainTransactionID DomainHash
func (id DomainTransactionID) String() string {
	return DomainHash(id).String()
}
+
+// Clone returns a clone of DomainTransactionID
+func (id *DomainTransactionID) Clone() *DomainTransactionID {
+	if id == nil {
+		return nil
+	}
+	idClone := *id
+	return &idClone
+}

View File

@@ -11,6 +11,23 @@ type UTXOEntry struct {
	IsCoinbase bool
}

+// Clone returns a clone of UTXOEntry
+func (entry *UTXOEntry) Clone() *UTXOEntry {
+	if entry == nil {
+		return nil
+	}
+	scriptPublicKeyClone := make([]byte, len(entry.ScriptPublicKey))
+	copy(scriptPublicKeyClone, entry.ScriptPublicKey)
+	return &UTXOEntry{
+		Amount: entry.Amount,
+		ScriptPublicKey: scriptPublicKeyClone,
+		BlockBlueScore: entry.BlockBlueScore,
+		IsCoinbase: entry.IsCoinbase,
+	}
+}
+
// NewUTXOEntry creates a new utxoEntry representing the given txOut
func NewUTXOEntry(amount uint64, scriptPubKey []byte, isCoinbase bool, blockBlueScore uint64) *UTXOEntry {
	return &UTXOEntry{

View File

@@ -11,5 +11,25 @@ type BlockGHOSTDAGData struct {
	BluesAnticoneSizes map[externalapi.DomainHash]KType
}

+// Clone returns a clone of BlockGHOSTDAGData
+func (bgd *BlockGHOSTDAGData) Clone() *BlockGHOSTDAGData {
+	if bgd == nil {
+		return nil
+	}
+	bluesAnticoneSizesClone := make(map[externalapi.DomainHash]KType, len(bgd.BluesAnticoneSizes))
+	for hash, size := range bgd.BluesAnticoneSizes {
+		bluesAnticoneSizesClone[hash] = size
+	}
+	return &BlockGHOSTDAGData{
+		BlueScore: bgd.BlueScore,
+		SelectedParent: bgd.SelectedParent.Clone(),
+		MergeSetBlues: externalapi.CloneHashes(bgd.MergeSetBlues),
+		MergeSetReds: externalapi.CloneHashes(bgd.MergeSetReds),
+		BluesAnticoneSizes: bluesAnticoneSizesClone,
+	}
+}
+
// KType defines the size of GHOSTDAG consensus algorithm K parameter.
type KType byte

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// AcceptanceDataStore represents a store of AcceptanceData
type AcceptanceDataStore interface {
	Store
-	Stage(blockHash *externalapi.DomainHash, acceptanceData AcceptanceData) error
+	Stage(blockHash *externalapi.DomainHash, acceptanceData AcceptanceData)
	IsStaged() bool
	Get(dbContext DBReader, blockHash *externalapi.DomainHash) (AcceptanceData, error)
	Delete(blockHash *externalapi.DomainHash)

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// BlockHeaderStore represents a store of block headers
type BlockHeaderStore interface {
	Store
-	Stage(blockHash *externalapi.DomainHash, blockHeader *externalapi.DomainBlockHeader) error
+	Stage(blockHash *externalapi.DomainHash, blockHeader *externalapi.DomainBlockHeader)
	IsStaged() bool
	BlockHeader(dbContext DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainBlockHeader, error)
	HasBlockHeader(dbContext DBReader, blockHash *externalapi.DomainHash) (bool, error)

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// BlockRelationStore represents a store of BlockRelations
type BlockRelationStore interface {
	Store
-	StageBlockRelation(blockHash *externalapi.DomainHash, blockRelations *BlockRelations) error
+	StageBlockRelation(blockHash *externalapi.DomainHash, blockRelations *BlockRelations)
	IsStaged() bool
	BlockRelation(dbContext DBReader, blockHash *externalapi.DomainHash) (*BlockRelations, error)
	Has(dbContext DBReader, blockHash *externalapi.DomainHash) (bool, error)

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// BlockStore represents a store of blocks
type BlockStore interface {
	Store
-	Stage(blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) error
+	Stage(blockHash *externalapi.DomainHash, block *externalapi.DomainBlock)
	IsStaged() bool
	Block(dbContext DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error)
	HasBlock(dbContext DBReader, blockHash *externalapi.DomainHash) (bool, error)

View File

@@ -13,9 +13,9 @@ type ConsensusStateStore interface {
	HasUTXOByOutpoint(dbContext DBReader, outpoint *externalapi.DomainOutpoint) (bool, error)
	VirtualUTXOSetIterator(dbContext DBReader) (ReadOnlyUTXOSetIterator, error)
-	StageVirtualDiffParents(virtualDiffParents []*externalapi.DomainHash) error
+	StageVirtualDiffParents(virtualDiffParents []*externalapi.DomainHash)
	VirtualDiffParents(dbContext DBReader) ([]*externalapi.DomainHash, error)
-	StageTips(tipHashes []*externalapi.DomainHash) error
+	StageTips(tipHashes []*externalapi.DomainHash)
	Tips(dbContext DBReader) ([]*externalapi.DomainHash, error)
}

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// GHOSTDAGDataStore represents a store of BlockGHOSTDAGData
type GHOSTDAGDataStore interface {
	Store
-	Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *BlockGHOSTDAGData) error
+	Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *BlockGHOSTDAGData)
	IsStaged() bool
	Get(dbContext DBReader, blockHash *externalapi.DomainHash) (*BlockGHOSTDAGData, error)
}

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// HeaderTipsStore represents a store of the header tips
type HeaderTipsStore interface {
	Store
-	Stage(tips []*externalapi.DomainHash) error
+	Stage(tips []*externalapi.DomainHash)
	IsStaged() bool
	Tips(dbContext DBReader) ([]*externalapi.DomainHash, error)
	HasTips(dbContext DBReader) (bool, error)

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// ReachabilityDataStore represents a store of ReachabilityData
type ReachabilityDataStore interface {
	Store
-	StageReachabilityData(blockHash *externalapi.DomainHash, reachabilityData *ReachabilityData) error
+	StageReachabilityData(blockHash *externalapi.DomainHash, reachabilityData *ReachabilityData)
	StageReachabilityReindexRoot(reachabilityReindexRoot *externalapi.DomainHash)
	IsAnythingStaged() bool
	ReachabilityData(dbContext DBReader, blockHash *externalapi.DomainHash) (*ReachabilityData, error)

View File

@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// UTXODiffStore represents a store of UTXODiffs
type UTXODiffStore interface {
	Store
-	Stage(blockHash *externalapi.DomainHash, utxoDiff *UTXODiff, utxoDiffChild *externalapi.DomainHash) error
+	Stage(blockHash *externalapi.DomainHash, utxoDiff *UTXODiff, utxoDiffChild *externalapi.DomainHash)
	IsStaged() bool
	UTXODiff(dbContext DBReader, blockHash *externalapi.DomainHash) (*UTXODiff, error)
	UTXODiffChild(dbContext DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)

View File

@@ -8,5 +8,5 @@ type Multiset interface {
	Remove(data []byte)
	Hash() *externalapi.DomainHash
	Serialize() []byte
-	Clone() (Multiset, error)
+	Clone() Multiset
}

View File

@@ -12,6 +12,18 @@ type ReachabilityData struct {
	FutureCoveringSet FutureCoveringTreeNodeSet
}

+// Clone returns a clone of ReachabilityData
+func (rd *ReachabilityData) Clone() *ReachabilityData {
+	if rd == nil {
+		return nil
+	}
+	return &ReachabilityData{
+		TreeNode: rd.TreeNode.Clone(),
+		FutureCoveringSet: externalapi.CloneHashes(rd.FutureCoveringSet),
+	}
+}
+
// ReachabilityTreeNode represents a node in the reachability tree
// of some DAG block. It mainly provides the ability to query *tree*
// reachability with O(1) query time. It does so by managing an
@@ -36,6 +48,19 @@ type ReachabilityTreeNode struct {
	Interval *ReachabilityInterval
}

+// Clone returns a clone of ReachabilityTreeNode
+func (rtn *ReachabilityTreeNode) Clone() *ReachabilityTreeNode {
+	if rtn == nil {
+		return nil
+	}
+	return &ReachabilityTreeNode{
+		Children: externalapi.CloneHashes(rtn.Children),
+		Parent: rtn.Parent.Clone(),
+		Interval: rtn.Interval.Clone(),
+	}
+}
+
// ReachabilityInterval represents an interval to be used within the
// tree reachability algorithm. See ReachabilityTreeNode for further
// details.
@@ -44,6 +69,18 @@ type ReachabilityInterval struct {
	End uint64
}

+// Clone returns a clone of ReachabilityInterval
+func (ri *ReachabilityInterval) Clone() *ReachabilityInterval {
+	if ri == nil {
+		return nil
+	}
+	return &ReachabilityInterval{
+		Start: ri.Start,
+		End: ri.End,
+	}
+}
+
func (ri *ReachabilityInterval) String() string {
	return fmt.Sprintf("[%d,%d]", ri.Start, ri.End)
}

View File

@@ -11,18 +11,18 @@ import (
// UTXOCollection represents a set of UTXOs indexed by their outpoints
type UTXOCollection map[externalapi.DomainOutpoint]*externalapi.UTXOEntry

-// UTXODiff represents a diff between two UTXO Sets.
-type UTXODiff struct {
-	ToAdd UTXOCollection
-	ToRemove UTXOCollection
-}
-
-// NewUTXODiff instantiates an empty UTXODiff
-func NewUTXODiff() *UTXODiff {
-	return &UTXODiff{
-		ToAdd: UTXOCollection{},
-		ToRemove: UTXOCollection{},
+// Clone returns a clone of UTXOCollection
+func (uc UTXOCollection) Clone() UTXOCollection {
+	if uc == nil {
+		return nil
	}
+	clone := make(UTXOCollection, len(uc))
+	for outpoint, entry := range uc {
+		clone[outpoint] = entry.Clone()
+	}
+	return clone
}

func (uc UTXOCollection) String() string {
@@ -41,6 +41,32 @@ func (uc UTXOCollection) String() string {
	return fmt.Sprintf("[ %s ]", strings.Join(utxoStrings, ", "))
}

+// UTXODiff represents a diff between two UTXO Sets.
+type UTXODiff struct {
+	ToAdd UTXOCollection
+	ToRemove UTXOCollection
+}
+
+// Clone returns a clone of UTXODiff
+func (d *UTXODiff) Clone() *UTXODiff {
+	if d == nil {
+		return nil
+	}
+	return &UTXODiff{
+		ToAdd: d.ToAdd.Clone(),
+		ToRemove: d.ToRemove.Clone(),
+	}
+}
+
func (d UTXODiff) String() string {
	return fmt.Sprintf("ToAdd: %s; ToRemove: %s", d.ToAdd, d.ToRemove)
}
+
+// NewUTXODiff instantiates an empty UTXODiff
+func NewUTXODiff() *UTXODiff {
+	return &UTXODiff{
+		ToAdd: UTXOCollection{},
+		ToRemove: UTXOCollection{},
+	}
+}

View File

@@ -83,12 +83,9 @@ func (bb *testBlockBuilder) buildBlockWithParents(
		}
	}

-	err := bb.blockRelationStore.StageBlockRelation(tempBlockHash, &model.BlockRelations{Parents: parentHashes})
-	if err != nil {
-		return nil, err
-	}
-	err = bb.ghostdagManager.GHOSTDAG(tempBlockHash)
+	bb.blockRelationStore.StageBlockRelation(tempBlockHash, &model.BlockRelations{Parents: parentHashes})
+	err := bb.ghostdagManager.GHOSTDAG(tempBlockHash)
	if err != nil {
		return nil, err
	}
@@ -111,10 +108,8 @@ func (bb *testBlockBuilder) buildBlockWithParents(
	if err != nil {
		return nil, err
	}
-	err = bb.acceptanceDataStore.Stage(tempBlockHash, acceptanceData)
-	if err != nil {
-		return nil, err
-	}
+	bb.acceptanceDataStore.Stage(tempBlockHash, acceptanceData)

	coinbase, err := bb.coinbaseManager.ExpectedCoinbaseTransaction(tempBlockHash, coinbaseData)
	if err != nil {

View File

@@ -129,10 +129,7 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
		if err != nil {
			return err
		}
-		err = bp.headerTipsStore.Stage(tips)
-		if err != nil {
-			return err
-		}
+		bp.headerTipsStore.Stage(tips)
	}

	if syncInfo.State != externalapi.SyncStateMissingGenesis {
@@ -216,10 +213,7 @@ func (bp *blockProcessor) validateBlock(block *externalapi.DomainBlock, mode *ex
	}

	if !hasHeader {
-		err = bp.blockHeaderStore.Stage(blockHash, block.Header)
-		if err != nil {
-			return err
-		}
+		bp.blockHeaderStore.Stage(blockHash, block.Header)
	}

	// If any validation until (included) proof-of-work fails, simply
@@ -276,12 +270,9 @@ func (bp *blockProcessor) validatePostProofOfWork(block *externalapi.DomainBlock
	blockHash := consensusserialization.BlockHash(block)

	if mode.State != externalapi.SyncStateHeadersFirst {
-		err := bp.blockStore.Stage(blockHash, block)
-		if err != nil {
-			return err
-		}
-		err = bp.blockValidator.ValidateBodyInIsolation(blockHash)
+		bp.blockStore.Stage(blockHash, block)
+		err := bp.blockValidator.ValidateBodyInIsolation(blockHash)
		if err != nil {
			return err
		}

View File

@@ -91,10 +91,7 @@ func TestCheckBlockSanity(t *testing.T) {
		t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
	}

-	err = consensus.BlockStore().Stage(blockHash, &exampleValidBlock)
-	if err != nil {
-		t.Fatalf("Failed storing block: %v", err)
-	}
+	consensus.BlockStore().Stage(blockHash, &exampleValidBlock)

	err = consensus.BlockValidator().ValidateBodyInIsolation(blockHash)
	if err != nil {
@@ -103,10 +100,7 @@ func TestCheckBlockSanity(t *testing.T) {
	// Test with block with wrong transactions sorting order
	blockHash = consensusserialization.BlockHash(&blockWithWrongTxOrder)

-	err = consensus.BlockStore().Stage(blockHash, &blockWithWrongTxOrder)
-	if err != nil {
-		t.Fatalf("Failed storing block: %v", err)
-	}
+	consensus.BlockStore().Stage(blockHash, &blockWithWrongTxOrder)

	err = consensus.BlockValidator().ValidateBodyInIsolation(blockHash)
	if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
		t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
@@ -115,10 +109,7 @@ func TestCheckBlockSanity(t *testing.T) {
	// Test a block with invalid parents order
	// We no longer require blocks to have ordered parents
	blockHash = consensusserialization.BlockHash(&unOrderedParentsBlock)

-	err = consensus.BlockStore().Stage(blockHash, &unOrderedParentsBlock)
-	if err != nil {
-		t.Fatalf("Failed storing block: %v", err)
-	}
+	consensus.BlockStore().Stage(blockHash, &unOrderedParentsBlock)

	err = consensus.BlockValidator().ValidateBodyInIsolation(blockHash)
	if err != nil {
		t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)

View File

@@ -38,13 +38,10 @@ func TestValidateMedianTime(t *testing.T) {
	pastMedianTime := func(parents ...*externalapi.DomainHash) int64 {
		var tempHash externalapi.DomainHash
-		err := tc.BlockRelationStore().StageBlockRelation(&tempHash, &model.BlockRelations{
+		tc.BlockRelationStore().StageBlockRelation(&tempHash, &model.BlockRelations{
			Parents: parents,
			Children: nil,
		})
-		if err != nil {
-			t.Fatalf("StageBlockRelation: %+v", err)
-		}
		defer tc.BlockRelationStore().Discard()

		err = tc.GHOSTDAGManager().GHOSTDAG(&tempHash)

View File

@@ -101,10 +101,7 @@ func (csm *consensusStateManager) addTip(newTipHash *externalapi.DomainHash) (ne
	}
	log.Tracef("The new tips are: %s", newTips)

-	err = csm.consensusStateStore.StageTips(newTips)
-	if err != nil {
-		return nil, err
-	}
+	csm.consensusStateStore.StageTips(newTips)
	log.Tracef("Staged the new tips %s", newTips)

	return newTips, nil

View File

@@ -127,7 +127,7 @@ func (csm *consensusStateManager) applyBlueBlocks(blockHash *externalapi.DomainH
	log.Tracef("The past median time for block %s is: %d", blockHash, selectedParentMedianTime)

	multiblockAcceptanceData := make(model.AcceptanceData, len(blueBlocks))
-	accumulatedUTXODiff := utxoalgebra.DiffClone(selectedParentPastUTXODiff)
+	accumulatedUTXODiff := selectedParentPastUTXODiff.Clone()
	accumulatedMass := uint64(0)

	for i, blueBlock := range blueBlocks {

View File

@@ -132,10 +132,7 @@ func (csm *consensusStateManager) resolveSingleBlockStatus(blockHash *externalap
	}

	log.Tracef("Staging the calculated acceptance data of block %s", blockHash)
-	err = csm.acceptanceDataStore.Stage(blockHash, acceptanceData)
-	if err != nil {
-		return 0, err
-	}
+	csm.acceptanceDataStore.Stage(blockHash, acceptanceData)

	block, err := csm.blockStore.Block(csm.databaseContext, blockHash)
	if err != nil {

View File

@@ -67,10 +67,7 @@ func (csm *consensusStateManager) setPruningPointUTXOSet(serializedUTXOSet []byt
	log.Tracef("Header tip pruning point multiset validation passed")

	log.Tracef("Staging the parent hashes for the header tips pruning point as the DAG tips")
-	err = csm.consensusStateStore.StageTips(headerTipsPruningPointHeader.ParentHashes)
-	if err != nil {
-		return err
-	}
+	csm.consensusStateStore.StageTips(headerTipsPruningPointHeader.ParentHashes)

	log.Tracef("Setting the parent hashes for the header tips pruning point as the virtual parents")
	err = csm.dagTopologyManager.SetParents(model.VirtualBlockHash, headerTipsPruningPointHeader.ParentHashes)
@@ -159,12 +156,10 @@ func (csm *consensusStateManager) HeaderTipsPruningPoint() (*externalapi.DomainH
	log.Tracef("The current header tips are: %s", headerTips)

	log.Tracef("Temporarily staging the parents of the virtual header to be the header tips: %s", headerTips)
-	err = csm.blockRelationStore.StageBlockRelation(virtualHeaderHash, &model.BlockRelations{
+	csm.blockRelationStore.StageBlockRelation(virtualHeaderHash, &model.BlockRelations{
		Parents: headerTips,
	})
-	if err != nil {
-		return nil, err
-	}
	defer csm.blockRelationStore.Discard()

	err = csm.ghostdagManager.GHOSTDAG(virtualHeaderHash)

View File

@@ -35,10 +35,7 @@ func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.Domain
	}

	log.Tracef("Staging new acceptance data for the virtual block")
-	err = csm.acceptanceDataStore.Stage(model.VirtualBlockHash, virtualAcceptanceData)
-	if err != nil {
-		return err
-	}
+	csm.acceptanceDataStore.Stage(model.VirtualBlockHash, virtualAcceptanceData)

	log.Tracef("Staging new multiset for the virtual block")
	csm.multisetStore.Stage(model.VirtualBlockHash, virtualMultiset)

View File

@@ -13,10 +13,7 @@ func (csm *consensusStateManager) stageDiff(blockHash *externalapi.DomainHash,
	defer log.Tracef("stageDiff end for block %s", blockHash)

	log.Tracef("Staging block %s as the diff child of %s", utxoDiffChild, blockHash)
-	err := csm.utxoDiffStore.Stage(blockHash, utxoDiff, utxoDiffChild)
-	if err != nil {
-		return err
-	}
+	csm.utxoDiffStore.Stage(blockHash, utxoDiff, utxoDiffChild)

	if utxoDiffChild == nil {
		log.Tracef("Adding block %s to the virtual diff parents", blockHash)
@@ -55,7 +52,8 @@ func (csm *consensusStateManager) addToVirtualDiffParents(blockHash *externalapi
	newVirtualDiffParents := append([]*externalapi.DomainHash{blockHash}, oldVirtualDiffParents...)

	log.Tracef("Staging virtual diff parents after adding %s to it", blockHash)
-	return csm.consensusStateStore.StageVirtualDiffParents(newVirtualDiffParents)
+	csm.consensusStateStore.StageVirtualDiffParents(newVirtualDiffParents)
+	return nil
}

func (csm *consensusStateManager) removeFromVirtualDiffParents(blockHash *externalapi.DomainHash) error {
@@ -80,5 +78,6 @@ func (csm *consensusStateManager) removeFromVirtualDiffParents(blockHash *extern
	}

	log.Tracef("Staging virtual diff parents after removing %s from it", blockHash)
-	return csm.consensusStateStore.StageVirtualDiffParents(newVirtualDiffParents)
+	csm.consensusStateStore.StageVirtualDiffParents(newVirtualDiffParents)
+	return nil
}

View File

@@ -48,13 +48,3 @@ func collectionContainsWithBlueScore(collection model.UTXOCollection, outpoint *
	entry, ok := CollectionGet(collection, outpoint)
	return ok && entry.BlockBlueScore == blueScore
}
-
-// clone returns a clone of this collection
-func collectionClone(collection model.UTXOCollection) model.UTXOCollection {
-	clone := make(model.UTXOCollection, len(collection))
-	for outpoint, entry := range collection {
-		collectionAdd(clone, &outpoint, entry)
-	}
-	return clone
-}

View File

@@ -242,7 +242,7 @@ func WithDiffInPlace(this *model.UTXODiff, diff *model.UTXODiff) error {
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// first d, and than diff were applied to some base
func WithDiff(this *model.UTXODiff, diff *model.UTXODiff) (*model.UTXODiff, error) {
-	clone := DiffClone(this)
+	clone := this.Clone()
	err := WithDiffInPlace(clone, diff)
	if err != nil {

View File

@@ -58,7 +58,7 @@ func TestUTXOCollection(t *testing.T) {
	}

	// Test model.UTXOCollection cloning
-	collectionClone := collectionClone(test.collection)
+	collectionClone := test.collection.Clone()
	if reflect.ValueOf(collectionClone).Pointer() == reflect.ValueOf(test.collection).Pointer() {
		t.Errorf("collection is reference-equal to its clone in test \"%s\". ", test.name)
	}
@@ -95,7 +95,7 @@ func TestUTXODiff(t *testing.T) {
	}

	// Test utxoDiff cloning
-	clonedDiff := DiffClone(diff)
+	clonedDiff := diff.Clone()
	if clonedDiff == diff {
		t.Errorf("cloned diff is reference-equal to the original")
	}
@@ -576,7 +576,7 @@ func TestUTXODiffRules(t *testing.T) {
	}

	// Repeat WithDiff check test.this time using withDiffInPlace
-	thisClone := DiffClone(test.this)
+	thisClone := test.this.Clone()
	err = WithDiffInPlace(thisClone, test.other)

	// Test whether withDiffInPlace returned an error

View File

@@ -12,15 +12,6 @@ import (
	"github.com/pkg/errors"
)
-
-// DiffClone returns a new UTXODiff which is identical to the given diff
-func DiffClone(diff *model.UTXODiff) *model.UTXODiff {
-	clone := &model.UTXODiff{
-		ToAdd: collectionClone(diff.ToAdd),
-		ToRemove: collectionClone(diff.ToRemove),
-	}
-	return clone
-}

// DiffAddTransaction modifies the provided utxoDiff with provided transaction.
func DiffAddTransaction(utxoDiff *model.UTXODiff, transaction *externalapi.DomainTransaction, blockBlueScore uint64) error {
	for _, input := range transaction.Inputs {

View File

@@ -127,10 +127,7 @@ func (dtm *dagTopologyManager) SetParents(blockHash *externalapi.DomainHash, par
		for i, parentChild := range parentRelations.Children {
			if *parentChild == *blockHash {
				parentRelations.Children = append(parentRelations.Children[:i], parentRelations.Children[i+1:]...)
-				err = dtm.blockRelationStore.StageBlockRelation(currentParent, parentRelations)
-				if err != nil {
-					return err
-				}
+				dtm.blockRelationStore.StageBlockRelation(currentParent, parentRelations)
				break
			}
@@ -153,21 +150,15 @@ func (dtm *dagTopologyManager) SetParents(blockHash *externalapi.DomainHash, par
		}
		if !isBlockAlreadyInChildren {
			parentRelations.Children = append(parentRelations.Children, blockHash)
-			err = dtm.blockRelationStore.StageBlockRelation(parent, parentRelations)
-			if err != nil {
-				return err
-			}
+			dtm.blockRelationStore.StageBlockRelation(parent, parentRelations)
		}
	}

	// Finally - create the relations for the block itself
-	err = dtm.blockRelationStore.StageBlockRelation(blockHash, &model.BlockRelations{
+	dtm.blockRelationStore.StageBlockRelation(blockHash, &model.BlockRelations{
		Parents: parentHashes,
		Children: []*externalapi.DomainHash{},
	})
-	if err != nil {
-		return err
-	}

	return nil
}

View File

@@ -82,10 +82,7 @@ func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
		newBlockData.BlueScore = 0
	}

-	err = gm.ghostdagDataStore.Stage(blockHash, newBlockData)
-	if err != nil {
-		return err
-	}
+	gm.ghostdagDataStore.Stage(blockHash, newBlockData)

	return nil
}

View File

@@ -53,10 +53,7 @@ func (h headerTipsManager) AddHeaderTip(hash *externalapi.DomainHash) error {
	}

	newTips = append(newTips, hash)
-	err = h.headerTipsStore.Stage(newTips)
-	if err != nil {
-		return err
-	}
+	h.headerTipsStore.Stage(newTips)

	return nil
}

View File

@@ -23,10 +23,9 @@ func (r *reachabilityDataStoreMock) Commit(_ model.DBTransaction) error {
	panic("implement me")
}

-func (r *reachabilityDataStoreMock) StageReachabilityData(blockHash *externalapi.DomainHash, reachabilityData *model.ReachabilityData) error {
+func (r *reachabilityDataStoreMock) StageReachabilityData(blockHash *externalapi.DomainHash, reachabilityData *model.ReachabilityData) {
	r.reachabilityDataStaging[*blockHash] = reachabilityData
	r.recorder[*blockHash] = struct{}{}
-	return nil
}

func (r *reachabilityDataStoreMock) StageReachabilityReindexRoot(reachabilityReindexRoot *externalapi.DomainHash) {

View File

@@ -5,8 +5,8 @@ import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

-func (rt *reachabilityManager) stageData(blockHash *externalapi.DomainHash, data *model.ReachabilityData) error {
-	return rt.reachabilityDataStore.StageReachabilityData(blockHash, data)
+func (rt *reachabilityManager) stageData(blockHash *externalapi.DomainHash, data *model.ReachabilityData) {
+	rt.reachabilityDataStore.StageReachabilityData(blockHash, data)
}

func (rt *reachabilityManager) stageFutureCoveringSet(blockHash *externalapi.DomainHash, set model.FutureCoveringTreeNodeSet) error {
@@ -16,7 +16,8 @@ func (rt *reachabilityManager) stageFutureCoveringSet(blockHash *externalapi.Dom
	}
	data.FutureCoveringSet = set

-	return rt.reachabilityDataStore.StageReachabilityData(blockHash, data)
+	rt.reachabilityDataStore.StageReachabilityData(blockHash, data)
+	return nil
}

func (rt *reachabilityManager) stageTreeNode(blockHash *externalapi.DomainHash, node *model.ReachabilityTreeNode) error {
@@ -26,7 +27,8 @@ func (rt *reachabilityManager) stageTreeNode(blockHash *externalapi.DomainHash,
	}
	data.TreeNode = node

-	return rt.reachabilityDataStore.StageReachabilityData(blockHash, data)
+	rt.reachabilityDataStore.StageReachabilityData(blockHash, data)
+	return nil
}

func (rt *reachabilityManager) stageReindexRoot(blockHash *externalapi.DomainHash) {

View File

@@ -34,8 +34,9 @@ func (m multiset) Serialize() []byte {
	return m.ms.Serialize()[:]
}

-func (m multiset) Clone() (model.Multiset, error) {
-	return FromBytes(m.Serialize())
+func (m multiset) Clone() model.Multiset {
+	msClone := *m.ms
+	return &multiset{ms: &msClone}
}

// FromBytes deserializes the given bytes slice and returns a multiset.