Svarog f1451406f7
Add support for multiple staging areas (#1633)
* Add StagingArea struct

* Implemented staging areas in blockStore

* Move blockStagingShard to separate folder

* Apply staging shard to acceptanceDataStore

* Update blockHeaderStore with StagingArea

* Add StagingArea to BlockRelationStore

* Add StagingArea to blockStatusStore

* Add StagingArea to consensusStateStore

* Add StagingArea to daaBlocksStore

* Add StagingArea to finalityStore

* Add StagingArea to ghostdagDataStore

* Add StagingArea to headersSelectedChainStore and headersSelectedTipStore

* Add StagingArea to multisetStore

* Add StagingArea to pruningStore

* Add StagingArea to reachabilityDataStore

* Add StagingArea to utxoDiffStore

* Fix forgotten compilation error

* Update reachability manager and some more things with StagingArea

* Add StagingArea to dagTopologyManager, and some more

* Add StagingArea to GHOSTDAGManager, and some more

* Add StagingArea to difficultyManager, and some more

* Add StagingArea to dagTraversalManager, and some more

* Add StagingArea to headerTipsManager, and some more

* Add StagingArea to consensusStateManager, pastMedianTimeManager

* Add StagingArea to transactionValidator

* Add StagingArea to finalityManager

* Add StagingArea to mergeDepthManager

* Add StagingArea to pruningManager

* Add StagingArea to rest of ValidateAndInsertBlock

* Add StagingArea to blockValidator

* Add StagingArea to coinbaseManager

* Add StagingArea to syncManager

* Add StagingArea to blockBuilder

* Update consensus with StagingArea

* Add StagingArea to ghostdag2

* Fix remaining compilation errors

* Update names of stagingShards

* Fix forgotten stagingArea passing

* Mark stagingShard.isCommited = true once commited

* Move isStaged to stagingShard, so that it's available without going through store

* Make blockHeaderStore count be available from stagingShard

* Fix remaining forgotten stagingArea passing

* commitAllChanges should call dbTx.Commit in the end

* Fix all tests in blockValidator

* Fix all tests in consensusStateManager and some more

* Fix all tests in pruningManager

* Add many missing stagingAreas in tests

* Fix many tests

* Fix most of all other tests

* Fix ghostdag_test.go

* Add comment to StagingArea

* Make list of StagingShards an array

* Add comment to StagingShardID

* Make sure all staging shards are pointer-receiver

* Undo bucket rename in block_store

* Typo: isCommited -> isCommitted

* Add comment explaining why stagingArea.shards is an array
2021-03-29 10:34:11 +03:00

198 lines
5.7 KiB
Go

package dagtraversalmanager
import (
"container/heap"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// blockHeapNode pairs a block hash with its GHOSTDAG data so that heap
// ordering by blueWork+hash can be decided without further store lookups.
type blockHeapNode struct {
	hash         *externalapi.DomainHash
	ghostdagData *model.BlockGHOSTDAGData
}
// less reports whether this node sorts before other, delegating the
// blueWork+hash comparison to the given GHOSTDAGManager.
func (left *blockHeapNode) less(right *blockHeapNode, gm model.GHOSTDAGManager) bool {
	isLess := gm.Less(left.hash, left.ghostdagData, right.hash, right.ghostdagData)
	return isLess
}
// baseHeap is an implementation for heap.Interface that sorts blocks by their blueWork+hash.
// It supplies Len/Swap/Push/Pop; the Less method is provided by the embedding
// upHeap/downHeap types, which decide the sort direction.
type baseHeap struct {
	slice           []*blockHeapNode
	ghostdagManager model.GHOSTDAGManager // used for blueWork+hash comparisons
}
// Len returns the number of nodes currently held by the heap.
func (h *baseHeap) Len() int {
	return len(h.slice)
}
// Swap exchanges the nodes at positions i and j.
func (h *baseHeap) Swap(i, j int) {
	h.slice[i], h.slice[j] = h.slice[j], h.slice[i]
}
// Push appends x (which must be a *blockHeapNode) to the backing slice.
// It is called by the container/heap package, not directly.
func (h *baseHeap) Push(x interface{}) {
	node := x.(*blockHeapNode)
	h.slice = append(h.slice, node)
}
// Pop removes and returns the last node of the backing slice.
// It is called by the container/heap package, not directly.
func (h *baseHeap) Pop() interface{} {
	lastIndex := len(h.slice) - 1
	lastNode := h.slice[lastIndex]
	h.slice = h.slice[:lastIndex]
	return lastNode
}
// peek returns the heap-root node (the block with lowest blueWork+hash
// under the heap's ordering) without removing it.
func (h *baseHeap) peek() *blockHeapNode {
	root := h.slice[0]
	return root
}
// upHeap extends baseHeap to include Less operation that traverses from bottom to top,
// i.e. the node with the lowest blueWork+hash sits at the heap root.
type upHeap struct{ baseHeap }
// Less reports whether the node at index i sorts before the node at index j
// in ascending blueWork+hash order.
func (h *upHeap) Less(i, j int) bool {
	return h.slice[i].less(h.slice[j], h.ghostdagManager)
}
// downHeap extends baseHeap to include Less operation that traverses from top to bottom,
// i.e. the node with the highest blueWork+hash sits at the heap root.
type downHeap struct{ baseHeap }
// Less reports whether the node at index i sorts before the node at index j
// in descending blueWork+hash order (the negation of the ascending order).
func (h *downHeap) Less(i, j int) bool {
	return !h.slice[i].less(h.slice[j], h.ghostdagManager)
}
// blockHeap represents a mutable heap of blocks, sorted by their blueWork+hash.
// The direction of the sort is determined by impl (upHeap or downHeap);
// ghostdagStore/dbContext/stagingArea are used by Push to fetch a block's
// GHOSTDAG data from its hash.
type blockHeap struct {
	impl          heap.Interface
	ghostdagStore model.GHOSTDAGDataStore
	dbContext     model.DBReader
	stagingArea   *model.StagingArea
}
// NewDownHeap initializes and returns a new blockHeap whose root is the block
// with the highest blueWork+hash.
func (dtm *dagTraversalManager) NewDownHeap(stagingArea *model.StagingArea) model.BlockHeap {
	newHeap := &blockHeap{
		impl:          &downHeap{baseHeap{ghostdagManager: dtm.ghostdagManager}},
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
	heap.Init(newHeap.impl)
	return newHeap
}
// NewUpHeap initializes and returns a new blockHeap whose root is the block
// with the lowest blueWork+hash.
func (dtm *dagTraversalManager) NewUpHeap(stagingArea *model.StagingArea) model.BlockHeap {
	newHeap := &blockHeap{
		impl:          &upHeap{baseHeap{ghostdagManager: dtm.ghostdagManager}},
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
	heap.Init(newHeap.impl)
	return newHeap
}
// Pop removes the root block (lowest blueWork+hash under the heap's ordering)
// from this heap and returns its hash.
func (bh *blockHeap) Pop() *externalapi.DomainHash {
	popped := heap.Pop(bh.impl)
	return popped.(*blockHeapNode).hash
}
// Push fetches the GHOSTDAG data for blockHash and pushes the block onto the
// heap. Returns an error if the GHOSTDAG data lookup fails.
func (bh *blockHeap) Push(blockHash *externalapi.DomainHash) error {
	ghostdagData, err := bh.ghostdagStore.Get(bh.dbContext, bh.stagingArea, blockHash)
	if err != nil {
		return err
	}

	node := &blockHeapNode{hash: blockHash, ghostdagData: ghostdagData}
	heap.Push(bh.impl, node)
	return nil
}
// PushSlice pushes every hash in blockHashes onto the heap, stopping at the
// first error encountered.
func (bh *blockHeap) PushSlice(blockHashes []*externalapi.DomainHash) error {
	for _, hash := range blockHashes {
		if err := bh.Push(hash); err != nil {
			return err
		}
	}
	return nil
}
// Len returns the number of blocks currently in this heap.
func (bh *blockHeap) Len() int {
	return bh.impl.Len()
}
// ToSlice drains this heap into a slice, in heap order.
// Note: this empties the heap as a side effect.
func (bh *blockHeap) ToSlice() []*externalapi.DomainHash {
	hashes := make([]*externalapi.DomainHash, 0, bh.Len())
	for bh.Len() > 0 {
		hashes = append(hashes, bh.Pop())
	}
	return hashes
}
// sizedUpBlockHeap represents a mutable heap of Blocks, sorted by their blueWork+hash, capped by a specific size.
// The cap is enforced by tryPush/tryPushWithGHOSTDAGData, which evict the
// minimum element (the upHeap root) when the heap is full.
type sizedUpBlockHeap struct {
	impl          upHeap
	ghostdagStore model.GHOSTDAGDataStore
	dbContext     model.DBReader
	stagingArea   *model.StagingArea
}
// newSizedUpHeap initializes and returns a new sizedUpBlockHeap that holds at
// most capacity blocks. The cap is enforced by the tryPush* methods via the
// backing slice's capacity.
//
// Note: the parameter was renamed from `cap` to `capacity` to avoid shadowing
// the builtin cap; callers are unaffected since Go parameter names are not
// part of the call signature.
func (dtm *dagTraversalManager) newSizedUpHeap(stagingArea *model.StagingArea, capacity int) *sizedUpBlockHeap {
	h := sizedUpBlockHeap{
		impl:          upHeap{baseHeap{slice: make([]*blockHeapNode, 0, capacity), ghostdagManager: dtm.ghostdagManager}},
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
	heap.Init(&h.impl)
	return &h
}
// len returns the number of blocks currently in this heap.
func (sbh *sizedUpBlockHeap) len() int {
	return sbh.impl.Len()
}
// pop removes the block with lowest blueWork+hash from this heap and returns its hash.
func (sbh *sizedUpBlockHeap) pop() *externalapi.DomainHash {
	popped := heap.Pop(&sbh.impl)
	return popped.(*blockHeapNode).hash
}
// tryPushWithGHOSTDAGData is just like tryPush but the caller provides the
// ghostdagData of the block, saving a store lookup. It returns false when the
// heap is full and the candidate sorts below the current minimum.
func (sbh *sizedUpBlockHeap) tryPushWithGHOSTDAGData(blockHash *externalapi.DomainHash,
	ghostdagData *model.BlockGHOSTDAGData) (bool, error) {

	candidate := &blockHeapNode{
		hash:         blockHash,
		ghostdagData: ghostdagData,
	}

	// The cap of the backing slice is the size limit of this heap.
	if len(sbh.impl.slice) == cap(sbh.impl.slice) {
		lowest := sbh.impl.peek()
		if candidate.less(lowest, sbh.impl.ghostdagManager) {
			// Heap is full and the candidate is below the minimum — reject it.
			return false, nil
		}
		// Evict the current minimum to make room for the candidate.
		sbh.pop()
	}

	heap.Push(&sbh.impl, candidate)
	return true, nil
}
// tryPush looks up the block's GHOSTDAG data and tries to push it onto the
// heap; if the heap is full and the block is less than the minimum, it is
// rejected and (false, nil) is returned.
func (sbh *sizedUpBlockHeap) tryPush(blockHash *externalapi.DomainHash) (bool, error) {
	data, err := sbh.ghostdagStore.Get(sbh.dbContext, sbh.stagingArea, blockHash)
	if err != nil {
		return false, err
	}
	return sbh.tryPushWithGHOSTDAGData(blockHash, data)
}