Ori Newman d207888b67
Implement pruned headers node (#1787)
* Pruning headers p2p basic structure

* Remove headers-first

* Fix consensus tests except TestValidateAndInsertPruningPointWithSideBlocks and TestValidateAndInsertImportedPruningPoint

* Add virtual genesis

* Implement PruningPointAndItsAnticoneWithMetaData

* Start fixing TestValidateAndInsertImportedPruningPoint

* Fix TestValidateAndInsertImportedPruningPoint

* Fix BlockWindow

* Update p2p and gRPC

* Fix all tests except TestHandleRelayInvs

* Delete TestHandleRelayInvs parts that cover the old IBD flow

* Fix lint errors

* Add p2p_request_ibd_blocks.go

* Clean code

* Make MsgBlockWithMetaData implement its own representation

* Remove redundant check if highest share block is below the pruning point

* Fix TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime

* Fix comments, errors and names

* Fix window size to the real value

* Check reindex root after each block at TestUpdateReindexRoot

* Remove irrelevant check

* Renames and comments

* Remove redundant argument from sendGetBlockLocator

* Don't delete staging on non-recoverable errors

* Renames and comments

* Remove redundant code

* Commit changes inside ResolveVirtual

* Add comment to IsRecoverableError

* Remove blocksWithMetaDataGHOSTDAGDataStore

* Increase windows pagefile

* Move DeleteStagingConsensus outside of defer

* Get rid of mustAccepted in receiveBlockWithMetaData

* Ban on invalid pruning point

* Rename interface_datastructures_daawindowstore.go to interface_datastructures_blocks_with_meta_data_daa_window_store.go

* * Change GetVirtualSelectedParentChainFromBlockResponseMessage and VirtualSelectedParentChainChangedNotificationMessage to show only added block hashes
*  Remove ResolveVirtual
* Use externalapi.ConsensusWrapper inside MiningManager
* Fix pruningmanager.blockwithmetadata

* Set pruning point selected child when importing the pruning point UTXO set

* Change virtual genesis hash

* replace the selected parent with virtual genesis on removePrunedBlocksFromGHOSTDAGData

* Get rid of low hash in block locators

* Remove +1 from everywhere we use difficultyAdjustmentWindowSize and increase the default value by one

* Add comments about consensus wrapper

* Don't use separate staging area when resolving resolveBlockStatus

* Fix netsync stability test

* Fix checkResolveVirtual

* Rename ConsensusWrapper->ConsensusReference

* Get rid of blockHeapNode

* Add comment to defaultDifficultyAdjustmentWindowSize

* Add SelectedChild to DAGTraversalManager

* Remove redundant copy

* Rename blockWindowHeap->calculateBlockWindowHeap

* Move isVirtualGenesisOnlyParent to utils

* Change BlockWithMetaData->BlockWithTrustedData

* Get rid of maxReasonLength

* Split IBD to 100 blocks each time

* Fix a bug in calculateBlockWindowHeap

* Switch to trusted data when encountering virtual genesis in blockWithTrustedData

* Move ConsensusReference to domain

* Update ConsensusReference comment

* Add comment

* Rename shouldNotAddGenesis->skipAddingGenesis
2021-07-26 12:24:07 +03:00

193 lines
5.9 KiB
Go

package dagtraversalmanager
import (
"container/heap"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// blockGHOSTDAGDataHashPairLess reports whether first sorts strictly before second
// according to the GHOSTDAG ordering (blueWork, with hash as tie-breaker).
func blockGHOSTDAGDataHashPairLess(first, second *externalapi.BlockGHOSTDAGDataHashPair, gm model.GHOSTDAGManager) bool {
	return gm.Less(first.Hash, first.GHOSTDAGData, second.Hash, second.GHOSTDAGData)
}
// baseHeap is an implementation for heap.Interface that sorts blocks by their blueWork+hash
type baseHeap struct {
	// slice holds the heap's elements; heap ordering is maintained by container/heap
	// using the Less method of the embedding type (upHeap or downHeap).
	slice []*externalapi.BlockGHOSTDAGDataHashPair
	// ghostdagManager provides the blueWork+hash comparison used for ordering.
	ghostdagManager model.GHOSTDAGManager
}
// Len returns the number of pairs currently stored in the heap.
func (h *baseHeap) Len() int {
	return len(h.slice)
}
// Swap exchanges the elements at indexes i and j, as required by heap.Interface.
func (h *baseHeap) Swap(i, j int) {
	h.slice[i], h.slice[j] = h.slice[j], h.slice[i]
}
// Push appends x (a *externalapi.BlockGHOSTDAGDataHashPair) to the underlying
// slice; container/heap then restores the heap invariant.
func (h *baseHeap) Push(x interface{}) {
	pair := x.(*externalapi.BlockGHOSTDAGDataHashPair)
	h.slice = append(h.slice, pair)
}
// Pop removes and returns the last element of the underlying slice, as
// required by heap.Interface (container/heap moves the root there first).
func (h *baseHeap) Pop() interface{} {
	lastIndex := len(h.slice) - 1
	lastItem := h.slice[lastIndex]
	h.slice = h.slice[:lastIndex]
	return lastItem
}
// peek returns the root of the heap (index 0) without removing it. For an
// upHeap this is the pair with the lowest blueWork+hash; for a downHeap,
// the highest.
func (h *baseHeap) peek() *externalapi.BlockGHOSTDAGDataHashPair {
	return h.slice[0]
}
// upHeap extends baseHeap with a Less that yields a min-heap: the pair with
// the lowest blueWork+hash sits at the root.
type upHeap struct{ baseHeap }

// Less reports whether the element at i orders before the element at j.
func (h *upHeap) Less(i, j int) bool {
	return blockGHOSTDAGDataHashPairLess(h.slice[i], h.slice[j], h.ghostdagManager)
}
// downHeap extends baseHeap with a Less that yields a max-heap: the pair with
// the highest blueWork+hash sits at the root.
type downHeap struct{ baseHeap }

// Less reports whether the element at i orders before the element at j
// (the inverse of the upHeap ordering).
func (h *downHeap) Less(i, j int) bool {
	return !blockGHOSTDAGDataHashPairLess(h.slice[i], h.slice[j], h.ghostdagManager)
}
// blockHeap represents a mutable heap of blocks, sorted by their blueWork+hash
type blockHeap struct {
	// impl is the underlying heap (either an upHeap or a downHeap).
	impl heap.Interface
	// ghostdagStore is used by Push to fetch a block's GHOSTDAG data by hash.
	ghostdagStore model.GHOSTDAGDataStore
	// dbContext and stagingArea are passed through to ghostdagStore.Get.
	dbContext   model.DBReader
	stagingArea *model.StagingArea
}
// NewDownHeap initializes and returns a new blockHeap backed by a max-heap,
// so Pop yields the block with the highest blueWork+hash first.
func (dtm *dagTraversalManager) NewDownHeap(stagingArea *model.StagingArea) model.BlockHeap {
	downImpl := &downHeap{baseHeap{ghostdagManager: dtm.ghostdagManager}}
	heap.Init(downImpl)
	return &blockHeap{
		impl:          downImpl,
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
}
// NewUpHeap initializes and returns a new blockHeap backed by a min-heap,
// so Pop yields the block with the lowest blueWork+hash first.
func (dtm *dagTraversalManager) NewUpHeap(stagingArea *model.StagingArea) model.BlockHeap {
	upImpl := &upHeap{baseHeap{ghostdagManager: dtm.ghostdagManager}}
	heap.Init(upImpl)
	return &blockHeap{
		impl:          upImpl,
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
}
// Pop removes the root block from this heap and returns its hash. For an
// up-heap that is the lowest blueWork+hash; for a down-heap, the highest.
func (bh *blockHeap) Pop() *externalapi.DomainHash {
	pair := heap.Pop(bh.impl).(*externalapi.BlockGHOSTDAGDataHashPair)
	return pair.Hash
}
// Push fetches the GHOSTDAG data for blockHash and pushes the resulting
// (hash, data) pair onto the heap. Returns an error if the store lookup fails.
func (bh *blockHeap) Push(blockHash *externalapi.DomainHash) error {
	ghostdagData, err := bh.ghostdagStore.Get(bh.dbContext, bh.stagingArea, blockHash, false)
	if err != nil {
		return err
	}
	pair := &externalapi.BlockGHOSTDAGDataHashPair{
		Hash:         blockHash,
		GHOSTDAGData: ghostdagData,
	}
	heap.Push(bh.impl, pair)
	return nil
}
// PushSlice pushes every hash in blockHashes onto the heap, stopping at the
// first Push error and returning it.
func (bh *blockHeap) PushSlice(blockHashes []*externalapi.DomainHash) error {
	for i := range blockHashes {
		if err := bh.Push(blockHashes[i]); err != nil {
			return err
		}
	}
	return nil
}
// Len returns the length of this heap (delegates to the underlying
// heap.Interface implementation).
func (bh *blockHeap) Len() int {
	return bh.impl.Len()
}
// ToSlice drains this heap into a slice in heap order (ascending for an
// up-heap, descending for a down-heap). Note that the heap is emptied by
// this operation, since every element is Pop-ed.
func (bh *blockHeap) ToSlice() []*externalapi.DomainHash {
	hashes := make([]*externalapi.DomainHash, 0, bh.Len())
	for bh.Len() > 0 {
		hashes = append(hashes, bh.Pop())
	}
	return hashes
}
// sizedUpBlockHeap represents a mutable heap of Blocks, sorted by their blueWork+hash, capped by a specific size.
type sizedUpBlockHeap struct {
	// impl is a min-heap; its slice capacity (set at construction) is the size cap.
	impl upHeap
	// ghostdagStore is used by tryPush to fetch a block's GHOSTDAG data by hash.
	ghostdagStore model.GHOSTDAGDataStore
	// dbContext and stagingArea are passed through to ghostdagStore.Get.
	dbContext   model.DBReader
	stagingArea *model.StagingArea
}
// newSizedUpHeap initializes and returns a new sizedUpBlockHeap that holds at
// most capacity blocks; the slice capacity doubles as the size cap checked by
// tryPushWithGHOSTDAGData.
func (dtm *dagTraversalManager) newSizedUpHeap(stagingArea *model.StagingArea, capacity int) *sizedUpBlockHeap {
	sizedSlice := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, capacity)
	h := sizedUpBlockHeap{
		impl:          upHeap{baseHeap{slice: sizedSlice, ghostdagManager: dtm.ghostdagManager}},
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
	heap.Init(&h.impl)
	return &h
}
// len returns the length of this heap (delegates to the underlying min-heap).
func (sbh *sizedUpBlockHeap) len() int {
	return sbh.impl.Len()
}
// pop removes the block with the lowest blueWork+hash from this heap and
// returns its hash.
func (sbh *sizedUpBlockHeap) pop() *externalapi.DomainHash {
	pair := heap.Pop(&sbh.impl).(*externalapi.BlockGHOSTDAGDataHashPair)
	return pair.Hash
}
// tryPushWithGHOSTDAGData is just like tryPush but the caller provides the
// ghostdagData of the block. When the heap is at capacity, the candidate
// either evicts the current minimum or is rejected, so the heap always keeps
// the top-capacity blocks by blueWork+hash. Returns whether the block was
// actually inserted.
func (sbh *sizedUpBlockHeap) tryPushWithGHOSTDAGData(blockHash *externalapi.DomainHash,
	ghostdagData *externalapi.BlockGHOSTDAGData) (bool, error) {

	candidate := &externalapi.BlockGHOSTDAGDataHashPair{
		Hash:         blockHash,
		GHOSTDAGData: ghostdagData,
	}
	isFull := len(sbh.impl.slice) == cap(sbh.impl.slice)
	if isFull {
		currentMin := sbh.impl.peek()
		// The heap is full and the candidate orders below the current
		// minimum — reject it without modifying the heap.
		if blockGHOSTDAGDataHashPairLess(candidate, currentMin, sbh.impl.ghostdagManager) {
			return false, nil
		}
		// Evict the minimum to make room for the candidate.
		sbh.pop()
	}
	heap.Push(&sbh.impl, candidate)
	return true, nil
}
// tryPush looks up blockHash's GHOSTDAG data and attempts to insert it; if the
// heap is full and the block orders below the current minimum, it is rejected.
// Returns whether the block was inserted, and any store lookup error.
func (sbh *sizedUpBlockHeap) tryPush(blockHash *externalapi.DomainHash) (bool, error) {
	ghostdagData, err := sbh.ghostdagStore.Get(sbh.dbContext, sbh.stagingArea, blockHash, false)
	if err != nil {
		return false, err
	}
	return sbh.tryPushWithGHOSTDAGData(blockHash, ghostdagData)
}