Ori Newman 5dbb1da84b
Implement pruning point proof (#1832)
* Calculate GHOSTDAG, reachability etc for each level

* Don't preallocate cache for dag stores except level 0 and reduce the number of connections in the integration test to 32

* Reduce the number of connections in the integration test to 16

* Increase page file

* BuildPruningPointProof

* BuildPruningPointProof

* Add PruningProofManager

* Implement ApplyPruningPointProof

* Add prefix and fix blockAtDepth and fill headersByLevel

* Some bug fixes

* Include all relevant blocks for each level in the proof

* Fix syncAndValidatePruningPointProof to return the right block hash

* Fix block window

* Fix isAncestorOfPruningPoint

* Ban for rule errors on pruning proof

* Find common ancestor for blockAtDepthMAtNextLevel

* Use pruning proof in TestValidateAndInsertImportedPruningPoint

* stage status and finality point for proof blocks

* Uncomment golint

* Change test timeouts

* Calculate merge set for ApplyPruningPointProof

* Increase test timeout

* Add better caching for daa window store

* Return to default timeout

* Add ErrPruningProofMissesBlocksBelowPruningPoint

* Add errDAAWindowBlockNotFound

* Force connection loop next iteration on connection manager stop

* Revert to Test64IncomingConnections

* Remove BlockAtDepth from DAGTraversalManager

* numBullies->16

* Set page file size to 8gb

* Increase p2p max message size

* Test64IncomingConnections->Test16IncomingConnections

* Add comment for PruningProofManager

* Add comment in `func (c *ConnectionManager) Stop()`

* Rename isAncestorOfPruningPoint->isAncestorOfSelectedTip

* Revert page file to 16gb

* Improve ExpectedHeaderPruningPoint perf

* Fix comment

* Revert "Improve ExpectedHeaderPruningPoint perf"

This reverts commit bca1080e7140c78d510f51bbea858ae280c2f38e.

* Don't test windows
2021-10-26 09:48:27 +03:00

217 lines
8.3 KiB
Go

package blockparentbuilder
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/pkg/errors"
)
// blockParentBuilder builds the per-level parent lists for new block headers.
// All dependencies are injected via New.
type blockParentBuilder struct {
	databaseContext       model.DBManager
	blockHeaderStore      model.BlockHeaderStore
	dagTopologyManager    model.DAGTopologyManager
	reachabilityDataStore model.ReachabilityDataStore
	pruningStore          model.PruningStore
}
// New creates a new instance of a BlockParentBuilder wired with the given
// stores and managers.
func New(
	databaseContext model.DBManager,
	blockHeaderStore model.BlockHeaderStore,
	dagTopologyManager model.DAGTopologyManager,
	reachabilityDataStore model.ReachabilityDataStore,
	pruningStore model.PruningStore,
) model.BlockParentBuilder {
	builder := &blockParentBuilder{
		databaseContext:       databaseContext,
		blockHeaderStore:      blockHeaderStore,
		dagTopologyManager:    dagTopologyManager,
		reachabilityDataStore: reachabilityDataStore,
		pruningStore:          pruningStore,
	}
	return builder
}
// BuildParents returns, for each block level, the list of parents a new block
// with the given direct (level-0) parents should declare at that level.
//
// Candidates for every level are gathered from the direct parents' own
// level-parent lists; a candidate survives only if it is not in the past of
// another candidate, so each returned level forms an antichain. Pruned blocks
// (blocks without reachability data) are handled via "reference blocks":
// blocks in their future that stand in for them in reachability queries.
func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
	directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {

	// Later on we'll mutate direct parent hashes, so we first clone it.
	directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
	copy(directParentHashesCopy, directParentHashes)

	pruningPoint, err := bpb.pruningStore.PruningPoint(bpb.databaseContext, stagingArea)
	if err != nil {
		return nil, err
	}

	// The first candidates to be added should be from a parent in the future of the pruning
	// point, so later on we'll know that every block that doesn't have reachability data
	// (i.e. pruned) is necessarily in the past of the current candidates and cannot be
	// considered as a valid candidate.
	// This is why we sort the direct parent headers in a way that the first one will be
	// in the future of the pruning point.
	directParentHeaders := make([]externalapi.BlockHeader, len(directParentHashesCopy))
	firstParentInFutureOfPruningPointIndex := 0
	foundFirstParentInFutureOfPruningPoint := false
	for i, directParentHash := range directParentHashesCopy {
		isInFutureOfPruningPoint, err := bpb.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, directParentHash)
		if err != nil {
			return nil, err
		}

		if !isInFutureOfPruningPoint {
			continue
		}

		firstParentInFutureOfPruningPointIndex = i
		foundFirstParentInFutureOfPruningPoint = true
		break
	}

	if !foundFirstParentInFutureOfPruningPoint {
		return nil, errors.New("BuildParents should get at least one parent in the future of the pruning point")
	}

	// Swap the found parent into index 0 so it's processed first.
	oldFirstDirectParent := directParentHashesCopy[0]
	directParentHashesCopy[0] = directParentHashesCopy[firstParentInFutureOfPruningPointIndex]
	directParentHashesCopy[firstParentInFutureOfPruningPointIndex] = oldFirstDirectParent

	for i, directParentHash := range directParentHashesCopy {
		directParentHeader, err := bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, directParentHash)
		if err != nil {
			return nil, err
		}
		directParentHeaders[i] = directParentHeader
	}

	// blockToReferences maps a candidate block to the reference blocks used to
	// answer reachability queries about it (usually just the block itself).
	type blockToReferences map[externalapi.DomainHash][]*externalapi.DomainHash
	candidatesByLevelToReferenceBlocksMap := make(map[int]blockToReferences)

	// Direct parents are guaranteed to be in one another's anticone so add them all to
	// all the block levels they occupy
	for _, directParentHeader := range directParentHeaders {
		directParentHash := consensushashing.HeaderHash(directParentHeader)
		blockLevel := pow.BlockLevel(directParentHeader)
		for i := 0; i <= blockLevel; i++ {
			if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
				candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
			}
			candidatesByLevelToReferenceBlocksMap[i][*directParentHash] = []*externalapi.DomainHash{directParentHash}
		}
	}

	virtualGenesisChildren, err := bpb.dagTopologyManager.Children(stagingArea, model.VirtualGenesisBlockHash)
	if err != nil {
		return nil, err
	}

	// Cache the headers of the virtual genesis children; they are used below to
	// find reference blocks for pruned candidates.
	virtualGenesisChildrenHeaders := make(map[externalapi.DomainHash]externalapi.BlockHeader, len(virtualGenesisChildren))
	for _, child := range virtualGenesisChildren {
		virtualGenesisChildrenHeaders[*child], err = bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, child)
		if err != nil {
			return nil, err
		}
	}

	for _, directParentHeader := range directParentHeaders {
		for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
			isEmptyLevel := false
			if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
				candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
				isEmptyLevel = true
			}

			for _, parent := range blockLevelParentsInHeader {
				hasReachabilityData, err := bpb.reachabilityDataStore.HasReachabilityData(bpb.databaseContext, stagingArea, parent)
				if err != nil {
					return nil, err
				}

				// Reference blocks are the blocks that are used in reachability queries to check if
				// a candidate is in the future of another candidate. In most cases this is just the
				// block itself, but in the case where a block doesn't have reachability data we need
				// to use some blocks in its future as reference instead.
				// If we make sure to add a parent in the future of the pruning point first, we can
				// know that any pruned candidate that is in the past of some blocks in the pruning
				// point anticone should be a parent (in the relevant level) of one of
				// the virtual genesis children in the pruning point anticone. So we can check which
				// virtual genesis children have this block as parent and use those blocks as
				// reference blocks.
				var referenceBlocks []*externalapi.DomainHash
				if hasReachabilityData {
					referenceBlocks = []*externalapi.DomainHash{parent}
				} else {
					for childHash, childHeader := range virtualGenesisChildrenHeaders {
						childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
						if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
							referenceBlocks = append(referenceBlocks, &childHash)
						}
					}
				}

				if isEmptyLevel {
					// First candidate on this level — nothing to compare against.
					candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks
					continue
				}

				if !hasReachabilityData {
					// A pruned block is necessarily in the past of the existing
					// candidates (see the direct-parent ordering above), so it can
					// never be a valid candidate on a non-empty level.
					continue
				}

				toRemove := hashset.New()
				isAncestorOfAnyCandidate := false
				for candidate, candidateReferences := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
					candidate := candidate // Assign to a new pointer to avoid `range` pointer reuse
					isInFutureOfCurrentCandidate, err := bpb.dagTopologyManager.IsAnyAncestorOf(stagingArea, candidateReferences, parent)
					if err != nil {
						return nil, err
					}

					if isInFutureOfCurrentCandidate {
						// The existing candidate is in `parent`'s past, so it's superseded.
						toRemove.Add(&candidate)
						continue
					}

					if isAncestorOfAnyCandidate {
						// Already known to be an ancestor of some candidate; no need
						// to re-check, but keep scanning for candidates to remove.
						continue
					}

					isAncestorOfCurrentCandidate, err := bpb.dagTopologyManager.IsAncestorOfAny(stagingArea, parent, candidateReferences)
					if err != nil {
						return nil, err
					}

					if isAncestorOfCurrentCandidate {
						isAncestorOfAnyCandidate = true
					}
				}

				if toRemove.Length() > 0 {
					for hash := range toRemove {
						delete(candidatesByLevelToReferenceBlocksMap[blockLevel], hash)
					}
				}

				// We should add the block as a candidate if it's in the future of another candidate
				// or in the anticone of all candidates.
				if !isAncestorOfAnyCandidate || toRemove.Length() > 0 {
					candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks
				}
			}
		}
	}

	// Flatten the per-level candidate maps into the final slice. Levels are
	// contiguous from 0, so iterating up to the map's length covers them all.
	parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
	for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
		levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
		for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
			block := block // Assign to a new pointer to avoid `range` pointer reuse
			levelBlocks = append(levelBlocks, &block)
		}
		parents[blockLevel] = levelBlocks
	}
	return parents, nil
}