Final reindex algorithm (#1430)

* Mine JSON

* [Reindex tests] add test_params and validate_mining flag to test_consensus

* Rename file and extend tests

* Ignore local test datasets

* Use spaces over tabs

* Reindex algorithm - full algorithm, initial commit, some tests fail

* Reindex algorithm - a few critical fixes

* Reindex algorithm - move reindex struct and all related operations to new file

* Reindex algorithm - added a validateIntervals method and modified tests to use it (instead of exact comparisons)

* Reindex algorithm - modified reindexIntervals to receive the new child as argument and fixed an important related bug

* Reindex attack tests - move logic to helper function and add stretch test

* Reindex algorithm - variable names and some comments

* Reindex algorithm - minor changes

* Reindex algorithm - minor changes 2

* Reindex algorithm - extended stretch test

* Reindex algorithm - small fix to validate function

* Reindex tests - move tests and add DAG files

* go format fixes

* TestParams doc comment

* Reindex tests - exact comparisons are not needed

* Update to version 0.8.6

* Remove TestParams and use AddUTXOInvalidHeader instead

* Use gzipped test files

* This unintended change somehow slipped in through branch merges

* Rename test

* Move interval increase/decrease methods to reachability interval file

* Addressing a bunch of minor review comments

* Addressed a few more minor review comments

* Make code of offsetSiblingsBefore and offsetSiblingsAfter symmetric

* Optimize reindex logic in cases where reorg occurs + reorg test

* Do not change reindex root too fast (on reorg)

* Some comments

* A few more comments

* Addressing review comments

* Remove TestNoAttackAlternateReorg and assert chain attack

* Minor

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
Michael Sutton 2021-01-27 17:09:20 +02:00 committed by GitHub
parent 8d6e71d490
commit c9b591f2d3
20 changed files with 1576 additions and 895 deletions

View File

@@ -9,5 +9,5 @@ type ReachabilityManager interface {
IsReachabilityTreeAncestorOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
IsDAGAncestorOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
UpdateReindexRoot(selectedTip *externalapi.DomainHash) error
FindAncestorOfThisAmongChildrenOfOther(this, other *externalapi.DomainHash) (*externalapi.DomainHash, error)
FindNextAncestor(descendant, ancestor *externalapi.DomainHash) (*externalapi.DomainHash, error)
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"io"
)
// TestConsensus wraps the Consensus interface with some methods that are needed by tests only
@@ -32,6 +33,7 @@ type TestConsensus interface {
AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash,
*externalapi.BlockInsertionResult, error)
MineJSON(r io.Reader) (tips []*externalapi.DomainHash, err error)
DiscardAllStores()
AcceptanceDataStore() model.AcceptanceDataStore

View File

@@ -1,6 +1,9 @@
package testapi
import "github.com/kaspanet/kaspad/domain/consensus/model"
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestReachabilityManager adds to the main ReachabilityManager methods required by tests
type TestReachabilityManager interface {
@@ -8,4 +11,6 @@ type TestReachabilityManager interface {
SetReachabilityReindexWindow(reindexWindow uint64)
SetReachabilityReindexSlack(reindexSlack uint64)
ReachabilityReindexSlack() uint64
ValidateIntervals(root *externalapi.DomainHash) error
GetAllNodes(root *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
}

View File

@@ -195,5 +195,5 @@ func (dtm *dagTopologyManager) ChildInSelectedParentChainOf(
blockHash, specifiedHighHash)
}
return dtm.reachabilityManager.FindAncestorOfThisAmongChildrenOfOther(highHash, blockHash)
return dtm.reachabilityManager.FindNextAncestor(highHash, blockHash)
}

View File

@@ -16,6 +16,54 @@ func intervalSize(ri *model.ReachabilityInterval) uint64 {
return ri.End - ri.Start + 1
}
// intervalIncrease returns a ReachabilityInterval with offset added to start and end
func intervalIncrease(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start + offset,
End: ri.End + offset,
}
}
// intervalDecrease returns a ReachabilityInterval with offset subtracted from start and end
func intervalDecrease(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start - offset,
End: ri.End - offset,
}
}
// intervalIncreaseStart returns a ReachabilityInterval with offset added to start
func intervalIncreaseStart(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start + offset,
End: ri.End,
}
}
// intervalDecreaseStart returns a ReachabilityInterval with offset subtracted from start
func intervalDecreaseStart(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start - offset,
End: ri.End,
}
}
// intervalIncreaseEnd returns a ReachabilityInterval with offset added to end
func intervalIncreaseEnd(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start,
End: ri.End + offset,
}
}
// intervalDecreaseEnd returns a ReachabilityInterval with offset subtracted from end
func intervalDecreaseEnd(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start,
End: ri.End - offset,
}
}
// intervalSplitInHalf splits this interval by a fraction of 0.5.
// See splitFraction for further details.
func intervalSplitInHalf(ri *model.ReachabilityInterval) (
@@ -111,6 +159,35 @@ func intervalSplitWithExponentialBias(ri *model.ReachabilityInterval, sizes []ui
return intervalSplitExact(ri, biasedSizes)
}
// exponentialFractions returns a fraction of each size in sizes
// as follows:
// fraction[i] = 2^size[i] / sum_j(2^size[j])
// In the code below the above equation is divided by 2^max(size)
// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i])
// we divide 1 by potentially a very large number, which will
// result in loss of float precision. This is not a problem - all
// numbers close to 0 bear effectively the same weight.
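// For example, sizes = [1, 2, 3] gives 2^1 + 2^2 + 2^3 = 14, hence
// fractions = [2/14, 4/14, 8/14] ≈ [0.143, 0.286, 0.571].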
func exponentialFractions(sizes []uint64) []float64 {
maxSize := uint64(0)
for _, size := range sizes {
if size > maxSize {
maxSize = size
}
}
fractions := make([]float64, len(sizes))
for i, size := range sizes {
fractions[i] = 1 / math.Pow(2, float64(maxSize-size))
}
fractionsSum := float64(0)
for _, fraction := range fractions {
fractionsSum += fraction
}
for i, fraction := range fractions {
fractions[i] = fraction / fractionsSum
}
return fractions
}
// intervalContains returns true if ri contains other.
func intervalContains(ri *model.ReachabilityInterval, other *model.ReachabilityInterval) bool {
return ri.Start <= other.Start && other.End <= ri.End

View File

@@ -1,7 +1,6 @@
package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
@@ -58,20 +57,3 @@ func (rt *reachabilityManager) findAncestorIndexOfNode(tns orderedTreeNodeSet, n
}
return low - 1, true, nil
}
func (rt *reachabilityManager) propagateIntervals(tns orderedTreeNodeSet, intervals []*model.ReachabilityInterval,
subtreeSizeMaps []map[externalapi.DomainHash]uint64) error {
for i, node := range tns {
err := rt.stageInterval(node, intervals[i])
if err != nil {
return err
}
subtreeSizeMap := subtreeSizeMaps[i]
err = rt.propagateInterval(node, subtreeSizeMap)
if err != nil {
return err
}
}
return nil
}

View File

@@ -1,6 +1,7 @@
package reachabilitymanager_test
import (
"math"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -57,10 +58,20 @@ func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *t
t.Fatalf("reindex root is expected to change")
}
// Add another block over genesis
_, _, err = tc.AddBlock([]*externalapi.DomainHash{params.GenesisHash}, nil, nil)
// Add enough blocks over genesis to also test the case where the first
// level (genesis in this case) runs out of slack
slackSize := tc.ReachabilityManager().ReachabilityReindexSlack()
blocksToAdd := uint64(math.Log2(float64(slackSize))) + 2
for i := uint64(0); i < blocksToAdd; i++ {
_, _, err = tc.AddBlock([]*externalapi.DomainHash{params.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
t.Fatal(err)
}
})
}
@@ -152,6 +163,11 @@ func TestUpdateReindexRoot(t *testing.T) {
t.Fatalf("got unexpected chain1RootBlock interval. Want: %d, got: %d",
intervalSize(chain1RootBlock), expectedChain1RootIntervalSize)
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
})
}
@@ -224,27 +240,16 @@ func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
t.Fatalf("rightBlock interval not tight after reindex")
}
// Get the current interval for centerBlock. Its interval should be:
// genesisInterval - 1 - leftInterval - leftSlack - rightInterval - rightSlack
expectedCenterInterval := intervalSize(params.GenesisHash) - 1 -
intervalSize(leftBlock) - tc.ReachabilityManager().ReachabilityReindexSlack() -
intervalSize(rightBlock) - tc.ReachabilityManager().ReachabilityReindexSlack()
if intervalSize(centerBlock) != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, intervalSize(centerBlock))
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
// Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock.
// Each addition will trigger a lower-than-reindex-root reindex. We
// expect the centerInterval to shrink by 1 each time, but its child
// to remain unaffected
centerData, err := tc.ReachabilityDataStore().ReachabilityData(tc.DatabaseContext(), centerBlock)
if err != nil {
t.Fatalf("ReachabilityData: %s", err)
}
treeChildOfCenterBlock := centerData.Children()[0]
treeChildOfCenterBlockOriginalIntervalSize := intervalSize(treeChildOfCenterBlock)
leftTipHash := leftBlock
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
var err error
@@ -253,14 +258,9 @@ func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
t.Fatalf("AddBlock: %+v", err)
}
expectedCenterInterval--
if intervalSize(centerBlock) != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, intervalSize(centerBlock))
}
if intervalSize(treeChildOfCenterBlock) != treeChildOfCenterBlockOriginalIntervalSize {
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
}
@@ -276,15 +276,15 @@ func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
t.Fatalf("AddBlock: %+v", err)
}
expectedCenterInterval--
if intervalSize(centerBlock) != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, intervalSize(centerBlock))
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
}
if intervalSize(treeChildOfCenterBlock) != treeChildOfCenterBlockOriginalIntervalSize {
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
})
}
@@ -324,5 +324,10 @@ func TestTipsAfterReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
})
}

View File

@@ -0,0 +1,258 @@
package reachabilitymanager_test
import (
"compress/gzip"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"math"
"math/rand"
"os"
"testing"
)
// Test configuration
const (
numBlocksExponent = 12
logLevel = "warn"
)
func initializeTest(t *testing.T, testName string) (tc testapi.TestConsensus, teardown func(keepDataDir bool)) {
t.Parallel()
logger.SetLogLevels(logLevel)
params := dagconfig.SimnetParams
params.SkipProofOfWork = true
tc, teardown, err := consensus.NewFactory().NewTestConsensus(&params, false, testName)
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
return tc, teardown
}
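// buildJsonDAG loads a gzipped JSON DAG file (attack or no-attack variant), mines it
// into the test consensus, validates all reachability intervals and returns the DAG tips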
func buildJsonDAG(t *testing.T, tc testapi.TestConsensus, attackJson bool) (tips []*externalapi.DomainHash) {
filePrefix := "noattack"
if attackJson {
filePrefix = "attack"
}
fileName := fmt.Sprintf(
"../../testdata/reachability/%s-dag-blocks--2^%d-delay-factor--1-k--18.json.gz",
filePrefix, numBlocksExponent)
f, err := os.Open(fileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
gzipReader, err := gzip.NewReader(f)
if err != nil {
t.Fatal(err)
}
defer gzipReader.Close()
tips, err = tc.MineJSON(gzipReader)
if err != nil {
t.Fatal(err)
}
err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
if err != nil {
t.Fatal(err)
}
return tips
}
func addArbitraryBlocks(t *testing.T, tc testapi.TestConsensus) {
// After loading json, add arbitrary blocks all over the DAG to stress
// the reindex logic, and validate intervals after each addition
blocks, err := tc.ReachabilityManager().GetAllNodes(tc.DAGParams().GenesisHash)
if err != nil {
t.Fatal(err)
}
numChainsToAdd := len(blocks) / 2 // Roughly double the size of the DAG with arbitrary blocks
maxBlocksInChain := 20
validationFreq := int(math.Max(1, float64(numChainsToAdd/100)))
randSource := rand.New(rand.NewSource(33233))
for i := 0; i < numChainsToAdd; i++ {
randomIndex := randSource.Intn(len(blocks))
randomParent := blocks[randomIndex]
newBlock, _, err := tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{randomParent})
if err != nil {
t.Fatal(err)
}
blocks = append(blocks, newBlock)
// Add a random-length chain every few blocks
if randSource.Intn(8) == 0 {
numBlocksInChain := randSource.Intn(maxBlocksInChain)
chainBlock := newBlock
for j := 0; j < numBlocksInChain; j++ {
chainBlock, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainBlock})
if err != nil {
t.Fatal(err)
}
blocks = append(blocks, chainBlock)
}
}
// Normally, validate intervals for the new chain only
validationRoot := newBlock
// However, every `validationFreq` blocks, validate intervals for the entire DAG
if i%validationFreq == 0 || i == numChainsToAdd-1 {
validationRoot = tc.DAGParams().GenesisHash
}
err = tc.ReachabilityManager().ValidateIntervals(validationRoot)
if err != nil {
t.Fatal(err)
}
}
}
func addAlternatingReorgBlocks(t *testing.T, tc testapi.TestConsensus, tips []*externalapi.DomainHash) {
// Create alternating reorgs to test the cases where
// reindex root is out of the current header selected tip chain
reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext())
if err != nil {
t.Fatal(err)
}
// Try finding two tips; one which has reindex root on its chain (chainTip), and one which
// does not (reorgTip). The latter is expected to exist in json attack files.
var chainTip, reorgTip *externalapi.DomainHash
for _, block := range tips {
isRootAncestorOfTip, err := tc.ReachabilityManager().IsReachabilityTreeAncestorOf(reindexRoot, block)
if err != nil {
t.Fatal(err)
}
if isRootAncestorOfTip {
chainTip = block
} else {
reorgTip = block
}
}
if reorgTip == nil {
t.Fatal(errors.Errorf("DAG from jsom file is expected to contain a tip " +
"disagreeing with reindex root chain"))
}
if chainTip == nil {
t.Fatal(errors.Errorf("reindex root is not on any header tip chain, this is unexpected behavior"))
}
chainTipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), chainTip)
if err != nil {
t.Fatal(err)
}
reorgTipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), reorgTip)
if err != nil {
t.Fatal(err)
}
// Get both chains close to each other (we care about blue score and not
// blue work because we have SkipProofOfWork=true)
if chainTipGHOSTDAGData.BlueScore() > reorgTipGHOSTDAGData.BlueScore() {
blueScoreDiff := int(chainTipGHOSTDAGData.BlueScore() - reorgTipGHOSTDAGData.BlueScore())
for i := 0; i < blueScoreDiff+5; i++ {
reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip})
if err != nil {
t.Fatal(err)
}
}
} else {
blueScoreDiff := int(reorgTipGHOSTDAGData.BlueScore() - chainTipGHOSTDAGData.BlueScore())
for i := 0; i < blueScoreDiff+5; i++ {
chainTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainTip})
if err != nil {
t.Fatal(err)
}
}
}
err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
if err != nil {
t.Fatal(err)
}
// Alternate between the chains 200 times
for i := 0; i < 200; i++ {
if i%2 == 0 {
for j := 0; j < 10; j++ {
chainTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainTip})
if err != nil {
t.Fatal(err)
}
}
} else {
for j := 0; j < 10; j++ {
reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip})
if err != nil {
t.Fatal(err)
}
}
}
err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
if err != nil {
t.Fatal(err)
}
}
// Since the current logic switches the reindex root chain only past the reindex slack threshold - finally, force the switch to happen
for i := 0; i < int(tc.ReachabilityManager().ReachabilityReindexSlack())+10; i++ {
reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip})
if err != nil {
t.Fatal(err)
}
}
err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
if err != nil {
t.Fatal(err)
}
}
func TestNoAttack(t *testing.T) {
tc, teardown := initializeTest(t, "TestNoAttack")
defer teardown(false)
buildJsonDAG(t, tc, false)
}
func TestAttack(t *testing.T) {
tc, teardown := initializeTest(t, "TestAttack")
defer teardown(false)
buildJsonDAG(t, tc, true)
}
func TestNoAttackFuzzy(t *testing.T) {
tc, teardown := initializeTest(t, "TestNoAttackFuzzy")
defer teardown(false)
tc.ReachabilityManager().SetReachabilityReindexSlack(10)
buildJsonDAG(t, tc, false)
addArbitraryBlocks(t, tc)
}
func TestAttackFuzzy(t *testing.T) {
tc, teardown := initializeTest(t, "TestAttackFuzzy")
defer teardown(false)
tc.ReachabilityManager().SetReachabilityReindexSlack(10)
buildJsonDAG(t, tc, true)
addArbitraryBlocks(t, tc)
}
func TestAttackAlternateReorg(t *testing.T) {
tc, teardown := initializeTest(t, "TestAttackAlternateReorg")
defer teardown(false)
tc.ReachabilityManager().SetReachabilityReindexSlack(256)
tips := buildJsonDAG(t, tc, true)
addAlternatingReorgBlocks(t, tc, tips)
}

View File

@@ -246,6 +246,11 @@ func TestAddChild(t *testing.T) {
}
}
err = manager.validateIntervals(root)
if err != nil {
t.Fatal(err)
}
// Scenario 2: test addChild where all nodes are direct descendants of root
// root -> a, b, c...
// Create the root node of a new reachability tree
@@ -306,6 +311,11 @@ func TestAddChild(t *testing.T) {
t.Fatalf("TestAddChild: childNode is not a descendant of root")
}
}
err = manager.validateIntervals(root)
if err != nil {
t.Fatal(err)
}
}
func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
@@ -334,6 +344,11 @@ func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
if !helper.isReachabilityTreeAncestorOf(root, root) {
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root")
}
err := manager.validateIntervals(root)
if err != nil {
t.Fatal(err)
}
}
func TestIntervalContains(t *testing.T) {
@@ -978,19 +993,19 @@ func TestReachabilityTreeNodeString(t *testing.T) {
treeNodeB2 := helper.newNodeWithInterval(newReachabilityInterval(150, 199))
treeNodeC := helper.newNodeWithInterval(newReachabilityInterval(100, 149))
err := helper.addChildAndStage(treeNodeA, treeNodeB1)
err := helper.stageAddChild(treeNodeA, treeNodeB1)
if err != nil {
t.Fatalf("addChildAndStage: %s", err)
t.Fatalf("stageAddChild: %s", err)
}
err = helper.addChildAndStage(treeNodeA, treeNodeB2)
err = helper.stageAddChild(treeNodeA, treeNodeB2)
if err != nil {
t.Fatalf("addChildAndStage: %s", err)
t.Fatalf("stageAddChild: %s", err)
}
err = helper.addChildAndStage(treeNodeB2, treeNodeC)
err = helper.stageAddChild(treeNodeB2, treeNodeC)
if err != nil {
t.Fatalf("addChildAndStage: %s", err)
t.Fatalf("stageAddChild: %s", err)
}
str, err := manager.String(treeNodeA)

View File

@@ -0,0 +1,804 @@
package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
var (
// defaultReindexWindow is the default target window size for reachability
// reindexes. Note that this is not a constant for testing purposes.
defaultReindexWindow uint64 = 200
// defaultReindexSlack is default the slack interval given to reachability
// tree nodes not in the selected parent chain. Note that this is not
// a constant for testing purposes.
defaultReindexSlack uint64 = 1 << 12
)
// reindexContext is a struct used during reindex operations. It represents a temporary context
// for caching subtree information during the *current* reindex operation only
type reindexContext struct {
manager *reachabilityManager
subTreeSizesCache map[externalapi.DomainHash]uint64
}
// newReindexContext creates a new empty reindex context
func newReindexContext(rt *reachabilityManager) reindexContext {
return reindexContext{
manager: rt,
subTreeSizesCache: make(map[externalapi.DomainHash]uint64),
}
}
/*
Core (BFS) algorithms used during reindexing
*/
// countSubtrees counts the size of each subtree under this node,
// and populates the provided subTreeSizeMap with the results.
// It is equivalent to the following recursive implementation:
//
// func (rt *reachabilityManager) countSubtrees(node *model.ReachabilityTreeNode) uint64 {
// subtreeSize := uint64(0)
// for _, child := range node.children {
// subtreeSize += child.countSubtrees()
// }
// return subtreeSize + 1
// }
//
// However, we are expecting (linearly) deep trees, and so a
// recursive stack-based approach is inefficient and will hit
// recursion limits. Instead, the same logic was implemented
// using a (queue-based) BFS method. At a high level, the
// algorithm uses BFS for reaching all leaves and pushes
// intermediate updates from leaves via parent chains until all
// size information is gathered at the root of the operation
// (i.e. at node).
func (rc *reindexContext) countSubtrees(node *externalapi.DomainHash) error {
if _, ok := rc.subTreeSizesCache[*node]; ok {
return nil
}
queue := []*externalapi.DomainHash{node}
calculatedChildrenCount := make(map[externalapi.DomainHash]uint64)
for len(queue) > 0 {
var current *externalapi.DomainHash
current, queue = queue[0], queue[1:]
children, err := rc.manager.children(current)
if err != nil {
return err
}
if len(children) == 0 {
// We reached a leaf
rc.subTreeSizesCache[*current] = 1
} else if _, ok := rc.subTreeSizesCache[*current]; !ok {
// We haven't yet calculated the subtree size of
// the current node. Add all its children to the
// queue
queue = append(queue, children...)
continue
}
// We reached a leaf or a pre-calculated subtree.
// Push information up
for !current.Equal(node) {
current, err = rc.manager.parent(current)
if err != nil {
return err
}
// If the current is now nil, it means that the previous
// `current` was the genesis block -- the only block that
// does not have parents
if current == nil {
break
}
calculatedChildrenCount[*current]++
children, err := rc.manager.children(current)
if err != nil {
return err
}
if calculatedChildrenCount[*current] != uint64(len(children)) {
// Not all subtrees of the current node are ready
break
}
// All children of `current` have calculated their subtree size.
// Sum them all together and add 1 to get the subtree size of
// `current`.
childSubtreeSizeSum := uint64(0)
for _, child := range children {
childSubtreeSizeSum += rc.subTreeSizesCache[*child]
}
rc.subTreeSizesCache[*current] = childSubtreeSizeSum + 1
}
}
return nil
}
// propagateInterval propagates the new interval using a BFS traversal.
// Subtree intervals are recursively allocated according to subtree sizes and
// the allocation rule in splitWithExponentialBias.
func (rc *reindexContext) propagateInterval(node *externalapi.DomainHash) error {
// Make sure subtrees are counted before propagating
err := rc.countSubtrees(node)
if err != nil {
return err
}
queue := []*externalapi.DomainHash{node}
for len(queue) > 0 {
var current *externalapi.DomainHash
current, queue = queue[0], queue[1:]
children, err := rc.manager.children(current)
if err != nil {
return err
}
if len(children) > 0 {
sizes := make([]uint64, len(children))
for i, child := range children {
sizes[i] = rc.subTreeSizesCache[*child]
}
interval, err := rc.manager.intervalRangeForChildAllocation(current)
if err != nil {
return err
}
intervals, err := intervalSplitWithExponentialBias(interval, sizes)
if err != nil {
return err
}
for i, child := range children {
childInterval := intervals[i]
err = rc.manager.stageInterval(child, childInterval)
if err != nil {
return err
}
queue = append(queue, child)
}
}
}
return nil
}
/*
Functions for handling reindex triggered by adding child block
*/
// reindexIntervals traverses the reachability subtree that's
// defined by the new child node and reallocates reachability interval space
// such that another reindexing is unlikely to occur shortly
// thereafter. It does this by traversing down the reachability
// tree until it finds a node with a subtree size that's greater than
// its interval size. See propagateInterval for further details.
func (rc *reindexContext) reindexIntervals(newChild, reindexRoot *externalapi.DomainHash) error {
current := newChild
// Search for the first ancestor with sufficient interval space
for {
currentInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
currentIntervalSize := intervalSize(currentInterval)
err = rc.countSubtrees(current)
if err != nil {
return err
}
currentSubtreeSize := rc.subTreeSizesCache[*current]
// Current has sufficient space, break and propagate
if currentIntervalSize >= currentSubtreeSize {
break
}
parent, err := rc.manager.parent(current)
if err != nil {
return err
}
if parent == nil {
// If we ended up here it means that there are more
// than 2^64 blocks, which shouldn't ever happen.
return errors.Errorf("missing tree " +
"parent during reindexing. Theoretically, this " +
"should only ever happen if there are more " +
"than 2^64 blocks in the DAG.")
}
if current.Equal(reindexRoot) {
// Reindex root is expected to hold enough capacity as long as there are less
// than ~2^52 blocks in the DAG, which should never happen in our lifetimes
// even if block rate per second is above 100. The calculation follows from the allocation of
// 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root.
return errors.Errorf("unexpected behavior: reindex root %s is out of capacity"+
"during reindexing. Theoretically, this "+
"should only ever happen if there are more "+
"than ~2^52 blocks in the DAG.", reindexRoot.String())
}
isParentStrictAncestorOfRoot, err := rc.manager.isStrictAncestorOf(parent, reindexRoot)
if err != nil {
return err
}
if isParentStrictAncestorOfRoot {
// In this case parent is guaranteed to have sufficient interval space,
// however we avoid reindexing the entire subtree above parent
// (which includes root and thus majority of blocks mined since)
// and use slacks along the chain up from parent to reindex root.
// Notes:
// 1. we set requiredAllocation=currentSubtreeSize in order to double the
// current interval capacity
// 2. it might be the case that current is the `newChild` itself
return rc.reindexIntervalsEarlierThanRoot(current, reindexRoot, parent, currentSubtreeSize)
}
current = parent
}
// Propagate the interval to the subtree
return rc.propagateInterval(current)
}
// reindexIntervalsEarlierThanRoot implements the reindex algorithm for the case where the
// new child node is not in reindex root's subtree. The function is expected to allocate
// `requiredAllocation` to be added to interval of `allocationNode`. `commonAncestor` is
// expected to be a direct parent of `allocationNode` and an ancestor of `reindexRoot`.
func (rc *reindexContext) reindexIntervalsEarlierThanRoot(
allocationNode, reindexRoot, commonAncestor *externalapi.DomainHash, requiredAllocation uint64) error {
// The chosen child is:
// a. A reachability tree child of `commonAncestor`
// b. A reachability tree ancestor of `reindexRoot` or `reindexRoot` itself
chosenChild, err := rc.manager.FindNextAncestor(reindexRoot, commonAncestor)
if err != nil {
return err
}
nodeInterval, err := rc.manager.interval(allocationNode)
if err != nil {
return err
}
chosenInterval, err := rc.manager.interval(chosenChild)
if err != nil {
return err
}
if nodeInterval.Start < chosenInterval.Start {
// allocationNode is in the subtree before the chosen child
return rc.reclaimIntervalBefore(allocationNode, commonAncestor, chosenChild, reindexRoot, requiredAllocation)
}
// allocationNode is in the subtree after the chosen child
return rc.reclaimIntervalAfter(allocationNode, commonAncestor, chosenChild, reindexRoot, requiredAllocation)
}
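// reclaimIntervalBefore reclaims `requiredAllocation` of interval space for `allocationNode`,
// which lies before the chosen child of `commonAncestor`, by collecting remaining slack
// from the chain between `chosenChild` and `reindexRoot` (re-allocating fresh slack if the
// root itself is reached) and shifting the intervals in between accordingly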
func (rc *reindexContext) reclaimIntervalBefore(
allocationNode, commonAncestor, chosenChild, reindexRoot *externalapi.DomainHash, requiredAllocation uint64) error {
var slackSum uint64 = 0
var pathLen uint64 = 0
var pathSlackAlloc uint64 = 0
var err error
current := chosenChild
// Walk up the chain from common ancestor's chosen child towards reindex root
for {
if current.Equal(reindexRoot) {
// Reached reindex root. In this case, since we reached (the unlimited) root,
// we also re-allocate new slack for the chain we just traversed
previousInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
offset := requiredAllocation + rc.manager.reindexSlack*pathLen - slackSum
err = rc.manager.stageInterval(current, intervalIncreaseStart(previousInterval, offset))
if err != nil {
return err
}
err = rc.propagateInterval(current)
if err != nil {
return err
}
err = rc.offsetSiblingsBefore(allocationNode, current, offset)
if err != nil {
return err
}
// Set the slack for each chain block to be reserved below during the chain walk-down
pathSlackAlloc = rc.manager.reindexSlack
break
}
slackBeforeCurrent, err := rc.manager.remainingSlackBefore(current)
if err != nil {
return err
}
slackSum += slackBeforeCurrent
if slackSum >= requiredAllocation {
previousInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
// Set offset to be just enough to satisfy required allocation
offset := slackBeforeCurrent - (slackSum - requiredAllocation)
err = rc.manager.stageInterval(current, intervalIncreaseStart(previousInterval, offset))
if err != nil {
return err
}
err = rc.offsetSiblingsBefore(allocationNode, current, offset)
if err != nil {
return err
}
break
}
current, err = rc.manager.FindNextAncestor(reindexRoot, current)
if err != nil {
return err
}
pathLen++
}
// Go back down the reachability tree towards the common ancestor.
// On every hop we reindex the reachability subtree before the
// current node with an interval that is smaller.
// This is to make room for the required allocation.
for {
current, err = rc.manager.parent(current)
if err != nil {
return err
}
if current.Equal(commonAncestor) {
break
}
originalInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
slackBeforeCurrent, err := rc.manager.remainingSlackBefore(current)
if err != nil {
return err
}
offset := slackBeforeCurrent - pathSlackAlloc
err = rc.manager.stageInterval(current, intervalIncreaseStart(originalInterval, offset))
if err != nil {
return err
}
err = rc.offsetSiblingsBefore(allocationNode, current, offset)
if err != nil {
return err
}
}
return nil
}
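// offsetSiblingsBefore shifts the intervals of all siblings preceding `current` up by
// `offset`, until reaching `allocationNode`, which absorbs the freed space by having its
// interval end increased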
func (rc *reindexContext) offsetSiblingsBefore(allocationNode, current *externalapi.DomainHash, offset uint64) error {
parent, err := rc.manager.parent(current)
if err != nil {
return err
}
siblingsBefore, _, err := rc.manager.splitChildren(parent, current)
if err != nil {
return err
}
// Iterate over the slice in reverse order so that we can break upon reaching `allocationNode`
for i := len(siblingsBefore) - 1; i >= 0; i-- {
sibling := siblingsBefore[i]
if sibling.Equal(allocationNode) {
// We reached our final destination, allocate `offset` to `allocationNode` by increasing end and break
previousInterval, err := rc.manager.interval(allocationNode)
if err != nil {
return err
}
err = rc.manager.stageInterval(allocationNode, intervalIncreaseEnd(previousInterval, offset))
if err != nil {
return err
}
err = rc.propagateInterval(allocationNode)
if err != nil {
return err
}
break
}
previousInterval, err := rc.manager.interval(sibling)
if err != nil {
return err
}
err = rc.manager.stageInterval(sibling, intervalIncrease(previousInterval, offset))
if err != nil {
return err
}
err = rc.propagateInterval(sibling)
if err != nil {
return err
}
}
return nil
}
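// reclaimIntervalAfter is the mirror image of reclaimIntervalBefore: it reclaims
// `requiredAllocation` of interval space for `allocationNode`, which lies after the chosen
// child of `commonAncestor`, by collecting remaining slack from the chain between
// `chosenChild` and `reindexRoot` and shifting the intervals in between accordingly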
func (rc *reindexContext) reclaimIntervalAfter(
allocationNode, commonAncestor, chosenChild, reindexRoot *externalapi.DomainHash, requiredAllocation uint64) error {
var slackSum uint64 = 0
var pathLen uint64 = 0
var pathSlackAlloc uint64 = 0
var err error
current := chosenChild
// Walk up the chain from common ancestor's chosen child towards reindex root
for {
if current.Equal(reindexRoot) {
// Reached reindex root. In this case, since we reached (the unlimited) root,
// we also re-allocate new slack for the chain we just traversed
previousInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
offset := requiredAllocation + rc.manager.reindexSlack*pathLen - slackSum
err = rc.manager.stageInterval(current, intervalDecreaseEnd(previousInterval, offset))
if err != nil {
return err
}
err = rc.propagateInterval(current)
if err != nil {
return err
}
err = rc.offsetSiblingsAfter(allocationNode, current, offset)
if err != nil {
return err
}
// Set the slack for each chain block to be reserved below during the chain walk-down
pathSlackAlloc = rc.manager.reindexSlack
break
}
slackAfterCurrent, err := rc.manager.remainingSlackAfter(current)
if err != nil {
return err
}
slackSum += slackAfterCurrent
if slackSum >= requiredAllocation {
previousInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
// Set offset to be just enough to satisfy required allocation
offset := slackAfterCurrent - (slackSum - requiredAllocation)
err = rc.manager.stageInterval(current, intervalDecreaseEnd(previousInterval, offset))
if err != nil {
return err
}
err = rc.offsetSiblingsAfter(allocationNode, current, offset)
if err != nil {
return err
}
break
}
current, err = rc.manager.FindNextAncestor(reindexRoot, current)
if err != nil {
return err
}
pathLen++
}
// Go back down the reachability tree towards the common ancestor.
// On every hop we reindex the reachability subtree after the
// current node with an interval that is smaller.
// This is to make room for the required allocation.
for {
current, err = rc.manager.parent(current)
if err != nil {
return err
}
if current.Equal(commonAncestor) {
break
}
originalInterval, err := rc.manager.interval(current)
if err != nil {
return err
}
slackAfterCurrent, err := rc.manager.remainingSlackAfter(current)
if err != nil {
return err
}
offset := slackAfterCurrent - pathSlackAlloc
err = rc.manager.stageInterval(current, intervalDecreaseEnd(originalInterval, offset))
if err != nil {
return err
}
err = rc.offsetSiblingsAfter(allocationNode, current, offset)
if err != nil {
return err
}
}
return nil
}
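// offsetSiblingsAfter shifts the intervals of all siblings following `current` down by
// `offset`, until reaching `allocationNode`, which absorbs the freed space by having its
// interval start decreased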
func (rc *reindexContext) offsetSiblingsAfter(allocationNode, current *externalapi.DomainHash, offset uint64) error {
parent, err := rc.manager.parent(current)
if err != nil {
return err
}
_, siblingsAfter, err := rc.manager.splitChildren(parent, current)
if err != nil {
return err
}
for _, sibling := range siblingsAfter {
if sibling.Equal(allocationNode) {
// We reached our final destination, allocate `offset` to `allocationNode` by decreasing start and break
previousInterval, err := rc.manager.interval(allocationNode)
if err != nil {
return err
}
err = rc.manager.stageInterval(allocationNode, intervalDecreaseStart(previousInterval, offset))
if err != nil {
return err
}
err = rc.propagateInterval(allocationNode)
if err != nil {
return err
}
break
}
previousInterval, err := rc.manager.interval(sibling)
if err != nil {
return err
}
err = rc.manager.stageInterval(sibling, intervalDecrease(previousInterval, offset))
if err != nil {
return err
}
err = rc.propagateInterval(sibling)
if err != nil {
return err
}
}
return nil
}
/*
Functions for handling reindex triggered by moving reindex root
*/
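// concentrateInterval concentrates the interval of `reindexRoot` around `chosenChild`
// (the child on the path towards the new reindex root): siblings before and after the
// chosen child are tightened to their exact subtree sizes, and the remaining interval,
// minus slack on both sides, is handed to the chosen child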
func (rc *reindexContext) concentrateInterval(reindexRoot, chosenChild *externalapi.DomainHash, isFinalReindexRoot bool) error {
siblingsBeforeChosen, siblingsAfterChosen, err := rc.manager.splitChildren(reindexRoot, chosenChild)
if err != nil {
return err
}
siblingsBeforeSizesSum, err := rc.tightenIntervalsBefore(reindexRoot, siblingsBeforeChosen)
if err != nil {
return err
}
siblingsAfterSizesSum, err := rc.tightenIntervalsAfter(reindexRoot, siblingsAfterChosen)
if err != nil {
return err
}
err = rc.expandIntervalToChosen(
reindexRoot, chosenChild, siblingsBeforeSizesSum, siblingsAfterSizesSum, isFinalReindexRoot)
if err != nil {
return err
}
return nil
}
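// tightenIntervalsBefore packs the siblings preceding the chosen child into a tight
// interval starting one slack unit above the root's interval start, sized exactly by
// their subtree sizes, and returns the sum of those sizes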
func (rc *reindexContext) tightenIntervalsBefore(
reindexRoot *externalapi.DomainHash, siblingsBeforeChosen []*externalapi.DomainHash) (sizesSum uint64, err error) {
siblingSubtreeSizes, sizesSum, err := rc.countChildrenSubtrees(siblingsBeforeChosen)
if err != nil {
return 0, err
}
rootInterval, err := rc.manager.interval(reindexRoot)
if err != nil {
return 0, err
}
intervalBeforeChosen := newReachabilityInterval(
rootInterval.Start+rc.manager.reindexSlack,
rootInterval.Start+rc.manager.reindexSlack+sizesSum-1,
)
err = rc.propagateChildrenIntervals(intervalBeforeChosen, siblingsBeforeChosen, siblingSubtreeSizes)
if err != nil {
return 0, err
}
return sizesSum, nil
}
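// tightenIntervalsAfter mirrors tightenIntervalsBefore for the siblings following the
// chosen child, packing them just below the slack reserved at the root's interval end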
func (rc *reindexContext) tightenIntervalsAfter(
reindexRoot *externalapi.DomainHash, siblingsAfterChosen []*externalapi.DomainHash) (sizesSum uint64, err error) {
siblingSubtreeSizes, sizesSum, err := rc.countChildrenSubtrees(siblingsAfterChosen)
if err != nil {
return 0, err
}
rootInterval, err := rc.manager.interval(reindexRoot)
if err != nil {
return 0, err
}
intervalAfterChosen := newReachabilityInterval(
rootInterval.End-rc.manager.reindexSlack-sizesSum,
rootInterval.End-rc.manager.reindexSlack-1,
)
err = rc.propagateChildrenIntervals(intervalAfterChosen, siblingsAfterChosen, siblingSubtreeSizes)
if err != nil {
return 0, err
}
return sizesSum, nil
}
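// expandIntervalToChosen assigns `chosenChild` all of `reindexRoot`'s interval space
// remaining after the tightened siblings, minus slack on both sides. If `chosenChild` is
// the final reindex root and its current interval does not fit within the new one, a
// slack-padded version of the new interval is propagated through its subtree first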
func (rc *reindexContext) expandIntervalToChosen(
reindexRoot, chosenChild *externalapi.DomainHash, sizesSumBefore, sizesSumAfter uint64, isFinalReindexRoot bool) error {
rootInterval, err := rc.manager.interval(reindexRoot)
if err != nil {
return err
}
newChosenInterval := newReachabilityInterval(
rootInterval.Start+sizesSumBefore+rc.manager.reindexSlack,
rootInterval.End-sizesSumAfter-rc.manager.reindexSlack-1,
)
currentChosenInterval, err := rc.manager.interval(chosenChild)
if err != nil {
return err
}
// Propagate interval only if chosenChild is the final reindex root
if isFinalReindexRoot && !intervalContains(newChosenInterval, currentChosenInterval) {
// New interval doesn't contain the previous one, propagation is required
// We assign slack on both sides as an optimization. Were we to
// assign a tight interval, the next time the reindex root moves we
// would need to propagate intervals again. That is to say, when we
// do allocate slack, next time
// expandIntervalToChosen is called (next time the
// reindex root moves), newChosenInterval is likely to
// contain currentChosenInterval.
err := rc.manager.stageInterval(chosenChild, newReachabilityInterval(
newChosenInterval.Start+rc.manager.reindexSlack,
newChosenInterval.End-rc.manager.reindexSlack,
))
if err != nil {
return err
}
err = rc.propagateInterval(chosenChild)
if err != nil {
return err
}
}
err = rc.manager.stageInterval(chosenChild, newChosenInterval)
if err != nil {
return err
}
return nil
}
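// countChildrenSubtrees counts the subtree size of each node in `children` (via the
// shared subtree-size cache) and returns the sizes along with their sum, or an error
// if subtree counting fails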
func (rc *reindexContext) countChildrenSubtrees(children []*externalapi.DomainHash) (
sizes []uint64, sum uint64, err error) {
sizes = make([]uint64, len(children))
sum = 0
for i, node := range children {
err := rc.countSubtrees(node)
if err != nil {
return nil, 0, err
}
subtreeSize := rc.subTreeSizesCache[*node]
sizes[i] = subtreeSize
sum += subtreeSize
}
return sizes, sum, nil
}
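// propagateChildrenIntervals splits `interval` exactly by `sizes` between `children` and
// propagates each child's new interval through its subtree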
func (rc *reindexContext) propagateChildrenIntervals(
interval *model.ReachabilityInterval, children []*externalapi.DomainHash, sizes []uint64) error {
childIntervals, err := intervalSplitExact(interval, sizes)
if err != nil {
return err
}
for i, child := range children {
childInterval := childIntervals[i]
err := rc.manager.stageInterval(child, childInterval)
if err != nil {
return err
}
err = rc.propagateInterval(child)
if err != nil {
return err
}
}
return nil
}

View File

@@ -25,7 +25,7 @@ func (rt *reachabilityManager) stageReindexRoot(blockHash *externalapi.DomainHas
rt.reachabilityDataStore.StageReachabilityReindexRoot(blockHash)
}
func (rt *reachabilityManager) addChildAndStage(node, child *externalapi.DomainHash) error {
func (rt *reachabilityManager) stageAddChild(node, child *externalapi.DomainHash) error {
nodeData, err := rt.reachabilityDataForInsertion(node)
if err != nil {
return err

View File

@@ -2,6 +2,7 @@ package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
)
@@ -21,6 +22,14 @@ func (t *testReachabilityManager) SetReachabilityReindexWindow(reindexWindow uin
t.reachabilityManager.reindexWindow = reindexWindow
}
func (t *testReachabilityManager) ValidateIntervals(root *externalapi.DomainHash) error {
return t.reachabilityManager.validateIntervals(root)
}
func (t *testReachabilityManager) GetAllNodes(root *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
return t.reachabilityManager.getAllNodes(root)
}
// NewTestReachabilityManager creates an instance of a TestReachabilityManager
func NewTestReachabilityManager(manager model.ReachabilityManager) testapi.TestReachabilityManager {
return &testReachabilityManager{reachabilityManager: manager.(*reachabilityManager)}

File diff suppressed because it is too large

View File

@@ -1,12 +1,15 @@
package consensus
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
"io"
)
type testConsensus struct {
@@ -103,8 +106,63 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
return consensushashing.BlockHash(block), blockInsertionResult, nil
}
func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
func (tc *testConsensus) MineJSON(r io.Reader) (tips []*externalapi.DomainHash, err error) {
// jsonBlock is a json representation of a block in mine format
type jsonBlock struct {
ID string `json:"id"`
Parents []string `json:"parents"`
}
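// A minimal sketch of the expected input, inferred from the parsing logic below:
// a JSON array of such blocks, where ID "0" denotes genesis, e.g.
// [{"id":"0","parents":[]},{"id":"1","parents":["0"]},{"id":"2","parents":["1"]}]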
tipSet := map[externalapi.DomainHash]*externalapi.DomainHash{}
tipSet[*tc.dagParams.GenesisHash] = tc.dagParams.GenesisHash
parentsMap := make(map[string]*externalapi.DomainHash)
parentsMap["0"] = tc.dagParams.GenesisHash
decoder := json.NewDecoder(r)
// read open bracket
_, err = decoder.Token()
if err != nil {
return nil, err
}
// while the array contains values
for decoder.More() {
var block jsonBlock
// decode an array value
err := decoder.Decode(&block)
if err != nil {
return nil, err
}
if block.ID == "0" {
continue
}
parentHashes := make([]*externalapi.DomainHash, len(block.Parents))
var ok bool
for i, parentID := range block.Parents {
parentHashes[i], ok = parentsMap[parentID]
if !ok {
return nil, errors.Errorf("Couldn't find blockID: %s", parentID)
}
delete(tipSet, *parentHashes[i])
}
blockHash, _, err := tc.AddUTXOInvalidHeader(parentHashes)
if err != nil {
return nil, err
}
parentsMap[block.ID] = blockHash
tipSet[*blockHash] = blockHash
}
tips = make([]*externalapi.DomainHash, len(tipSet))
i := 0
for _, v := range tipSet {
tips[i] = v
i++
}
return tips, nil
}
func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
// Require write lock because BuildBlockWithParents stages temporary data
tc.lock.Lock()
defer tc.lock.Unlock()