kaspad/testing/integration/ibd_test.go
Ori Newman d207888b67
Implement pruned headers node (#1787)
* Pruning headers p2p basic structure

* Remove headers-first

* Fix consensus tests except TestValidateAndInsertPruningPointWithSideBlocks and TestValidateAndInsertImportedPruningPoint

* Add virtual genesis

* Implement PruningPointAndItsAnticoneWithMetaData

* Start fixing TestValidateAndInsertImportedPruningPoint

* Fix TestValidateAndInsertImportedPruningPoint

* Fix BlockWindow

* Update p2p and gRPC

* Fix all tests except TestHandleRelayInvs

* Delete TestHandleRelayInvs parts that cover the old IBD flow

* Fix lint errors

* Add p2p_request_ibd_blocks.go

* Clean code

* Make MsgBlockWithMetaData implement its own representation

* Remove redundant check if highest share block is below the pruning point

* Fix TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime

* Fix comments, errors and names

* Fix window size to the real value

* Check reindex root after each block at TestUpdateReindexRoot

* Remove irrelevant check

* Renames and comments

* Remove redundant argument from sendGetBlockLocator

* Don't delete staging on non-recoverable errors

* Renames and comments

* Remove redundant code

* Commit changes inside ResolveVirtual

* Add comment to IsRecoverableError

* Remove blocksWithMetaDataGHOSTDAGDataStore

* Increase windows pagefile

* Move DeleteStagingConsensus outside of defer

* Get rid of mustAccepted in receiveBlockWithMetaData

* Ban on invalid pruning point

* Rename interface_datastructures_daawindowstore.go to interface_datastructures_blocks_with_meta_data_daa_window_store.go

* Change GetVirtualSelectedParentChainFromBlockResponseMessage and VirtualSelectedParentChainChangedNotificationMessage to show only added block hashes
* Remove ResolveVirtual
* Use externalapi.ConsensusWrapper inside MiningManager
* Fix pruningmanager.blockwithmetadata

* Set pruning point selected child when importing the pruning point UTXO set

* Change virtual genesis hash

* replace the selected parent with virtual genesis on removePrunedBlocksFromGHOSTDAGData

* Get rid of low hash in block locators

* Remove +1 from everywhere we use difficultyAdjustmentWindowSize and increase the default value by one

* Add comments about consensus wrapper

* Don't use separate staging area when resolving resolveBlockStatus

* Fix netsync stability test

* Fix checkResolveVirtual

* Rename ConsensusWrapper->ConsensusReference

* Get rid of blockHeapNode

* Add comment to defaultDifficultyAdjustmentWindowSize

* Add SelectedChild to DAGTraversalManager

* Remove redundant copy

* Rename blockWindowHeap->calculateBlockWindowHeap

* Move isVirtualGenesisOnlyParent to utils

* Change BlockWithMetaData->BlockWithTrustedData

* Get rid of maxReasonLength

* Split IBD to 100 blocks each time

* Fix a bug in calculateBlockWindowHeap

* Switch to trusted data when encountering virtual genesis in blockWithTrustedData

* Move ConsensusReference to domain

* Update ConsensusReference comment

* Add comment

* Rename shouldNotAddGenesis->skipAddingGenesis
2021-07-26 12:24:07 +03:00


package integration

import (
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/domain/dagconfig"
)

func TestIBD(t *testing.T) {
	const numBlocks = 100

	syncer, syncee, _, teardown := standardSetup(t)
	defer teardown()

	for i := 0; i < numBlocks; i++ {
		mineNextBlock(t, syncer)
	}

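	// Count the blocks as the syncee reports them: the WaitGroup is released
	// once all numBlocks block-added notifications have arrived, and the
	// disable flag lets the test stop counting once IBD is done.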
	var blockAddedWG sync.WaitGroup
	blockAddedWG.Add(numBlocks)
	var receivedBlocks int32
	var disableOnBlockAddedHandler int32
	setOnBlockAddedHandler(t, syncee, func(_ *appmessage.BlockAddedNotificationMessage) {
		// The handler runs on the RPC client's notification goroutine, so the
		// flag and the counter are accessed atomically to avoid a data race
		// with the test goroutine.
		if atomic.LoadInt32(&disableOnBlockAddedHandler) != 0 {
			return
		}
		atomic.AddInt32(&receivedBlocks, 1)
		blockAddedWG.Done()
	})

	// We expect this to trigger IBD
	connect(t, syncer, syncee)
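
	// ReceiveFromChanWhenDone is a helper defined elsewhere in this package. A
	// minimal sketch of what it presumably looks like (a hypothetical
	// reconstruction, not necessarily the actual implementation):
	//
	//	func ReceiveFromChanWhenDone(callback func()) chan struct{} {
	//		ch := make(chan struct{})
	//		go func() {
	//			callback()
	//			close(ch)
	//		}()
	//		return ch
	//	}
	//
	// Wrapping the blocking Wait this way makes it selectable against a timeout.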
	select {
	case <-time.After(defaultTimeout):
		t.Fatalf("Timeout waiting for IBD to finish. Received %d blocks out of %d",
			atomic.LoadInt32(&receivedBlocks), numBlocks)
	case <-ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }):
	}

	atomic.StoreInt32(&disableOnBlockAddedHandler, 1)

	// This should trigger resolving the syncee virtual
	mineNextBlock(t, syncer)
	time.Sleep(time.Second)

	tip1Hash, err := syncer.rpcClient.GetSelectedTipHash()
	if err != nil {
		t.Fatalf("Error getting selected tip for syncer: %+v", err)
	}

	tip2Hash, err := syncee.rpcClient.GetSelectedTipHash()
	if err != nil {
		t.Fatalf("Error getting selected tip for syncee: %+v", err)
	}

	if tip1Hash.SelectedTipHash != tip2Hash.SelectedTipHash {
		t.Errorf("Tips of syncer '%s' and syncee '%s' are not equal", tip1Hash.SelectedTipHash, tip2Hash.SelectedTipHash)
	}
}

// TestIBDWithPruning checks IBD from a node that
// has already pruned blocks.
func TestIBDWithPruning(t *testing.T) {
	testSync := func(syncer, syncee *appHarness) {
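		// During pruned IBD the syncee downloads the pruning point UTXO set
		// instead of the full block history, so waiting for this notification
		// confirms that the set was actually imported (overridden).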
		utxoSetOverridden := make(chan struct{})
		err := syncee.rpcClient.RegisterPruningPointUTXOSetNotifications(func() {
			close(utxoSetOverridden)
		})
		if err != nil {
			t.Fatalf("RegisterPruningPointUTXOSetNotifications: %+v", err)
		}

		// We expect this to trigger IBD
		connect(t, syncer, syncee)

		// Poll once a second until the syncee's DAG tips match the syncer's.
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()

		start := time.Now()
		for range ticker.C {
			if time.Since(start) > defaultTimeout {
				t.Fatalf("Timeout waiting for IBD to finish")
			}

			syncerInfo, err := syncer.rpcClient.GetBlockDAGInfo()
			if err != nil {
				t.Fatalf("Error getting block DAG info for syncer: %+v", err)
			}
			synceeInfo, err := syncee.rpcClient.GetBlockDAGInfo()
			if err != nil {
				t.Fatalf("Error getting block DAG info for syncee: %+v", err)
			}

			if reflect.DeepEqual(syncerInfo.TipHashes, synceeInfo.TipHashes) {
				break
			}
		}

		const timeout = 10 * time.Second
		select {
		case <-utxoSetOverridden:
		case <-time.After(timeout):
			t.Fatalf("Expected a pruning point UTXO set override notification, but none arrived after %s", timeout)
		}

		// Check that the syncee can generate block templates before resolving the virtual
		_, err = syncee.rpcClient.GetBlockTemplate(syncee.miningAddress)
		if err != nil {
			t.Fatalf("Error getting block template: %+v", err)
		}

		// This should trigger resolving the syncee virtual
		syncerTip := mineNextBlock(t, syncer)
		time.Sleep(time.Second)

		synceeSelectedTip, err := syncee.rpcClient.GetSelectedTipHash()
		if err != nil {
			t.Fatalf("Error getting selected tip for syncee: %+v", err)
		}

		if synceeSelectedTip.SelectedTipHash != consensushashing.BlockHash(syncerTip).String() {
			t.Fatalf("Unexpected selected tip: syncee selected tip is '%s' while the syncer tip is '%s'",
				synceeSelectedTip.SelectedTipHash, consensushashing.BlockHash(syncerTip))
		}
	}

	const numBlocks = 100

	overrideDAGParams := dagconfig.SimnetParams

	// This is done to make a pruning depth of 6 blocks
	overrideDAGParams.FinalityDuration = 2 * overrideDAGParams.TargetTimePerBlock
	overrideDAGParams.K = 0
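	// A sketch of the arithmetic, assuming kaspad derives the pruning depth as
	// pruningDepth = 2*finalityDepth + 4*mergeSetSizeLimit*K + 2*K + 2 with
	// finalityDepth = FinalityDuration / TargetTimePerBlock: here
	// finalityDepth = 2 and K = 0, so pruningDepth = 2*2 + 0 + 0 + 2 = 6.
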
	harnesses, teardown := setupHarnesses(t, []*harnessParams{
		{
			p2pAddress:              p2pAddress1,
			rpcAddress:              rpcAddress1,
			miningAddress:           miningAddress1,
			miningAddressPrivateKey: miningAddress1PrivateKey,
			overrideDAGParams:       &overrideDAGParams,
		},
		{
			p2pAddress:              p2pAddress2,
			rpcAddress:              rpcAddress2,
			miningAddress:           miningAddress2,
			miningAddressPrivateKey: miningAddress2PrivateKey,
			overrideDAGParams:       &overrideDAGParams,
			utxoIndex:               true,
		},
		{
			p2pAddress:              p2pAddress3,
			rpcAddress:              rpcAddress3,
			miningAddress:           miningAddress3,
			miningAddressPrivateKey: miningAddress3PrivateKey,
			overrideDAGParams:       &overrideDAGParams,
			utxoIndex:               true,
		},
	})
	defer teardown()

	syncer, syncee1, syncee2 := harnesses[0], harnesses[1], harnesses[2]

	// Let syncee1 have two blocks that the syncer
	// doesn't have, to test a situation where
	// the block locator will need more than one
	// iteration to find the highest shared chain
	// block.
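	// (Block locator hashes presumably step back from the tip with exponentially
	// growing gaps, so when the syncee holds a short fork of its own, the first
	// locator exchange may only bracket the highest shared block and the syncer
	// must zoom in with another round. This is an assumption about the locator
	// flow after this change, which no longer uses a low hash.)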
	const synceeOnlyBlocks = 2
	for i := 0; i < synceeOnlyBlocks; i++ {
		mineNextBlock(t, syncee1)
	}

	for i := 0; i < numBlocks-1; i++ {
		mineNextBlock(t, syncer)
	}

	testSync(syncer, syncee1)

	// Test a situation where a node with pruned headers syncs another fresh node.
	testSync(syncee1, syncee2)
}