diff --git a/testing/integration/config_test.go b/testing/integration/config_test.go index 5309f6a23..251e94ab0 100644 --- a/testing/integration/config_test.go +++ b/testing/integration/config_test.go @@ -36,6 +36,10 @@ func setConfig(t *testing.T, harness *appHarness) { harness.config.Listeners = []string{harness.p2pAddress} harness.config.RPCListeners = []string{harness.rpcAddress} harness.config.UTXOIndex = harness.utxoIndex + + if harness.overrideDAGParams != nil { + harness.config.ActiveNetParams = harness.overrideDAGParams + } } func commonConfig() *config.Config { diff --git a/testing/integration/ibd_test.go b/testing/integration/ibd_test.go index 1a3b97803..04e8a8000 100644 --- a/testing/integration/ibd_test.go +++ b/testing/integration/ibd_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/kaspanet/kaspad/domain/dagconfig" + "github.com/kaspanet/kaspad/app/appmessage" ) @@ -50,3 +52,106 @@ func TestIBD(t *testing.T) { t.Errorf("Tips of syncer: '%s' and syncee '%s' are not equal", tip1Hash.SelectedTipHash, tip2Hash.SelectedTipHash) } } + +// TestIBDWithPruning checks the IBD from a node with +// already pruned blocks. 
+func TestIBDWithPruning(t *testing.T) { + const numBlocks = 100 + + overrideDAGParams := dagconfig.SimnetParams + + // This is done to make a pruning depth of 6 blocks + overrideDAGParams.FinalityDuration = 2 * overrideDAGParams.TargetTimePerBlock + overrideDAGParams.K = 0 + harnesses, teardown := setupHarnesses(t, []*harnessParams{ + { + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + overrideDAGParams: &overrideDAGParams, + }, + { + p2pAddress: p2pAddress2, + rpcAddress: rpcAddress2, + miningAddress: miningAddress2, + miningAddressPrivateKey: miningAddress2PrivateKey, + overrideDAGParams: &overrideDAGParams, + }, + }) + defer teardown() + + syncer, syncee := harnesses[0], harnesses[1] + + // Let the syncee have two blocks that the syncer + // doesn't have to test a situation where + // the block locator will need more than one + // iteration to find the highest shared chain + // block. + const synceeOnlyBlocks = 2 + for i := 0; i < synceeOnlyBlocks; i++ { + mineNextBlock(t, syncee) + } + + for i := 0; i < numBlocks-1; i++ { + mineNextBlock(t, syncer) + } + + connect(t, syncer, syncee) + + // We expect this to trigger IBD + mineNextBlock(t, syncer) + + syncerBlockCountResponse, err := syncer.rpcClient.GetBlockCount() + if err != nil { + t.Fatalf("GetBlockCount: %+v", err) + } + + if syncerBlockCountResponse.BlockCount == syncerBlockCountResponse.HeaderCount { + t.Fatalf("Expected some pruned blocks but found none") + } + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + start := time.Now() + for range ticker.C { + if time.Since(start) > defaultTimeout { + t.Fatalf("Timeout waiting for IBD to finish.") + } + + tip1Hash, err := syncer.rpcClient.GetSelectedTipHash() + if err != nil { + t.Fatalf("Error getting tip for syncer") + } + tip2Hash, err := syncee.rpcClient.GetSelectedTipHash() + if err != nil { + t.Fatalf("Error getting tip for syncee") + } + + if 
tip1Hash.SelectedTipHash == tip2Hash.SelectedTipHash { + break + } + } + + synceeBlockCountResponse, err := syncee.rpcClient.GetBlockCount() + if err != nil { + t.Fatalf("GetBlockCount: %+v", err) + } + + if synceeBlockCountResponse.BlockCount != syncerBlockCountResponse.BlockCount+synceeOnlyBlocks+1 { + t.Fatalf("Because the syncee hasn't pruned any of its old blocks, its "+ + "block count is expected to be greater than the syncer's by synceeOnlyBlocks(%d)+genesis, but instead "+ + "we got syncer block count of %d and syncee block count of %d", synceeOnlyBlocks, + syncerBlockCountResponse.BlockCount, + synceeBlockCountResponse.BlockCount) + } + + if synceeBlockCountResponse.HeaderCount != syncerBlockCountResponse.HeaderCount+synceeOnlyBlocks { + t.Fatalf("Because the syncer hasn't synced from the syncee, its "+ + "header count is expected to be smaller by synceeOnlyBlocks(%d), but instead "+ + "we got syncer header count of %d and syncee header count of %d", synceeOnlyBlocks, + syncerBlockCountResponse.HeaderCount, + synceeBlockCountResponse.HeaderCount) + } +} diff --git a/testing/integration/setup_test.go b/testing/integration/setup_test.go index e0e2a17d9..c1bbab12a 100644 --- a/testing/integration/setup_test.go +++ b/testing/integration/setup_test.go @@ -1,6 +1,7 @@ package integration import ( + "github.com/kaspanet/kaspad/domain/dagconfig" "path/filepath" "testing" @@ -22,6 +23,7 @@ type appHarness struct { config *config.Config database database.Database utxoIndex bool + overrideDAGParams *dagconfig.Params } type harnessParams struct { @@ -30,6 +32,7 @@ type harnessParams struct { miningAddress string miningAddressPrivateKey string utxoIndex bool + overrideDAGParams *dagconfig.Params } // setupHarness creates a single appHarness with given parameters @@ -40,6 +43,7 @@ func setupHarness(t *testing.T, params *harnessParams) (harness *appHarness, tea miningAddress: params.miningAddress, miningAddressPrivateKey: 
params.miningAddressPrivateKey, utxoIndex: params.utxoIndex, + overrideDAGParams: params.overrideDAGParams, } setConfig(t, harness)