Elichai Turkel dbf18d8052
Hard fork - new genesis with the utxo set of the last block (#1856)
* UTXO dump of block 0fca37ca667c2d550a6c4416dad9717e50927128c424fa4edbebc436ab13aeef

* Activate HF immediately and change reward to 1000

* Change protocol version and datadir location

* Delete comments

* Fix zero hash to muhash zero hash in genesis utxo dump check

* Don't omit genesis as direct parent

* Fix tests

* Change subsidy to 500

* Don't assume genesis multiset is empty

* Fix BlockReward test

* Fix TestValidateAndInsertImportedPruningPoint test

* Fix pruning point genesis utxo set

* Fix tests related to mainnet utxo set

* Don't change the difficulty before you have a full window

* Fix TestBlockWindow tests

* Remove global utxo set variable, and persist mainnet utxo deserialization between runs

* Fix last tests

* Make peer banning opt-in

* small fix for a test

* Fix go lint

* Fix Ori's review comments

* Change DAA score of genesis to checkpoint DAA score and fix all tests

* Fix the BlockLevel bits counting

* Fix some tests and make them run a little faster

* Change datadir name back to kaspa-mainnet and change db path from /data to /datadir

* Last changes for the release and change the version to 0.11.5

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Ori Newman <>
Co-authored-by: msutton <mikisiton2@gmail.com>
2021-11-25 20:18:43 +02:00

226 lines
6.7 KiB
Go

package pruningmanager_test
import (
"encoding/json"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"os"
"path/filepath"
"testing"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/dagconfig"
)
// jsonBlock is a single block entry in a testdata DAG-description file:
// a string block ID plus the IDs of its direct parents.
type jsonBlock struct {
	ID      string   `json:"ID"`
	Parents []string `json:"Parents"`
}
// testJSON is the top-level schema of a testdata file: consensus parameters
// to apply for the run plus the list of blocks that make up the test DAG.
type testJSON struct {
	MergeSetSizeLimit uint64       `json:"mergeSetSizeLimit"`
	FinalityDepth     uint64       `json:"finalityDepth"`
	Blocks            []*jsonBlock `json:"blocks"`
}
// TestPruning replays the block DAGs described by the JSON files under
// ./testdata on every network and verifies that:
//  1. Every pruning-point candidate encountered while building the DAG is
//     accepted by IsValidPruningPoint.
//  2. The final pruning point matches the per-net expectation table below.
//  3. Exactly the blocks expected to survive pruning — the pruning point and
//     its anticone, their difficulty-adjustment windows, and blocks in the
//     virtual's past above the pruning point — are still stored.
func TestPruning(t *testing.T) {
	// Expected pruning-point block ID, keyed first by testdata file name and
	// then by network name.
	expectedPruningPointByNet := map[string]map[string]string{
		"chain-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "1582",
			dagconfig.TestnetParams.Name: "1582",
			dagconfig.DevnetParams.Name:  "1582",
			dagconfig.SimnetParams.Name:  "1582",
		},
		"dag-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "503",
			dagconfig.TestnetParams.Name: "502",
			dagconfig.DevnetParams.Name:  "502",
			dagconfig.SimnetParams.Name:  "502",
		},
	}

	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		// Improve the performance of the test a little
		consensusConfig.DisableDifficultyAdjustment = true

		err := filepath.Walk("./testdata", func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}

			jsonFile, err := os.Open(path)
			if err != nil {
				t.Fatalf("TestPruning: failed opening json file %s: %s", path, err)
			}
			// Each testdata file gets its own Walk-callback invocation, so this
			// defer fires at the end of processing this file.
			defer jsonFile.Close()

			test := &testJSON{}
			decoder := json.NewDecoder(jsonFile)
			decoder.DisallowUnknownFields()
			// test is already a pointer — decode into it directly rather than
			// passing a **testJSON.
			err = decoder.Decode(test)
			if err != nil {
				t.Fatalf("TestPruning: failed decoding json: %v", err)
			}

			// Apply the per-file consensus parameters before building the DAG.
			consensusConfig.FinalityDuration = time.Duration(test.FinalityDepth) * consensusConfig.TargetTimePerBlock
			consensusConfig.MergeSetSizeLimit = test.MergeSetSizeLimit
			consensusConfig.DifficultyAdjustmentWindowSize = 400

			factory := consensus.NewFactory()
			factory.SetTestLevelDBCacheSize(128)
			tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPruning")
			if err != nil {
				t.Fatalf("Error setting up consensus: %+v", err)
			}
			defer teardown(false)

			// Maintain both directions of the ID<->hash mapping; the genesis
			// block is identified as "0" in the testdata files.
			blockIDToHash := map[string]*externalapi.DomainHash{
				"0": consensusConfig.GenesisHash,
			}
			blockHashToID := map[externalapi.DomainHash]string{
				*consensusConfig.GenesisHash: "0",
			}

			stagingArea := model.NewStagingArea()

			// Build the DAG block by block, checking after each addition that
			// the current pruning-point candidate is a valid pruning point.
			for _, dagBlock := range test.Blocks {
				if dagBlock.ID == "0" {
					continue
				}
				parentHashes := make([]*externalapi.DomainHash, 0, len(dagBlock.Parents))
				for _, parentID := range dagBlock.Parents {
					parentHash, ok := blockIDToHash[parentID]
					if !ok {
						t.Fatalf("No hash was found for block with ID %s", parentID)
					}
					parentHashes = append(parentHashes, parentHash)
				}
				blockHash, _, err := tc.AddBlock(parentHashes, nil, nil)
				if err != nil {
					t.Fatalf("AddBlock: %+v", err)
				}
				blockIDToHash[dagBlock.ID] = blockHash
				blockHashToID[*blockHash] = dagBlock.ID

				pruningPointCandidate, err := tc.PruningStore().PruningPointCandidate(tc.DatabaseContext(), stagingArea)
				if database.IsNotFoundError(err) {
					// No candidate has been stored yet — fall back to genesis.
					pruningPointCandidate = consensusConfig.GenesisHash
				} else if err != nil {
					return err
				}

				isValidPruningPoint, err := tc.IsValidPruningPoint(pruningPointCandidate)
				if err != nil {
					return err
				}
				if !isValidPruningPoint {
					t.Fatalf("isValidPruningPoint is %t while expected %t", isValidPruningPoint, true)
				}
			}

			pruningPoint, err := tc.PruningPoint()
			if err != nil {
				t.Fatalf("PruningPoint: %+v", err)
			}
			pruningPointID := blockHashToID[*pruningPoint]
			expectedPruningPoint := expectedPruningPointByNet[info.Name()][consensusConfig.Name]
			if expectedPruningPoint != pruningPointID {
				t.Fatalf("%s: Expected pruning point to be %s but got %s", info.Name(), expectedPruningPoint, pruningPointID)
			}

			// We expect blocks that are within the difficulty adjustment window size of
			// the pruning point and its anticone to not get pruned
			unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
			pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
			if err != nil {
				t.Fatalf("pruningPointAndItsAnticone: %+v", err)
			}
			for _, blockHash := range pruningPointAndItsAnticone {
				unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
				blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
				if err != nil {
					t.Fatalf("BlockWindow: %+v", err)
				}
				for _, windowBlockHash := range blockWindow {
					unprunedBlockHashesBelowPruningPoint[*windowBlockHash] = struct{}{}
				}
			}

			// For every block in the testdata file, decide whether it should
			// still be stored and compare against the block store.
			for _, jsonBlock := range test.Blocks {
				id := jsonBlock.ID
				blockHash := blockIDToHash[id]

				isPruningPointAncestorOfBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, pruningPoint, blockHash)
				if err != nil {
					t.Fatalf("IsAncestorOf: %+v", err)
				}
				expectsBlock := true
				if !isPruningPointAncestorOfBlock {
					isBlockAncestorOfPruningPoint, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, pruningPoint)
					if err != nil {
						t.Fatalf("IsAncestorOf: %+v", err)
					}
					if isBlockAncestorOfPruningPoint {
						// Below the pruning point: pruned unless it's in the
						// protected set collected above.
						if _, ok := unprunedBlockHashesBelowPruningPoint[*blockHash]; !ok {
							expectsBlock = false
						}
					} else {
						// In the pruning point's anticone: pruned only if it's
						// also outside the virtual's past and unprotected.
						virtualInfo, err := tc.GetVirtualInfo()
						if err != nil {
							t.Fatalf("GetVirtualInfo: %+v", err)
						}
						isInPastOfVirtual := false
						for _, virtualParent := range virtualInfo.ParentHashes {
							isAncestorOfVirtualParent, err := tc.DAGTopologyManager().IsAncestorOf(
								stagingArea, blockHash, virtualParent)
							if err != nil {
								t.Fatalf("IsAncestorOf: %+v", err)
							}
							if isAncestorOfVirtualParent {
								isInPastOfVirtual = true
								break
							}
						}
						if !isInPastOfVirtual {
							if _, ok := unprunedBlockHashesBelowPruningPoint[*blockHash]; !ok {
								expectsBlock = false
							}
						}
					}
				}

				hasBlock, err := tc.BlockStore().HasBlock(tc.DatabaseContext(), stagingArea, blockHash)
				if err != nil {
					t.Fatalf("HasBlock: %+v", err)
				}
				if expectsBlock != hasBlock {
					t.Fatalf("expected hasBlock to be %t for block %s but got %t", expectsBlock, id, hasBlock)
				}
			}
			return nil
		})
		if err != nil {
			t.Fatalf("Walk: %+v", err)
		}
	})
}