stasatdaglabs 3dbc42b4f7
Implement the new block subsidy function (#1830)
* Replace the old blockSubsidy parameters with the new ones.

* Return subsidyGenesisReward if blockHash is the genesis hash.

* Traverse a block's past for the subsidy calculation.

* Partially implement SubsidyStore.

* Refer to SubsidyStore from CoinbaseManager.

* Wrap calcBlockSubsidy in getBlockSubsidy, which first checks the database.

* Fix finalityStore not calling GenerateShardingID.

* Implement calculateAveragePastSubsidy.

* Implement calculateMergeSetSubsidySum.

* Implement calculateSubsidyRandomVariable.

* Implement calcBlockSubsidy.

* Add a TODO about floats.

* Update the calcBlockSubsidy TODO.

* Use binary.LittleEndian in calculateSubsidyRandomVariable.

* Fix bad range in calculateSubsidyRandomVariable.

* Replace float64 with big.Rat everywhere except for subsidyRandomVariable.

* Fix a nil dereference.

* Use a random walk to approximate the normal distribution.

* In order to avoid unsupported fractional results from powInt64, flip the numerator and the denominator manually.

* Set standardDeviation to 0.25, MaxSompi to 10_000_000_000 * SompiPerKaspa and defaultSubsidyGenesisReward to 1_000.

* Set the standard deviation to 0.2.

* Use a binomial distribution instead of trying to estimate the normal distribution.

* Change some values around.

* Clamp the block subsidy.

* Remove the fake duplicate constants in the util package.

* Reduce MaxSompi to only 100m Kaspa to avoid hitting the uint64 ceiling.

* Lower MaxSompi further to avoid new and exciting ways for the uint64 ceiling to be hit.

* Remove debug logs.

* Fix a couple of failing tests.

* Fix TestBlockWindow.

* Fix limitTransactionCount sometimes crashing on index-out-of-bounds.

* In TrustedDataDataDAABlock, replace BlockHeader with DomainBlock

* In calculateAveragePastSubsidy, use blockWindow instead of doing a BFS manually.

* Remove the reference to DAGTopologyManager in coinbaseManager.

* Add subsidy to the coinbase payload.

* Get rid of the subsidy store and extract subsidies out of coinbase transactions.

* Keep a blockWindow amount of blocks under the virtual for IBD purposes.

* Manually remove the virtual genesis from the merge set.

* Fix simnet genesis.

* Fix TestPruning.

* Fix TestCheckBlockIsNotPruned.

* Fix TestBlockWindow.

* Fix TestCalculateSignatureHashSchnorr.

* Fix TestCalculateSignatureHashECDSA.

* Fix serializing the wrong value into the coinbase payload.

* Rename coinbaseOutputForBlueBlock to coinbaseOutputAndSubsidyForBlueBlock.

* Add a TODO about optimizing trusted data DAA window blocks.

* Expand on a comment in TestCheckBlockIsNotPruned.

* In calcBlockSubsidy, divide the big.Int numerator by the big.Int denominator instead of converting to float64.

* Clarify a comment.

* Rename SubsidyMinGenesisReward to MinSubsidy.

* Properly handle trusted data blocks in calculateMergeSetSubsidySum.

* Use the first two bytes of the selected parent's hash for randomness instead of math/rand.

* Restore maxSompi to what it used to be.

* Fix TestPruning.

* Fix TestAmountCreation.

* Fix TestBlockWindow.

* Fix TestAmountUnitConversions.

* Increase the timeout in many-tips to 30 minutes.

* Check coinbase subsidy for every block

* Re-rename functions

* Use shift instead of powInt64 to determine subsidyRandom

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-10-30 10:16:47 +03:00

225 lines
6.6 KiB
Go

package pruningmanager_test
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"os"
"path/filepath"
"testing"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/dagconfig"
)
// jsonBlock is the JSON representation of a single block in a testdata
// DAG-description file: a block ID plus the IDs of its parent blocks.
// ID "0" denotes the genesis block (see TestPruning).
type jsonBlock struct {
	ID      string   `json:"ID"`
	Parents []string `json:"Parents"`
}
// testJSON is the top-level schema of a testdata DAG-description file:
// consensus parameters to apply for the test run, and the list of blocks
// (with their parent relationships) to add to the DAG.
type testJSON struct {
	MergeSetSizeLimit uint64       `json:"mergeSetSizeLimit"`
	FinalityDepth     uint64       `json:"finalityDepth"`
	Blocks            []*jsonBlock `json:"blocks"`
}
// TestPruning replays the DAGs described by the JSON files under ./testdata
// on a fresh test consensus for every network and verifies that:
//  1. after every added block, the current pruning point candidate is a
//     valid pruning point,
//  2. the final pruning point matches the hard-coded expectation for the
//     file/network combination, and
//  3. exactly the blocks that are supposed to survive pruning (the pruning
//     point's anticone plus their difficulty-adjustment windows, and the
//     virtual's past) are still present in the block store.
func TestPruning(t *testing.T) {
	// Regression values: the expected final pruning point ID, keyed first by
	// testdata file name and then by network name. A consensus change that
	// moves the pruning point will fail here and must be reviewed.
	expectedPruningPointByNet := map[string]map[string]string{
		"chain-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "1582",
			dagconfig.TestnetParams.Name: "1582",
			dagconfig.DevnetParams.Name:  "1582",
			dagconfig.SimnetParams.Name:  "1582",
		},
		"dag-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "503",
			dagconfig.TestnetParams.Name: "502",
			dagconfig.DevnetParams.Name:  "503",
			dagconfig.SimnetParams.Name:  "503",
		},
	}

	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		err := filepath.Walk("./testdata", func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}
			jsonFile, err := os.Open(path)
			if err != nil {
				t.Fatalf("TestPruning: failed opening json file %s: %s", path, err)
			}
			// The callback returns once per file, so this defer fires promptly.
			defer jsonFile.Close()

			test := &testJSON{}
			decoder := json.NewDecoder(jsonFile)
			decoder.DisallowUnknownFields()
			// test is already a pointer; pass it directly rather than
			// decoding into a **testJSON via &test.
			err = decoder.Decode(test)
			if err != nil {
				t.Fatalf("TestPruning: failed decoding json: %v", err)
			}

			// Override consensus parameters with the ones the test file calls for.
			consensusConfig.FinalityDuration = time.Duration(test.FinalityDepth) * consensusConfig.TargetTimePerBlock
			consensusConfig.MergeSetSizeLimit = test.MergeSetSizeLimit
			consensusConfig.DifficultyAdjustmentWindowSize = 400

			factory := consensus.NewFactory()
			tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPruning")
			if err != nil {
				t.Fatalf("Error setting up consensus: %+v", err)
			}
			defer teardown(false)

			// Map between the test file's string block IDs and the real block
			// hashes produced by the consensus, in both directions.
			blockIDToHash := map[string]*externalapi.DomainHash{
				"0": consensusConfig.GenesisHash,
			}
			blockHashToID := map[externalapi.DomainHash]string{
				*consensusConfig.GenesisHash: "0",
			}

			stagingArea := model.NewStagingArea()

			for _, dagBlock := range test.Blocks {
				// ID "0" is the genesis, which already exists in the consensus.
				if dagBlock.ID == "0" {
					continue
				}
				parentHashes := make([]*externalapi.DomainHash, 0, len(dagBlock.Parents))
				for _, parentID := range dagBlock.Parents {
					parentHash, ok := blockIDToHash[parentID]
					if !ok {
						t.Fatalf("No hash was found for block with ID %s", parentID)
					}
					parentHashes = append(parentHashes, parentHash)
				}
				blockHash, _, err := tc.AddBlock(parentHashes, nil, nil)
				if err != nil {
					t.Fatalf("AddBlock: %+v", err)
				}
				blockIDToHash[dagBlock.ID] = blockHash
				blockHashToID[*blockHash] = dagBlock.ID

				// Invariant: after every block, the current pruning point
				// candidate must be a valid pruning point. Before any candidate
				// has been stored, the genesis serves as the candidate.
				pruningPointCandidate, err := tc.PruningStore().PruningPointCandidate(tc.DatabaseContext(), stagingArea)
				if database.IsNotFoundError(err) {
					pruningPointCandidate = consensusConfig.GenesisHash
				} else if err != nil {
					return err
				}
				isValidPruningPoint, err := tc.IsValidPruningPoint(pruningPointCandidate)
				if err != nil {
					return err
				}
				if !isValidPruningPoint {
					t.Fatalf("isValidPruningPoint is %t while expected %t", isValidPruningPoint, true)
				}
			}

			pruningPoint, err := tc.PruningPoint()
			if err != nil {
				t.Fatalf("PruningPoint: %+v", err)
			}
			pruningPointID := blockHashToID[*pruningPoint]
			expectedPruningPoint := expectedPruningPointByNet[info.Name()][consensusConfig.Name]
			if expectedPruningPoint != pruningPointID {
				t.Fatalf("%s: Expected pruning point to be %s but got %s", info.Name(), expectedPruningPoint, pruningPointID)
			}

			// We expect blocks that are within the difficulty adjustment window size of
			// the pruning point and its anticone to not get pruned
			unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
			pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData()
			if err != nil {
				t.Fatalf("pruningPointAndItsAnticone: %+v", err)
			}
			for _, block := range pruningPointAndItsAnticone {
				blockHash := consensushashing.BlockHash(block.Block)
				unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
				blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
				if err != nil {
					t.Fatalf("BlockWindow: %+v", err)
				}
				for _, windowBlockHash := range blockWindow {
					unprunedBlockHashesBelowPruningPoint[*windowBlockHash] = struct{}{}
				}
			}

			// For every block in the test file, decide whether it should still
			// be stored and check that against the block store:
			//   - blocks in the pruning point's future are kept,
			//   - blocks in its past are kept only if they are in the unpruned
			//     set computed above,
			//   - blocks in its anticone are kept if they are in the virtual's
			//     past or in the unpruned set.
			for _, jsonBlock := range test.Blocks {
				id := jsonBlock.ID
				blockHash := blockIDToHash[id]
				isPruningPointAncestorOfBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, pruningPoint, blockHash)
				if err != nil {
					t.Fatalf("IsAncestorOf: %+v", err)
				}
				expectsBlock := true
				if !isPruningPointAncestorOfBlock {
					isBlockAncestorOfPruningPoint, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, pruningPoint)
					if err != nil {
						t.Fatalf("IsAncestorOf: %+v", err)
					}
					if isBlockAncestorOfPruningPoint {
						if _, ok := unprunedBlockHashesBelowPruningPoint[*blockHash]; !ok {
							expectsBlock = false
						}
					} else {
						// The block is in the pruning point's anticone: it is
						// kept only if it's in the past of the virtual (i.e. an
						// ancestor of some virtual parent) or in the unpruned set.
						virtualInfo, err := tc.GetVirtualInfo()
						if err != nil {
							t.Fatalf("GetVirtualInfo: %+v", err)
						}
						isInPastOfVirtual := false
						for _, virtualParent := range virtualInfo.ParentHashes {
							isAncestorOfVirtualParent, err := tc.DAGTopologyManager().IsAncestorOf(
								stagingArea, blockHash, virtualParent)
							if err != nil {
								t.Fatalf("IsAncestorOf: %+v", err)
							}
							if isAncestorOfVirtualParent {
								isInPastOfVirtual = true
								break
							}
						}
						if !isInPastOfVirtual {
							if _, ok := unprunedBlockHashesBelowPruningPoint[*blockHash]; !ok {
								expectsBlock = false
							}
						}
					}
				}

				hasBlock, err := tc.BlockStore().HasBlock(tc.DatabaseContext(), stagingArea, blockHash)
				if err != nil {
					t.Fatalf("HasBlock: %+v", err)
				}
				if expectsBlock != hasBlock {
					t.Fatalf("expected hasBlock to be %t for block %s but got %t", expectsBlock, id, hasBlock)
				}
			}
			return nil
		})
		if err != nil {
			t.Fatalf("Walk: %+v", err)
		}
	})
}