mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-05-28 09:46:50 +00:00

* Add StagingArea struct * Implemented staging areas in blockStore * Move blockStagingShard to separate folder * Apply staging shard to acceptanceDataStore * Update blockHeaderStore with StagingArea * Add StagingArea to BlockRelationStore * Add StagingArea to blockStatusStore * Add StagingArea to consensusStateStore * Add StagingArea to daaBlocksStore * Add StagingArea to finalityStore * Add StagingArea to ghostdagDataStore * Add StagingArea to headersSelectedChainStore and headersSelectedTipStore * Add StagingArea to multisetStore * Add StagingArea to pruningStore * Add StagingArea to reachabilityDataStore * Add StagingArea to utxoDiffStore * Fix forgotten compilation error * Update reachability manager and some more things with StagingArea * Add StagingArea to dagTopologyManager, and some more * Add StagingArea to GHOSTDAGManager, and some more * Add StagingArea to difficultyManager, and some more * Add StagingArea to dagTraversalManager, and some more * Add StagingArea to headerTipsManager, and some more * Add StagingArea to constnsusStateManager, pastMedianTimeManager * Add StagingArea to transactionValidator * Add StagingArea to finalityManager * Add StagingArea to mergeDepthManager * Add StagingArea to pruningManager * Add StagingArea to rest of ValidateAndInsertBlock * Add StagingArea to blockValidator * Add StagingArea to coinbaseManager * Add StagingArea to syncManager * Add StagingArea to blockBuilder * Update consensus with StagingArea * Add StagingArea to ghostdag2 * Fix remaining compilation errors * Update names of stagingShards * Fix forgotten stagingArea passing * Mark stagingShard.isCommited = true once commited * Move isStaged to stagingShard, so that it's available without going through store * Make blockHeaderStore count be avilable from stagingShard * Fix remaining forgotten stagingArea passing * commitAllChanges should call dbTx.Commit in the end * Fix all tests tests in blockValidator * Fix all tests in consensusStateManager and some more * 
Fix all tests in pruningManager * Add many missing stagingAreas in tests * Fix many tests * Fix most of all other tests * Fix ghostdag_test.go * Add comment to StagingArea * Make list of StagingShards an array * Add comment to StagingShardID * Make sure all staging shards are pointer-receiver * Undo bucket rename in block_store * Typo: isCommited -> isCommitted * Add comment explaining why stagingArea.shards is an array
149 lines
5.6 KiB
Go
149 lines
5.6 KiB
Go
package rpchandlers_test
|
|
|
|
import (
|
|
"reflect"
|
|
"sort"
|
|
"testing"
|
|
|
|
"github.com/kaspanet/kaspad/domain/consensus/model"
|
|
|
|
"github.com/kaspanet/kaspad/app/appmessage"
|
|
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
|
"github.com/kaspanet/kaspad/app/rpc/rpchandlers"
|
|
"github.com/kaspanet/kaspad/domain/consensus"
|
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
|
|
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
|
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
|
"github.com/kaspanet/kaspad/domain/dagconfig"
|
|
"github.com/kaspanet/kaspad/domain/miningmanager"
|
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
|
)
|
|
|
|
// fakeDomain adapts a testapi.TestConsensus so it can be plugged in as the
// rpccontext Domain in tests. It provides exactly the surface the handler
// under test touches: Consensus (the embedded test consensus itself) and
// MiningManager (unused here, so nil).
type fakeDomain struct {
	testapi.TestConsensus
}
|
|
|
|
// Consensus returns the fake domain itself: the embedded TestConsensus
// satisfies externalapi.Consensus, so the wrapper can stand in directly.
func (d fakeDomain) Consensus() externalapi.Consensus {
	return d
}
|
|
// MiningManager is not exercised by these tests, so no manager is supplied.
func (d fakeDomain) MiningManager() miningmanager.MiningManager {
	return nil
}
|
|
|
|
// TestHandleGetBlocks builds a layered split/merge DAG on a test consensus
// and verifies that the GetBlocks RPC handler returns block hashes in the
// expected order for a variety of lowHash values (selected parent, every
// block in the DAG, nil, and genesis explicitly).
func TestHandleGetBlocks(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
		// One staging area is shared by all reachability/GHOSTDAG queries
		// below; nothing is committed through it.
		stagingArea := model.NewStagingArea()

		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(params, false, "TestHandleGetBlocks")
		if err != nil {
			t.Fatalf("Error setting up consensus: %+v", err)
		}
		defer teardown(false)

		// Minimal RPC context: the handler only needs Config and Domain.
		fakeContext := rpccontext.Context{
			Config: &config.Config{Flags: &config.Flags{NetworkFlags: config.NetworkFlags{ActiveNetParams: params}}},
			Domain: fakeDomain{tc},
		}

		// getBlocks invokes the handler under test; a nil lowHash sends an
		// empty request (no LowHash field set).
		getBlocks := func(lowHash *externalapi.DomainHash) *appmessage.GetBlocksResponseMessage {
			request := appmessage.GetBlocksRequestMessage{}
			if lowHash != nil {
				request.LowHash = lowHash.String()
			}
			response, err := rpchandlers.HandleGetBlocks(&fakeContext, nil, &request)
			if err != nil {
				t.Fatalf("Expected empty request to not fail, instead: '%v'", err)
			}
			return response.(*appmessage.GetBlocksResponseMessage)
		}

		// filterAntiPast keeps only the hashes in slice that are NOT in the
		// past of povBlock, preserving slice order.
		filterAntiPast := func(povBlock *externalapi.DomainHash, slice []*externalapi.DomainHash) []*externalapi.DomainHash {
			antipast := make([]*externalapi.DomainHash, 0, len(slice))

			for _, blockHash := range slice {
				isInPastOfPovBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, povBlock)
				if err != nil {
					t.Fatalf("Failed doing reachability check: '%v'", err)
				}
				if !isInPastOfPovBlock {
					antipast = append(antipast, blockHash)
				}
			}
			return antipast
		}

		// Create a DAG with the following structure:
		//             merging block
		//            /      |      \
		//      split1    split2    split3
		//            \      |      /
		//             merging block
		//            /      |      \
		//      split1    split2    split3
		//            \      |      /
		//                  etc.
		expectedOrder := make([]*externalapi.DomainHash, 0, 40)
		mergingBlock := params.GenesisHash
		for i := 0; i < 10; i++ {
			splitBlocks := make([]*externalapi.DomainHash, 0, 3)
			for j := 0; j < 3; j++ {
				blockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{mergingBlock}, nil, nil)
				if err != nil {
					t.Fatalf("Failed adding block: %v", err)
				}
				splitBlocks = append(splitBlocks, blockHash)
			}
			// Reverse-GHOSTDAG-sort the siblings so the selected parent ends
			// up last in the slice, then record it first in expectedOrder
			// followed by its siblings — presumably mirroring the handler's
			// traversal order (selected parent before the rest of the merge
			// set); TODO(review) confirm against HandleGetBlocks.
			sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(stagingArea, splitBlocks, tc, t)))
			restOfSplitBlocks, selectedParent := splitBlocks[:len(splitBlocks)-1], splitBlocks[len(splitBlocks)-1]
			expectedOrder = append(expectedOrder, selectedParent)
			expectedOrder = append(expectedOrder, restOfSplitBlocks...)

			// Merge all three splits back into a single block for the next layer.
			mergingBlock, _, err = tc.AddBlock(splitBlocks, nil, nil)
			if err != nil {
				t.Fatalf("Failed adding block: %v", err)
			}
			expectedOrder = append(expectedOrder, mergingBlock)
		}

		// Sanity check: the last merging block should be the virtual's
		// selected parent.
		virtualSelectedParent, err := tc.GetVirtualSelectedParent()
		if err != nil {
			t.Fatalf("Failed getting SelectedParent: %v", err)
		}
		if !virtualSelectedParent.Equal(expectedOrder[len(expectedOrder)-1]) {
			t.Fatalf("Expected %s to be selectedParent, instead found: %s", expectedOrder[len(expectedOrder)-1], virtualSelectedParent)
		}

		// lowHash == selected parent: the response should contain exactly
		// that single hash.
		requestSelectedParent := getBlocks(virtualSelectedParent)
		if !reflect.DeepEqual(requestSelectedParent.BlockHashes, hashes.ToStrings([]*externalapi.DomainHash{virtualSelectedParent})) {
			t.Fatalf("TestHandleGetBlocks expected:\n%v\nactual:\n%v", virtualSelectedParent, requestSelectedParent.BlockHashes)
		}

		// For every block in the DAG: the response for lowHash == blockHash
		// should be the block itself followed by its anti-past (in expected
		// order).
		for i, blockHash := range expectedOrder {
			expectedBlocks := filterAntiPast(blockHash, expectedOrder)
			expectedBlocks = append([]*externalapi.DomainHash{blockHash}, expectedBlocks...)

			actualBlocks := getBlocks(blockHash)
			if !reflect.DeepEqual(actualBlocks.BlockHashes, hashes.ToStrings(expectedBlocks)) {
				t.Fatalf("TestHandleGetBlocks %d \nexpected: \n%v\nactual:\n%v", i,
					hashes.ToStrings(expectedBlocks), actualBlocks.BlockHashes)
			}
		}

		// Make explicitly sure that if lowHash==highHash we get a slice with a single hash.
		actualBlocks := getBlocks(virtualSelectedParent)
		if !reflect.DeepEqual(actualBlocks.BlockHashes, []string{virtualSelectedParent.String()}) {
			t.Fatalf("TestHandleGetBlocks expected blocks to contain just '%s', instead got: \n%v",
				virtualSelectedParent, actualBlocks.BlockHashes)
		}

		// No lowHash at all: the whole DAG starting from genesis is expected.
		expectedOrder = append([]*externalapi.DomainHash{params.GenesisHash}, expectedOrder...)
		actualOrder := getBlocks(nil)
		if !reflect.DeepEqual(actualOrder.BlockHashes, hashes.ToStrings(expectedOrder)) {
			t.Fatalf("TestHandleGetBlocks \nexpected: %v \nactual:\n%v", expectedOrder, actualOrder.BlockHashes)
		}

		// Explicit genesis lowHash should yield the same full ordering.
		requestAllExplictly := getBlocks(params.GenesisHash)
		if !reflect.DeepEqual(requestAllExplictly.BlockHashes, hashes.ToStrings(expectedOrder)) {
			t.Fatalf("TestHandleGetBlocks \nexpected: \n%v\n. actual:\n%v", expectedOrder, requestAllExplictly.BlockHashes)
		}
	})
}
|