diff --git a/.github/workflows/race.yaml b/.github/workflows/race.yaml index b992b7b15..27847217e 100644 --- a/.github/workflows/race.yaml +++ b/.github/workflows/race.yaml @@ -46,4 +46,4 @@ jobs: run: | git checkout "${{ env.run_on }}" git status - go test -race ./... + go test -timeout 20m -race ./... diff --git a/app/protocol/common/common.go b/app/protocol/common/common.go index 03061da14..3ab1d49b1 100644 --- a/app/protocol/common/common.go +++ b/app/protocol/common/common.go @@ -8,7 +8,7 @@ import ( // DefaultTimeout is the default duration to wait for enqueuing/dequeuing // to/from routes. -const DefaultTimeout = 30 * time.Second +const DefaultTimeout = 120 * time.Second // ErrPeerWithSameIDExists signifies that a peer with the same ID already exist. var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists") diff --git a/app/protocol/flows/blockrelay/handle_pruning_point_and_its_anticone_requests.go b/app/protocol/flows/blockrelay/handle_pruning_point_and_its_anticone_requests.go index 809b85454..168d0b34b 100644 --- a/app/protocol/flows/blockrelay/handle_pruning_point_and_its_anticone_requests.go +++ b/app/protocol/flows/blockrelay/handle_pruning_point_and_its_anticone_requests.go @@ -4,6 +4,7 @@ import ( "github.com/kaspanet/kaspad/app/appmessage" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" "github.com/kaspanet/kaspad/domain" + "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" ) @@ -40,13 +41,13 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone return err } - blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData() + pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone() if err != nil { return err } - for _, block := range blocks { - err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block)) + for _, 
blockHash := range pointAndItsAnticone { + err := sendBlockWithTrustedData(context, outgoingRoute, blockHash) if err != nil { return err } @@ -60,3 +61,17 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone log.Debugf("Sent pruning point and its anticone to %s", peer) } } + +func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error { + blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash) + if err != nil { + return err + } + + err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData)) + if err != nil { + return err + } + + return nil +} diff --git a/app/protocol/flows/blockrelay/handle_request_pruning_point_utxo_set.go b/app/protocol/flows/blockrelay/handle_request_pruning_point_utxo_set.go index 8bfca3895..4d1ed41f2 100644 --- a/app/protocol/flows/blockrelay/handle_request_pruning_point_utxo_set.go +++ b/app/protocol/flows/blockrelay/handle_request_pruning_point_utxo_set.go @@ -70,7 +70,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO } msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet) if !ok { - return nil, protocolerrors.Errorf(true, "received unexpected message type. "+ + // TODO: Change to shouldBan: true once we fix the bug of getting redundant messages + return nil, protocolerrors.Errorf(false, "received unexpected message type. "+ "expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command()) } return msgRequestPruningPointUTXOSet, nil @@ -123,7 +124,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet( } _, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk) if !ok { - return protocolerrors.Errorf(true, "received unexpected message type. 
"+ + // TODO: Change to shouldBan: true once we fix the bug of getting redundant messages + return protocolerrors.Errorf(false, "received unexpected message type. "+ "expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command()) } diff --git a/app/protocol/flows/blockrelay/ibd_with_headers_proof.go b/app/protocol/flows/blockrelay/ibd_with_headers_proof.go index ab0e65f24..732ce2b2c 100644 --- a/app/protocol/flows/blockrelay/ibd_with_headers_proof.go +++ b/app/protocol/flows/blockrelay/ibd_with_headers_proof.go @@ -130,7 +130,7 @@ func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *exte return err } - log.Debugf("Headers downloaded from peer %s", flow.peer) + log.Infof("Headers downloaded from peer %s", flow.peer) highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash) if err != nil { diff --git a/app/rpc/rpchandlers/submit_block.go b/app/rpc/rpchandlers/submit_block.go index 388443ed5..52abfb9af 100644 --- a/app/rpc/rpchandlers/submit_block.go +++ b/app/rpc/rpchandlers/submit_block.go @@ -14,9 +14,14 @@ import ( func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage) - if context.ProtocolManager.IsIBDRunning() { + isSynced, err := context.ProtocolManager.ShouldMine() + if err != nil { + return nil, err + } + + if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced { return &appmessage.SubmitBlockResponseMessage{ - Error: appmessage.RPCErrorf("Block not submitted - IBD is running"), + Error: appmessage.RPCErrorf("Block not submitted - node is not synced"), RejectReason: appmessage.RejectReasonIsInIBD, }, nil } diff --git a/build_and_test.sh b/build_and_test.sh index 30f70b0b7..886eddab6 100755 --- a/build_and_test.sh +++ b/build_and_test.sh @@ -15,8 +15,6 @@ golint -set_exit_status ./... 
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./... -go vet -composites=false $FLAGS ./... - go build $FLAGS -o kaspad . if [ -n "${NO_PARALLEL}" ] diff --git a/cmd/kaspawallet/create.go b/cmd/kaspawallet/create.go index 3e8f5286f..a6df708d1 100644 --- a/cmd/kaspawallet/create.go +++ b/cmd/kaspawallet/create.go @@ -56,6 +56,7 @@ func create(conf *createConfig) error { } file := keys.File{ + Version: keys.LastVersion, EncryptedMnemonics: encryptedMnemonics, ExtendedPublicKeys: extendedPublicKeys, MinimumSignatures: conf.MinimumSignatures, diff --git a/cmd/kaspawallet/keys/create.go b/cmd/kaspawallet/keys/create.go index 340a4a41b..f9fed6bc5 100644 --- a/cmd/kaspawallet/keys/create.go +++ b/cmd/kaspawallet/keys/create.go @@ -97,7 +97,7 @@ func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, erro return nil, err } - aead, err := getAEAD(password, salt) + aead, err := getAEAD(defaultNumThreads, password, salt) if err != nil { return nil, err } diff --git a/cmd/kaspawallet/keys/keys.go b/cmd/kaspawallet/keys/keys.go index b88e81bda..bd7a41dd6 100644 --- a/cmd/kaspawallet/keys/keys.go +++ b/cmd/kaspawallet/keys/keys.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "github.com/kaspanet/kaspad/domain/dagconfig" "github.com/kaspanet/kaspad/util" @@ -22,6 +23,9 @@ var ( defaultAppDir = util.AppDir("kaspawallet", false) ) +// LastVersion is the most up to date file format version +const LastVersion = 1 + func defaultKeysFile(netParams *dagconfig.Params) string { return filepath.Join(defaultAppDir, netParams.Name, "keys.json") } @@ -32,6 +36,8 @@ type encryptedPrivateKeyJSON struct { } type 
keysFileJSON struct { + Version uint32 `json:"version"` + NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`. EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"` ExtendedPublicKeys []string `json:"publicKeys"` MinimumSignatures uint32 `json:"minimumSignatures"` @@ -49,6 +55,8 @@ type EncryptedMnemonic struct { // File holds all the data related to the wallet keys type File struct { + Version uint32 + NumThreads uint8 // This field is ignored for versions different than 0 EncryptedMnemonics []*EncryptedMnemonic ExtendedPublicKeys []string MinimumSignatures uint32 @@ -69,6 +77,8 @@ func (d *File) toJSON() *keysFileJSON { } return &keysFileJSON{ + Version: d.Version, + NumThreads: d.NumThreads, EncryptedPrivateKeys: encryptedPrivateKeysJSON, ExtendedPublicKeys: d.ExtendedPublicKeys, MinimumSignatures: d.MinimumSignatures, @@ -80,6 +90,8 @@ func (d *File) toJSON() *keysFileJSON { } func (d *File) fromJSON(fileJSON *keysFileJSON) error { + d.Version = fileJSON.Version + d.NumThreads = fileJSON.NumThreads d.MinimumSignatures = fileJSON.MinimumSignatures d.ECDSA = fileJSON.ECDSA d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys @@ -181,10 +193,20 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) { if len(password) == 0 { password = getPassword("Password:") } + + var numThreads uint8 + if len(d.EncryptedMnemonics) > 0 { + var err error + numThreads, err = d.numThreads(password) + if err != nil { + return nil, err + } + } + privateKeys := make([]string, len(d.EncryptedMnemonics)) for i, encryptedPrivateKey := range d.EncryptedMnemonics { var err error - privateKeys[i], err = decryptMnemonic(encryptedPrivateKey, password) + privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password) if err != nil { return nil, err } @@ -278,13 +300,73 @@ func (d *File) Save() error { return nil } -func getAEAD(password, 
salt []byte) (cipher.AEAD, error) { - key := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32) +const defaultNumThreads = 8 + +func (d *File) numThreads(password []byte) (uint8, error) { + // There's a bug in v0 wallets where the number of threads + // was determined by the number of logical CPUs at the machine, + // which made the authentication non-deterministic across platforms. + // In order to solve it we introduce v1 where the number of threads + // is constant, and brute force the number of threads in v0. After we + // find the right amount via brute force we save the result to the file. + + if d.Version != 0 { + return defaultNumThreads, nil + } + + if d.NumThreads != 0 { + return d.NumThreads, nil + } + + numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0].salt) + if err != nil { + return 0, err + } + + d.NumThreads = numThreads + err = d.Save() + if err != nil { + return 0, err + } + + return numThreads, nil +} + +func (d *File) detectNumThreads(password, salt []byte) (uint8, error) { + numCPU := uint8(runtime.NumCPU()) + _, err := getAEAD(numCPU, password, salt) + if err != nil { + if !strings.Contains(err.Error(), "message authentication failed") { + return 0, err + } + } else { + return numCPU, nil + } + + for i := uint8(1); ; i++ { + if i == numCPU { + continue + } + + _, err := getAEAD(i, password, salt) + if err != nil { + const maxTries = 32 + if i > maxTries || !strings.Contains(err.Error(), "message authentication failed") { + return 0, err + } + } else { + return i, nil + } + } +} + +func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) { + key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32) return chacha20poly1305.NewX(key) } -func decryptMnemonic(encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) { - aead, err := getAEAD(password, encryptedPrivateKey.salt) +func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) 
(string, error) { + aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt) if err != nil { return "", err } diff --git a/domain/consensus/consensus.go b/domain/consensus/consensus.go index 011315f0f..d3bb816ad 100644 --- a/domain/consensus/consensus.go +++ b/domain/consensus/consensus.go @@ -137,11 +137,18 @@ func (s *consensus) Init(skipAddingGenesis bool) error { return nil } -func (s *consensus) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) { +func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) { s.lock.Lock() defer s.lock.Unlock() - return s.pruningManager.PruningPointAndItsAnticoneWithTrustedData() + return s.pruningManager.PruningPointAndItsAnticone() +} + +func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash) } // BuildBlock builds a block over the current state, with the transactions @@ -716,7 +723,10 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) { func (s *consensus) ResolveVirtual() error { // In order to prevent a situation that the consensus lock is held for too much time, we // release the lock each time resolve 100 blocks. - for { + for i := 0; ; i++ { + if i%10 == 0 { + log.Infof("Resolving virtual. 
This may take some time...") + } var isCompletelyResolved bool var err error func() { @@ -730,6 +740,7 @@ func (s *consensus) ResolveVirtual() error { } if isCompletelyResolved { + log.Infof("Resolved virtual") return nil } } diff --git a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go index 31f901f64..a1f5712fb 100644 --- a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go +++ b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go @@ -6,7 +6,9 @@ import ( "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" + "github.com/kaspanet/kaspad/infrastructure/db/database" "github.com/kaspanet/kaspad/util/staging" + "github.com/pkg/errors" ) var reachabilityDataBucketName = []byte("reachability-data") @@ -50,6 +52,8 @@ func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool return rds.stagingShard(stagingArea).isStaged() } +var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found") + // ReachabilityData returns the reachabilityData associated with the given blockHash func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) { stagingShard := rds.stagingShard(stagingArea) @@ -59,10 +63,16 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta } if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok { + if reachabilityData == nil { + return nil, errNotFound + } return reachabilityData.(model.ReachabilityData), nil } reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash)) + if database.IsNotFoundError(err) { + 
rds.reachabilityDataCache.Add(blockHash, nil) + } if err != nil { return nil, err } @@ -76,17 +86,15 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta } func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { - stagingShard := rds.stagingShard(stagingArea) - - if _, ok := stagingShard.reachabilityData[*blockHash]; ok { - return true, nil + _, err := rds.ReachabilityData(dbContext, stagingArea, blockHash) + if database.IsNotFoundError(err) { + return false, nil + } + if err != nil { + return false, err } - if rds.reachabilityDataCache.Has(blockHash) { - return true, nil - } - - return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash)) + return true, nil } // ReachabilityReindexRoot returns the current reachability reindex root diff --git a/domain/consensus/factory.go b/domain/consensus/factory.go index 25f30cbd7..1509969af 100644 --- a/domain/consensus/factory.go +++ b/domain/consensus/factory.go @@ -4,6 +4,7 @@ import ( "github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder" + parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager" "github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager" "github.com/kaspanet/kaspad/domain/consensus/utils/constants" "io/ioutil" @@ -158,12 +159,17 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas dagTraversalManager := dagTraversalManagers[0] // Processes + parentsManager := parentssanager.New(config.GenesisHash, config.HardForkOmitGenesisFromParentsDAAScore) blockParentBuilder := blockparentbuilder.New( dbManager, blockHeaderStore, dagTopologyManager, + parentsManager, reachabilityDataStore, pruningStore, + + config.HardForkOmitGenesisFromParentsDAAScore, + 
config.GenesisHash, ) pastMedianTimeManager := f.pastMedianTimeConsructor( config.TimestampDeviationTolerance, @@ -316,6 +322,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas finalityManager, blockParentBuilder, pruningManager, + parentsManager, pruningStore, blockStore, @@ -403,6 +410,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas ghostdagManagers, reachabilityManagers, dagTraversalManagers, + parentsManager, ghostdagDataStores, pruningStore, @@ -581,7 +589,7 @@ func dagStores(config *Config, ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches) } else { blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false) - reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 200, false) + reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 86400, false) ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false) } } diff --git a/domain/consensus/model/externalapi/block.go b/domain/consensus/model/externalapi/block.go index 15fae9763..053115f9f 100644 --- a/domain/consensus/model/externalapi/block.go +++ b/domain/consensus/model/externalapi/block.go @@ -58,7 +58,6 @@ type BlockHeader interface { type BaseBlockHeader interface { Version() uint16 Parents() []BlockLevelParents - ParentsAtLevel(level int) BlockLevelParents DirectParents() BlockLevelParents HashMerkleRoot() *DomainHash AcceptedIDMerkleRoot() *DomainHash @@ -70,6 +69,7 @@ type BaseBlockHeader interface { BlueScore() uint64 BlueWork() *big.Int PruningPoint() *DomainHash + BlockLevel() int Equal(other BaseBlockHeader) bool } diff --git a/domain/consensus/model/externalapi/consensus.go b/domain/consensus/model/externalapi/consensus.go index f2eef017d..2430eb5c6 100644 --- a/domain/consensus/model/externalapi/consensus.go +++ b/domain/consensus/model/externalapi/consensus.go @@ -25,7 +25,8 @@ type Consensus interface { 
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error) PruningPoint() (*DomainHash, error) PruningPointHeaders() ([]BlockHeader, error) - PruningPointAndItsAnticoneWithTrustedData() ([]*BlockWithTrustedData, error) + PruningPointAndItsAnticone() ([]*DomainHash, error) + BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error) ClearImportedPruningPointData() error AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error diff --git a/domain/consensus/model/interface_processes_blockparentbuilder.go b/domain/consensus/model/interface_processes_blockparentbuilder.go index fd42b2d3e..b865f6eec 100644 --- a/domain/consensus/model/interface_processes_blockparentbuilder.go +++ b/domain/consensus/model/interface_processes_blockparentbuilder.go @@ -5,5 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" // BlockParentBuilder exposes a method to build super-block parents for // a given set of direct parents type BlockParentBuilder interface { - BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) + BuildParents(stagingArea *StagingArea, + daaScore uint64, + directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) } diff --git a/domain/consensus/model/interface_processes_parentsmanager.go b/domain/consensus/model/interface_processes_parentsmanager.go new file mode 100644 index 000000000..4550fa86d --- /dev/null +++ b/domain/consensus/model/interface_processes_parentsmanager.go @@ -0,0 +1,9 @@ +package model + +import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" + +// ParentsManager is a wrapper above header parents that replaces empty parents with genesis when needed. 
+type ParentsManager interface { + ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents + Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents +} diff --git a/domain/consensus/model/interface_processes_pruningmanager.go b/domain/consensus/model/interface_processes_pruningmanager.go index 4b7cfe5b4..4646a225f 100644 --- a/domain/consensus/model/interface_processes_pruningmanager.go +++ b/domain/consensus/model/interface_processes_pruningmanager.go @@ -12,6 +12,7 @@ type PruningManager interface { AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error UpdatePruningPointIfRequired() error PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error - PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) + PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) + BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) } diff --git a/domain/consensus/processes/blockbuilder/block_builder.go b/domain/consensus/processes/blockbuilder/block_builder.go index 745bc8809..12f5768e9 100644 --- a/domain/consensus/processes/blockbuilder/block_builder.go +++ b/domain/consensus/processes/blockbuilder/block_builder.go @@ -183,10 +183,16 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingAr func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction, newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) { - parents, err := bb.newBlockParents(stagingArea) + daaScore, err := bb.newBlockDAAScore(stagingArea) if err != nil { return nil, err } + + parents, err := bb.newBlockParents(stagingArea, daaScore) + if 
err != nil { + return nil, err + } + timeInMilliseconds, err := bb.newBlockTime(stagingArea) if err != nil { return nil, err @@ -204,10 +210,6 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions if err != nil { return nil, err } - daaScore, err := bb.newBlockDAAScore(stagingArea) - if err != nil { - return nil, err - } blueWork, err := bb.newBlockBlueWork(stagingArea) if err != nil { return nil, err @@ -233,12 +235,12 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions ), nil } -func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]externalapi.BlockLevelParents, error) { +func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) { virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash) if err != nil { return nil, err } - return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents) + return bb.blockParentBuilder.BuildParents(stagingArea, daaScore, virtualBlockRelations.Parents) } func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) { diff --git a/domain/consensus/processes/blockbuilder/test_block_builder.go b/domain/consensus/processes/blockbuilder/test_block_builder.go index 4796f9eab..8a87acc21 100644 --- a/domain/consensus/processes/blockbuilder/test_block_builder.go +++ b/domain/consensus/processes/blockbuilder/test_block_builder.go @@ -83,7 +83,7 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre return nil, err } - parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes) + parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes) if err != nil { return nil, err } diff --git a/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go 
b/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go index 6f3ceede3..d12fd41a7 100644 --- a/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go +++ b/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go @@ -5,7 +5,6 @@ import ( "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" "github.com/kaspanet/kaspad/domain/consensus/utils/hashset" - "github.com/kaspanet/kaspad/domain/consensus/utils/pow" "github.com/pkg/errors" ) @@ -13,8 +12,12 @@ type blockParentBuilder struct { databaseContext model.DBManager blockHeaderStore model.BlockHeaderStore dagTopologyManager model.DAGTopologyManager + parentsManager model.ParentsManager reachabilityDataStore model.ReachabilityDataStore pruningStore model.PruningStore + + hardForkOmitGenesisFromParentsDAAScore uint64 + genesisHash *externalapi.DomainHash } // New creates a new instance of a BlockParentBuilder @@ -22,20 +25,29 @@ func New( databaseContext model.DBManager, blockHeaderStore model.BlockHeaderStore, dagTopologyManager model.DAGTopologyManager, + parentsManager model.ParentsManager, + reachabilityDataStore model.ReachabilityDataStore, pruningStore model.PruningStore, + + hardForkOmitGenesisFromParentsDAAScore uint64, + genesisHash *externalapi.DomainHash, ) model.BlockParentBuilder { return &blockParentBuilder{ - databaseContext: databaseContext, - blockHeaderStore: blockHeaderStore, - dagTopologyManager: dagTopologyManager, - reachabilityDataStore: reachabilityDataStore, - pruningStore: pruningStore, + databaseContext: databaseContext, + blockHeaderStore: blockHeaderStore, + dagTopologyManager: dagTopologyManager, + parentsManager: parentsManager, + + reachabilityDataStore: reachabilityDataStore, + pruningStore: pruningStore, + hardForkOmitGenesisFromParentsDAAScore: hardForkOmitGenesisFromParentsDAAScore, + genesisHash: genesisHash, } } func (bpb *blockParentBuilder) 
BuildParents(stagingArea *model.StagingArea, - directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) { + daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) { // Late on we'll mutate direct parent hashes, so we first clone it. directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes)) @@ -93,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea, // all the block levels they occupy for _, directParentHeader := range directParentHeaders { directParentHash := consensushashing.HeaderHash(directParentHeader) - blockLevel := pow.BlockLevel(directParentHeader) + blockLevel := directParentHeader.BlockLevel() for i := 0; i <= blockLevel; i++ { if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists { candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash) @@ -116,7 +128,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea, } for _, directParentHeader := range directParentHeaders { - for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() { + for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) { isEmptyLevel := false if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists { candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash) @@ -145,7 +157,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea, } else { for childHash, childHeader := range virtualGenesisChildrenHeaders { childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse - if childHeader.ParentsAtLevel(blockLevel).Contains(parent) { + if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) { referenceBlocks = append(referenceBlocks, &childHash) } } @@ -203,14 +215,19 @@ func (bpb 
*blockParentBuilder) BuildParents(stagingArea *model.StagingArea, } } - parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap)) + parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap)) for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ { + if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; daaScore >= bpb.hardForkOmitGenesisFromParentsDAAScore && ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 { + break + } + levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel])) for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] { block := block // Assign to a new pointer to avoid `range` pointer reuse levelBlocks = append(levelBlocks, &block) } - parents[blockLevel] = levelBlocks + + parents = append(parents, levelBlocks) } return parents, nil } diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go index 79b747034..b2fbde847 100644 --- a/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go +++ b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go @@ -87,13 +87,18 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) { t.Fatalf("PruningPointHeaders: %+v", err) } - pruningPointAndItsAnticoneWithTrustedData, err := tcSyncer.PruningPointAndItsAnticoneWithTrustedData() + pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone() if err != nil { - t.Fatalf("PruningPointAndItsAnticoneWithTrustedData: %+v", err) + t.Fatalf("PruningPointAndItsAnticone: %+v", err) } - for _, blockWithTrustedData := range pruningPointAndItsAnticoneWithTrustedData { - _, err := 
synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false) + for _, blockHash := range pruningPointAndItsAnticone { + blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash) + if err != nil { + return + } + + _, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false) if err != nil { t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err) } diff --git a/domain/consensus/processes/blockvalidator/block_header_in_context.go b/domain/consensus/processes/blockvalidator/block_header_in_context.go index fe59bab11..587e5e8f5 100644 --- a/domain/consensus/processes/blockvalidator/block_header_in_context.go +++ b/domain/consensus/processes/blockvalidator/block_header_in_context.go @@ -6,7 +6,6 @@ import ( "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/ruleerrors" "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" - "github.com/kaspanet/kaspad/domain/consensus/utils/pow" "github.com/kaspanet/kaspad/infrastructure/logger" "github.com/pkg/errors" ) @@ -63,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea, return err } if !hasReachabilityData { - blockLevel := pow.BlockLevel(header) + blockLevel := header.BlockLevel() for i := 0; i <= blockLevel; i++ { err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash) if err != nil { @@ -195,7 +194,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has } func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error { - expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents()) + expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents()) if err != nil { return err } diff --git a/domain/consensus/processes/blockvalidator/blockvalidator.go 
b/domain/consensus/processes/blockvalidator/blockvalidator.go index acf6cf463..c39489f05 100644 --- a/domain/consensus/processes/blockvalidator/blockvalidator.go +++ b/domain/consensus/processes/blockvalidator/blockvalidator.go @@ -37,6 +37,7 @@ type blockValidator struct { finalityManager model.FinalityManager blockParentBuilder model.BlockParentBuilder pruningManager model.PruningManager + parentsManager model.ParentsManager blockStore model.BlockStore ghostdagDataStores []model.GHOSTDAGDataStore @@ -72,6 +73,7 @@ func New(powMax *big.Int, finalityManager model.FinalityManager, blockParentBuilder model.BlockParentBuilder, pruningManager model.PruningManager, + parentsManager model.ParentsManager, pruningStore model.PruningStore, blockStore model.BlockStore, @@ -108,6 +110,7 @@ func New(powMax *big.Int, finalityManager: finalityManager, blockParentBuilder: blockParentBuilder, pruningManager: pruningManager, + parentsManager: parentsManager, pruningStore: pruningStore, blockStore: blockStore, diff --git a/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go index c511e80d7..142aa3e09 100644 --- a/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go +++ b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go @@ -67,9 +67,9 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea, header externalapi.BlockHeader, isBlockWithTrustedData bool) error { - for level := 0; level <= pow.BlockLevel(header); level++ { + for level := 0; level <= header.BlockLevel(); level++ { var parents []*externalapi.DomainHash - for _, parent := range header.ParentsAtLevel(level) { + for _, parent := range v.parentsManager.ParentsAtLevel(header, level) { _, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false) isNotFoundError := 
database.IsNotFoundError(err) if !isNotFoundError && err != nil { @@ -116,7 +116,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea, return err } - blockLevel := pow.BlockLevel(header) + blockLevel := header.BlockLevel() for i := 1; i <= blockLevel; i++ { err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash) if err != nil { diff --git a/domain/consensus/processes/consensusstatemanager/resolve.go b/domain/consensus/processes/consensusstatemanager/resolve.go index 9ee6f65f6..685330b5b 100644 --- a/domain/consensus/processes/consensusstatemanager/resolve.go +++ b/domain/consensus/processes/consensusstatemanager/resolve.go @@ -35,7 +35,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo var selectedTip *externalapi.DomainHash isCompletelyResolved := true for _, tip := range tips { - log.Infof("Resolving tip %s", tip) + log.Debugf("Resolving tip %s", tip) resolveStagingArea := model.NewStagingArea() unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip) if err != nil { @@ -46,7 +46,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve if hasMoreUnverifiedThanMax { resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve] - log.Infof("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip) + log.Debugf("Has more than %d blocks to resolve. 
Changing the resolve tip to %s", maxBlocksToResolve, resolveTip) } blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true) diff --git a/domain/consensus/processes/parentsmanager/parentsmanager.go b/domain/consensus/processes/parentsmanager/parentsmanager.go new file mode 100644 index 000000000..bec3d648c --- /dev/null +++ b/domain/consensus/processes/parentsmanager/parentsmanager.go @@ -0,0 +1,47 @@ +package parentssanager + +import ( + "github.com/kaspanet/kaspad/domain/consensus/model" + "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" + "github.com/kaspanet/kaspad/domain/consensus/utils/constants" +) + +type parentsManager struct { + hardForkOmitGenesisFromParentsDAAScore uint64 + genesisHash *externalapi.DomainHash +} + +// New instantiates a new ParentsManager +func New(genesisHash *externalapi.DomainHash, hardForkOmitGenesisFromParentsDAAScore uint64) model.ParentsManager { + return &parentsManager{ + genesisHash: genesisHash, + hardForkOmitGenesisFromParentsDAAScore: hardForkOmitGenesisFromParentsDAAScore, + } +} + +func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents { + var parentsAtLevel externalapi.BlockLevelParents + if len(blockHeader.Parents()) > level { + parentsAtLevel = blockHeader.Parents()[level] + } + + if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 && blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore { + return externalapi.BlockLevelParents{pm.genesisHash} + } + + return parentsAtLevel +} + +func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents { + numParents := len(blockHeader.Parents()) + if blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore { + numParents = constants.MaxBlockLevel + 1 + } + + parents := make([]externalapi.BlockLevelParents, numParents) + for i := 0; i < numParents; i++ { + parents[i] = 
pm.ParentsAtLevel(blockHeader, i) + } + + return parents +} diff --git a/domain/consensus/processes/pruningmanager/pruning_test.go b/domain/consensus/processes/pruningmanager/pruning_test.go index c006a5162..83fba86d8 100644 --- a/domain/consensus/processes/pruningmanager/pruning_test.go +++ b/domain/consensus/processes/pruningmanager/pruning_test.go @@ -2,7 +2,6 @@ package pruningmanager_test import ( "encoding/json" - "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" "github.com/kaspanet/kaspad/infrastructure/db/database" "os" "path/filepath" @@ -40,7 +39,7 @@ func TestPruning(t *testing.T) { dagconfig.MainnetParams.Name: "502", dagconfig.TestnetParams.Name: "502", dagconfig.DevnetParams.Name: "502", - dagconfig.SimnetParams.Name: "502", + dagconfig.SimnetParams.Name: "503", }, } @@ -140,12 +139,11 @@ func TestPruning(t *testing.T) { // We expect blocks that are within the difficulty adjustment window size of // the pruning point and its anticone to not get pruned unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{}) - pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData() + pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone() if err != nil { t.Fatalf("pruningPointAndItsAnticone: %+v", err) } - for _, block := range pruningPointAndItsAnticone { - blockHash := consensushashing.BlockHash(block.Block) + for _, blockHash := range pruningPointAndItsAnticone { unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{} blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize) if err != nil { diff --git a/domain/consensus/processes/pruningmanager/pruningmanager.go b/domain/consensus/processes/pruningmanager/pruningmanager.go index 58a86e1a0..5b27685cc 100644 --- a/domain/consensus/processes/pruningmanager/pruningmanager.go +++ b/domain/consensus/processes/pruningmanager/pruningmanager.go @@ -907,8 +907,8 @@ func 
(pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pr return nil } -func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) { - onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticoneWithTrustedData") +func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone") defer onEnd() stagingArea := model.NewStagingArea() @@ -922,34 +922,32 @@ func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*extern return nil, err } - blocks := make([]*externalapi.BlockWithTrustedData, 0, len(pruningPointAnticone)+1) - - pruningPointWithTrustedData, err := pm.blockWithTrustedData(stagingArea, pruningPoint) - if err != nil { - return nil, err - } - - for _, blockHash := range pruningPointAnticone { - blockWithTrustedData, err := pm.blockWithTrustedData(stagingArea, blockHash) + // Sorting the blocks in topological order + var sortErr error + sort.Slice(pruningPointAnticone, func(i, j int) bool { + headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i]) if err != nil { - return nil, err + sortErr = err + return false } - blocks = append(blocks, blockWithTrustedData) + headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j]) + if err != nil { + sortErr = err + return false + } + + return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0 + }) + if sortErr != nil { + return nil, sortErr } - // Sorting the blocks in topological order - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Block.Header.BlueWork().Cmp(blocks[j].Block.Header.BlueWork()) < 0 - }) - // The pruning point should always come first - blocks = append([]*externalapi.BlockWithTrustedData{pruningPointWithTrustedData}, blocks...) 
- - return blocks, nil + return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil } -func (pm *pruningManager) blockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) { +func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) { block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash) if err != nil { return nil, err diff --git a/domain/consensus/processes/pruningproofmanager/log.go b/domain/consensus/processes/pruningproofmanager/log.go new file mode 100644 index 000000000..d3e4d9b05 --- /dev/null +++ b/domain/consensus/processes/pruningproofmanager/log.go @@ -0,0 +1,5 @@ +package pruningproofmanager + +import "github.com/kaspanet/kaspad/infrastructure/logger" + +var log = logger.RegisterSubSystem("PPMN") diff --git a/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go b/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go index 1d44d0a35..35c374d95 100644 --- a/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go +++ b/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go @@ -15,8 +15,8 @@ import ( "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" "github.com/kaspanet/kaspad/domain/consensus/utils/constants" "github.com/kaspanet/kaspad/domain/consensus/utils/hashset" - "github.com/kaspanet/kaspad/domain/consensus/utils/pow" "github.com/kaspanet/kaspad/infrastructure/db/database" + "github.com/kaspanet/kaspad/infrastructure/logger" "github.com/pkg/errors" "math/big" ) @@ -28,6 +28,7 @@ type pruningProofManager struct { ghostdagManagers []model.GHOSTDAGManager reachabilityManagers []model.ReachabilityManager dagTraversalManagers []model.DAGTraversalManager + parentsManager model.ParentsManager ghostdagDataStores []model.GHOSTDAGDataStore pruningStore 
model.PruningStore @@ -39,6 +40,9 @@ type pruningProofManager struct { genesisHash *externalapi.DomainHash k externalapi.KType pruningProofM uint64 + + cachedPruningPoint *externalapi.DomainHash + cachedProof *externalapi.PruningPointProof } // New instantiates a new PruningManager @@ -49,6 +53,7 @@ func New( ghostdagManagers []model.GHOSTDAGManager, reachabilityManagers []model.ReachabilityManager, dagTraversalManagers []model.DAGTraversalManager, + parentsManager model.ParentsManager, ghostdagDataStores []model.GHOSTDAGDataStore, pruningStore model.PruningStore, @@ -68,6 +73,7 @@ func New( ghostdagManagers: ghostdagManagers, reachabilityManagers: reachabilityManagers, dagTraversalManagers: dagTraversalManagers, + parentsManager: parentsManager, ghostdagDataStores: ghostdagDataStores, pruningStore: pruningStore, @@ -83,6 +89,33 @@ func New( } func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof") + defer onEnd() + + pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) { + return ppm.cachedProof, nil + } + + proof, err := ppm.buildPruningPointProof(stagingArea) + if err != nil { + return nil, err + } + + ppm.cachedProof = proof + ppm.cachedPruningPoint = pruningPoint + + return proof, nil +} + +func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof") + defer onEnd() + pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea) if err != nil { return nil, err @@ -97,17 +130,33 @@ func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.Stagin return nil, err } - maxLevel := 
len(pruningPointHeader.Parents()) - 1 + maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1 headersByLevel := make(map[int][]externalapi.BlockHeader) selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1) - pruningPointLevel := pow.BlockLevel(pruningPointHeader) + pruningPointLevel := pruningPointHeader.BlockLevel() for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- { var selectedTip *externalapi.DomainHash if blockLevel <= pruningPointLevel { selectedTip = pruningPoint } else { - blockLevelParents := pruningPointHeader.ParentsAtLevel(blockLevel) - selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, []*externalapi.DomainHash(blockLevelParents)...) + blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel) + selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents)) + + // In a pruned node, some pruning point parents might be missing, but we're guaranteed that its + // selected parent is not missing. + for _, parent := range blockLevelParents { + _, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false) + if database.IsNotFoundError(err) { + continue + } + if err != nil { + return nil, err + } + + selectedTipCandidates = append(selectedTipCandidates, parent) + } + + selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...) 
if err != nil { return nil, err } @@ -248,6 +297,9 @@ func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, gho } func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof") + defer onEnd() + stagingArea := model.NewStagingArea() if len(pruningPointProof.Headers) == 0 { @@ -257,8 +309,8 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext level0Headers := pruningPointProof.Headers[0] pruningPointHeader := level0Headers[len(level0Headers)-1] pruningPoint := consensushashing.HeaderHash(pruningPointHeader) - pruningPointBlockLevel := pow.BlockLevel(pruningPointHeader) - maxLevel := len(pruningPointHeader.Parents()) - 1 + pruningPointBlockLevel := pruningPointHeader.BlockLevel() + maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1 if maxLevel >= len(pruningPointProof.Headers) { return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+ "has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1) @@ -300,15 +352,15 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext var selectedTip *externalapi.DomainHash for i, header := range headers { blockHash := consensushashing.HeaderHash(header) - if pow.BlockLevel(header) < blockLevel { + if header.BlockLevel() < blockLevel { return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+ - "expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel) + "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel) } blockHeaderStore.Stage(stagingArea, blockHash, header) var parents []*externalapi.DomainHash - for _, parent := range header.ParentsAtLevel(blockLevel) { + for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) { _, err := 
ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false) if database.IsNotFoundError(err) { continue @@ -377,7 +429,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext } } - if !selectedTip.Equal(pruningPoint) && !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) { + if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) { return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+ "level %d is not a parent of the pruning point", selectedTip, blockLevel) } @@ -395,7 +447,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+ "proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel) } - } else if !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) { + } else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) { return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+ "proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+ "level", selectedTip, blockLevel) @@ -554,19 +606,22 @@ func (ppm *pruningProofManager) dagProcesses( } func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof") + defer onEnd() + for blockLevel, headers := range pruningPointProof.Headers { var selectedTip *externalapi.DomainHash for i, header := range headers { blockHash := consensushashing.HeaderHash(header) - if pow.BlockLevel(header) < blockLevel { + if header.BlockLevel() < blockLevel { return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block 
%s level is %d when it's "+ - "expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel) + "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel) } ppm.blockHeaderStore.Stage(stagingArea, blockHash, header) var parents []*externalapi.DomainHash - for _, parent := range header.ParentsAtLevel(blockLevel) { + for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) { _, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false) if database.IsNotFoundError(err) { continue diff --git a/domain/consensus/utils/blockheader/blockheader.go b/domain/consensus/utils/blockheader/blockheader.go index c23f00303..029447405 100644 --- a/domain/consensus/utils/blockheader/blockheader.go +++ b/domain/consensus/utils/blockheader/blockheader.go @@ -2,6 +2,7 @@ package blockheader import ( "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" + "github.com/kaspanet/kaspad/domain/consensus/utils/pow" "math/big" ) @@ -18,6 +19,9 @@ type blockHeader struct { blueScore uint64 blueWork *big.Int pruningPoint *externalapi.DomainHash + + isBlockLevelCached bool + blockLevel int } func (bh *blockHeader) BlueScore() uint64 { @@ -41,10 +45,12 @@ func (bh *blockHeader) ToImmutable() externalapi.BlockHeader { } func (bh *blockHeader) SetNonce(nonce uint64) { + bh.isBlockLevelCached = false bh.nonce = nonce } func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) { + bh.isBlockLevelCached = false bh.timeInMilliseconds = timeInMilliseconds } @@ -56,16 +62,12 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents { return bh.parents } -func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents { - if len(bh.parents) <= level { +func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents { + if len(bh.parents) == 0 { return externalapi.BlockLevelParents{} } - return bh.parents[level] -} - -func (bh *blockHeader) DirectParents() 
externalapi.BlockLevelParents { - return bh.ParentsAtLevel(0) + return bh.parents[0] } func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash { @@ -177,6 +179,15 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader { return bh.clone() } +func (bh *blockHeader) BlockLevel() int { + if !bh.isBlockLevelCached { + bh.blockLevel = pow.BlockLevel(bh) + bh.isBlockLevelCached = true + } + + return bh.blockLevel +} + // NewImmutableBlockHeader returns a new immutable header func NewImmutableBlockHeader( version uint16, diff --git a/domain/consensus/utils/blockheader/blockheader_test.go b/domain/consensus/utils/blockheader/blockheader_test.go index a5b7244f2..04fccaac2 100644 --- a/domain/consensus/utils/blockheader/blockheader_test.go +++ b/domain/consensus/utils/blockheader/blockheader_test.go @@ -36,6 +36,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 8, big.NewInt(9), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}), + false, + 0, }, expectedResult: false, }, @@ -55,6 +57,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, headersToCompareTo: []headerToCompare{ { @@ -75,6 +79,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: true, }, @@ -92,6 +98,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -111,6 +119,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -128,6 +138,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -145,6 +157,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -162,6 +176,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -179,6 +195,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -196,6 +214,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -213,6 +233,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -230,6 +252,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -247,6 +271,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -264,6 +290,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 100, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, expectedResult: false, }, @@ -281,6 +309,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(100), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, }, 
expectedResult: false, }, @@ -298,6 +328,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) { 9, big.NewInt(10), externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), + false, + 0, }, expectedResult: false, }, diff --git a/domain/dagconfig/params.go b/domain/dagconfig/params.go index 33a5224e5..7c92d5cd1 100644 --- a/domain/dagconfig/params.go +++ b/domain/dagconfig/params.go @@ -186,6 +186,8 @@ type Params struct { FixedSubsidySwitchPruningPointInterval uint64 FixedSubsidySwitchHashRateThreshold *big.Int + + HardForkOmitGenesisFromParentsDAAScore uint64 } // NormalizeRPCServerAddress returns addr with the current network default @@ -264,6 +266,7 @@ var MainnetParams = Params{ PruningProofM: defaultPruningProofM, FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval, FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000), + HardForkOmitGenesisFromParentsDAAScore: 1265814, } // TestnetParams defines the network parameters for the test Kaspa network. @@ -326,6 +329,7 @@ var TestnetParams = Params{ PruningProofM: defaultPruningProofM, FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval, FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000), + HardForkOmitGenesisFromParentsDAAScore: 2e6, } // SimnetParams defines the network parameters for the simulation test Kaspa @@ -392,6 +396,7 @@ var SimnetParams = Params{ PruningProofM: defaultPruningProofM, FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval, FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000), + HardForkOmitGenesisFromParentsDAAScore: 5, } // DevnetParams defines the network parameters for the development Kaspa network. 
@@ -454,6 +459,7 @@ var DevnetParams = Params{ PruningProofM: defaultPruningProofM, FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval, FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000), + HardForkOmitGenesisFromParentsDAAScore: 3000, } var ( diff --git a/infrastructure/config/config.go b/infrastructure/config/config.go index a3f8bdc85..cb6365889 100644 --- a/infrastructure/config/config.go +++ b/infrastructure/config/config.go @@ -121,6 +121,7 @@ type Flags struct { MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"` UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"` IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)'"` + AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"` EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"` NetworkFlags ServiceOptions *ServiceOptions diff --git a/infrastructure/config/network.go b/infrastructure/config/network.go index 75549b658..890c54667 100644 --- a/infrastructure/config/network.go +++ b/infrastructure/config/network.go @@ -48,6 +48,7 @@ type overrideDAGParamsConfig struct { EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"` DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"` SkipProofOfWork *bool `json:"skipProofOfWork"` + HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"` } // ResolveNetwork parses the network command line argument and sets NetParams accordingly. 
diff --git a/infrastructure/network/connmanager/connection_requests.go b/infrastructure/network/connmanager/connection_requests.go index c01613678..f5161ffe9 100644 --- a/infrastructure/network/connmanager/connection_requests.go +++ b/infrastructure/network/connmanager/connection_requests.go @@ -66,7 +66,7 @@ func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) { log.Debugf("Connecting to connection request %s", connReq.address) err := c.initiateConnection(connReq.address) if err != nil { - log.Infof("Couldn't connect to %s: %s", address, err) + log.Infof("Couldn't connect to requested connection %s: %s", address, err) // if connection request is one try - remove from pending and ignore failure if !connReq.isPermanent { delete(c.pendingRequested, address) diff --git a/infrastructure/network/connmanager/outgoing_connections.go b/infrastructure/network/connmanager/outgoing_connections.go index 6951946e0..87328bc13 100644 --- a/infrastructure/network/connmanager/outgoing_connections.go +++ b/infrastructure/network/connmanager/outgoing_connections.go @@ -41,7 +41,7 @@ func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) { err := c.initiateConnection(addressString) if err != nil { - log.Infof("Couldn't connect to %s: %s", addressString, err) + log.Debugf("Couldn't connect to %s: %s", addressString, err) c.addressManager.MarkConnectionFailure(netAddress) continue } diff --git a/infrastructure/os/limits/limits_unix.go b/infrastructure/os/limits/limits_unix.go index d993db9b9..0e99ff8d5 100644 --- a/infrastructure/os/limits/limits_unix.go +++ b/infrastructure/os/limits/limits_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
+//go:build !windows && !plan9 // +build !windows,!plan9 package limits diff --git a/infrastructure/os/signal/signalsigterm.go b/infrastructure/os/signal/signalsigterm.go index c6fb425c6..38ddcf360 100644 --- a/infrastructure/os/signal/signalsigterm.go +++ b/infrastructure/os/signal/signalsigterm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package signal diff --git a/stability-tests/many-tips/main.go b/stability-tests/many-tips/main.go index e553f121e..8623f1dd3 100644 --- a/stability-tests/many-tips/main.go +++ b/stability-tests/many-tips/main.go @@ -115,6 +115,7 @@ func startNode() (teardown func(), err error) { "--logdir", dataDir, "--rpclisten", rpcAddress, "--loglevel", "debug", + "--allow-submit-block-when-not-synced", ) if err != nil { return nil, err diff --git a/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json b/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json index df69961cb..d13694923 100644 --- a/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json +++ b/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json @@ -1 +1 @@ -{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000} +{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505} diff --git a/stability-tests/netsync/node.go b/stability-tests/netsync/node.go index 52a0a4b36..0a5dade40 100644 --- a/stability-tests/netsync/node.go +++ b/stability-tests/netsync/node.go @@ -38,6 +38,7 @@ func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir st "--listen", listen, "--profile", profilePort, "--loglevel", "debug", + "--allow-submit-block-when-not-synced", } if connect 
!= "" { args = append(args, "--connect", connect) diff --git a/stability-tests/simple-sync/start-nodes.go b/stability-tests/simple-sync/start-nodes.go index 41399a514..3ccd0d38b 100644 --- a/stability-tests/simple-sync/start-nodes.go +++ b/stability-tests/simple-sync/start-nodes.go @@ -44,6 +44,7 @@ func startNodes() (teardown func(), err error) { "--rpclisten", syncerRPCAddress, "--listen", syncerListen, "--loglevel", "debug", + "--allow-submit-block-when-not-synced", ) if err != nil { return nil, err diff --git a/testing/integration/config_test.go b/testing/integration/config_test.go index c5365c9e2..5b8df58f8 100644 --- a/testing/integration/config_test.go +++ b/testing/integration/config_test.go @@ -36,6 +36,7 @@ func setConfig(t *testing.T, harness *appHarness) { harness.config.Listeners = []string{harness.p2pAddress} harness.config.RPCListeners = []string{harness.rpcAddress} harness.config.UTXOIndex = harness.utxoIndex + harness.config.AllowSubmitBlockWhenNotSynced = true if harness.overrideDAGParams != nil { harness.config.ActiveNetParams = harness.overrideDAGParams