Compare commits

..

2 Commits

Author SHA1 Message Date
Ori Newman
765dd170e4 Optimizations and header size reduce hardfork (#1853)
* Modify DefaultTimeout to 120 seconds

A temporary workaround for nodes having trouble syncing (currently the download of pruning point related data during IBD takes more than 30 seconds)

* Cache existence in reachability store

* Cache block level in the header

* Fix IBD indication on submit block

* Add hardForkOmitGenesisFromParentsDAAScore logic

* Fix NumThreads bug in the wallet

* Get rid of ParentsAtLevel header method

* Fix a bug in BuildPruningPointProof

* Increase race detector timeout

* Add cache to BuildPruningPointProof

* Add comments and temp comment out go vet

* Fix ParentsAtLevel

* Don't fill empty parents

* Change HardForkOmitGenesisFromParentsDAAScore in fast netsync test

* Add --allow-submit-block-when-not-synced in stability tests

* Fix TestPruning

* Return fast tests

* Fix off by one error on kaspawallet

* Fetch only one block with trusted data at a time

* Update fork DAA score

* Don't ban for unexpected message type

* Fix tests

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
Co-authored-by: Ori Newman <>
2021-11-22 09:00:39 +02:00
Ori Newman
8e362845b3 Update to version 0.11.4 2021-11-21 01:52:56 +02:00
23 changed files with 102 additions and 58 deletions

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -40,13 +41,13 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
return err
}
blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData()
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
for _, block := range blocks {
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block))
for _, blockHash := range pointAndItsAnticone {
err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
if err != nil {
return err
}
@@ -60,3 +61,17 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
log.Debugf("Sent pruning point and its anticone to %s", peer)
}
}
func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
if err != nil {
return err
}
return nil
}

View File

@@ -70,7 +70,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
@@ -123,7 +124,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}

View File

@@ -15,8 +15,6 @@ golint -set_exit_status ./...
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...
go vet -composites=false $FLAGS ./...
go build $FLAGS -o kaspad .
if [ -n "${NO_PARALLEL}" ]

View File

@@ -97,7 +97,7 @@ func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, erro
return nil, err
}
aead, err := getAEAD(password, salt)
aead, err := getAEAD(defaultNumThreads, password, salt)
if err != nil {
return nil, err
}

View File

@@ -23,6 +23,7 @@ var (
defaultAppDir = util.AppDir("kaspawallet", false)
)
// LastVersion is the most up to date file format version
const LastVersion = 1
func defaultKeysFile(netParams *dagconfig.Params) string {
@@ -36,7 +37,7 @@ type encryptedPrivateKeyJSON struct {
type keysFileJSON struct {
Version uint32 `json:"version"`
NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different than 0
NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`.
EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"`
ExtendedPublicKeys []string `json:"publicKeys"`
MinimumSignatures uint32 `json:"minimumSignatures"`
@@ -299,9 +300,18 @@ func (d *File) Save() error {
return nil
}
const defaultNumThreads = 8
func (d *File) numThreads(password []byte) (uint8, error) {
// There's a bug in v0 wallets where the number of threads
// was determined by the number of logical CPUs at the machine,
// which made the authentication non-deterministic across platforms.
// In order to solve it we introduce v1 where the number of threads
// is constant, and brute force the number of threads in v0. After we
// find the right amount via brute force we save the result to the file.
if d.Version != 0 {
return 8, nil
return defaultNumThreads, nil
}
if d.NumThreads != 0 {
@@ -341,7 +351,7 @@ func (d *File) detectNumThreads(password, salt []byte) (uint8, error) {
_, err := getAEAD(i, password, salt)
if err != nil {
const maxTries = 32
if i == maxTries || !strings.Contains(err.Error(), "message authentication failed") {
if i > maxTries || !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {

View File

@@ -137,11 +137,18 @@ func (s *consensus) Init(skipAddingGenesis bool) error {
return nil
}
func (s *consensus) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.PruningPointAndItsAnticoneWithTrustedData()
return s.pruningManager.PruningPointAndItsAnticone()
}
func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash)
}
// BuildBlock builds a block over the current state, with the transactions

View File

@@ -25,7 +25,8 @@ type Consensus interface {
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
PruningPoint() (*DomainHash, error)
PruningPointHeaders() ([]BlockHeader, error)
PruningPointAndItsAnticoneWithTrustedData() ([]*BlockWithTrustedData, error)
PruningPointAndItsAnticone() ([]*DomainHash, error)
BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error)
ClearImportedPruningPointData() error
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error
ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error

View File

@@ -2,6 +2,7 @@ package model
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// ParentsManager is a wrapper around header parents that replaces empty parents with genesis when needed.
type ParentsManager interface {
ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents
Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents

View File

@@ -12,6 +12,7 @@ type PruningManager interface {
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error
UpdatePruningPointIfRequired() error
PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error
PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error)
PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error)
ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error)
}

View File

@@ -215,7 +215,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
}
parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap))
for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; daaScore >= bpb.hardForkOmitGenesisFromParentsDAAScore && ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 {
break
@@ -227,7 +227,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
levelBlocks = append(levelBlocks, &block)
}
parents[blockLevel] = levelBlocks
parents = append(parents, levelBlocks)
}
return parents, nil
}

View File

@@ -87,13 +87,18 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("PruningPointHeaders: %+v", err)
}
pruningPointAndItsAnticoneWithTrustedData, err := tcSyncer.PruningPointAndItsAnticoneWithTrustedData()
pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone()
if err != nil {
t.Fatalf("PruningPointAndItsAnticoneWithTrustedData: %+v", err)
t.Fatalf("PruningPointAndItsAnticone: %+v", err)
}
for _, blockWithTrustedData := range pruningPointAndItsAnticoneWithTrustedData {
_, err := synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
for _, blockHash := range pruningPointAndItsAnticone {
blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash)
if err != nil {
return
}
_, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
}

View File

@@ -11,7 +11,7 @@ type parentsManager struct {
genesisHash *externalapi.DomainHash
}
// New instantiates a new HeadersSelectedTipManager
// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash, hardForkOmitGenesisFromParentsDAAScore uint64) model.ParentsManager {
return &parentsManager{
genesisHash: genesisHash,
@@ -20,14 +20,16 @@ func New(genesisHash *externalapi.DomainHash, hardForkOmitGenesisFromParentsDAAS
}
func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
if len(blockHeader.Parents()) <= level {
if blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore {
return externalapi.BlockLevelParents{pm.genesisHash}
}
return externalapi.BlockLevelParents{}
var parentsAtLevel externalapi.BlockLevelParents
if len(blockHeader.Parents()) > level {
parentsAtLevel = blockHeader.Parents()[level]
}
return blockHeader.Parents()[level]
if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 && blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore {
return externalapi.BlockLevelParents{pm.genesisHash}
}
return parentsAtLevel
}
func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {

View File

@@ -2,7 +2,6 @@ package pruningmanager_test
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"os"
"path/filepath"
@@ -40,7 +39,7 @@ func TestPruning(t *testing.T) {
dagconfig.MainnetParams.Name: "502",
dagconfig.TestnetParams.Name: "502",
dagconfig.DevnetParams.Name: "502",
dagconfig.SimnetParams.Name: "502",
dagconfig.SimnetParams.Name: "503",
},
}
@@ -140,12 +139,11 @@ func TestPruning(t *testing.T) {
// We expect blocks that are within the difficulty adjustment window size of
// the pruning point and its anticone to not get pruned
unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData()
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
if err != nil {
t.Fatalf("pruningPointAndItsAnticone: %+v", err)
}
for _, block := range pruningPointAndItsAnticone {
blockHash := consensushashing.BlockHash(block.Block)
for _, blockHash := range pruningPointAndItsAnticone {
unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
if err != nil {

View File

@@ -907,8 +907,8 @@ func (pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pr
return nil
}
func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticoneWithTrustedData")
func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone")
defer onEnd()
stagingArea := model.NewStagingArea()
@@ -922,34 +922,32 @@ func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*extern
return nil, err
}
blocks := make([]*externalapi.BlockWithTrustedData, 0, len(pruningPointAnticone)+1)
pruningPointWithTrustedData, err := pm.blockWithTrustedData(stagingArea, pruningPoint)
if err != nil {
return nil, err
}
for _, blockHash := range pruningPointAnticone {
blockWithTrustedData, err := pm.blockWithTrustedData(stagingArea, blockHash)
// Sorting the blocks in topological order
var sortErr error
sort.Slice(pruningPointAnticone, func(i, j int) bool {
headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i])
if err != nil {
return nil, err
sortErr = err
return false
}
blocks = append(blocks, blockWithTrustedData)
headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j])
if err != nil {
sortErr = err
return false
}
return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0
})
if sortErr != nil {
return nil, sortErr
}
// Sorting the blocks in topological order
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].Block.Header.BlueWork().Cmp(blocks[j].Block.Header.BlueWork()) < 0
})
// The pruning point should always come first
blocks = append([]*externalapi.BlockWithTrustedData{pruningPointWithTrustedData}, blocks...)
return blocks, nil
return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
}
func (pm *pruningManager) blockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, err

View File

@@ -266,7 +266,7 @@ var MainnetParams = Params{
PruningProofM: defaultPruningProofM,
FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000),
HardForkOmitGenesisFromParentsDAAScore: 2e6,
HardForkOmitGenesisFromParentsDAAScore: 1265814,
}
// TestnetParams defines the network parameters for the test Kaspa network.

View File

@@ -48,6 +48,7 @@ type overrideDAGParamsConfig struct {
EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"`
DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"`
SkipProofOfWork *bool `json:"skipProofOfWork"`
HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
}
// ResolveNetwork parses the network command line argument and sets NetParams accordingly.

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
// +build !windows,!plan9
package limits

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package signal

View File

@@ -115,6 +115,7 @@ func startNode() (teardown func(), err error) {
"--logdir", dataDir,
"--rpclisten", rpcAddress,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err

View File

@@ -1 +1 @@
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000}
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505}

View File

@@ -38,6 +38,7 @@ func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir st
"--listen", listen,
"--profile", profilePort,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
}
if connect != "" {
args = append(args, "--connect", connect)

View File

@@ -44,6 +44,7 @@ func startNodes() (teardown func(), err error) {
"--rpclisten", syncerRPCAddress,
"--listen", syncerListen,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err

View File

@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 11
appPatch uint = 3
appPatch uint = 4
)
// appBuild is defined as a variable so it can be overridden during the build