mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-07-06 21:02:32 +00:00
Optimizations and header size reduction hardfork (#1853)

* Modify DefaultTimeout to 120 seconds. A temporary workaround for nodes having trouble syncing (currently the download of pruning-point-related data during IBD takes more than 30 seconds)
* Cache existence in the reachability store
* Cache the block level in the header
* Fix the IBD indication on submit block
* Add hardForkOmitGenesisFromParentsDAAScore logic
* Fix a NumThreads bug in the wallet
* Get rid of the ParentsAtLevel header method
* Fix a bug in BuildPruningPointProof
* Increase the race detector timeout
* Add a cache to BuildPruningPointProof
* Add comments and temporarily comment out go vet
* Fix ParentsAtLevel
* Don't fill empty parents
* Change HardForkOmitGenesisFromParentsDAAScore in the fast netsync test
* Add --allow-submit-block-when-not-synced in stability tests
* Fix TestPruning
* Return fast tests
* Fix an off-by-one error in kaspawallet
* Fetch only one block with trusted data at a time
* Update the fork DAA score
* Don't ban for unexpected message types
* Fix tests

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
Co-authored-by: Ori Newman <>
This commit is contained in:
parent 8e362845b3
commit 765dd170e4

.github/workflows/race.yaml (vendored, 2 lines changed):
@@ -46,4 +46,4 @@ jobs:
       run: |
         git checkout "${{ env.run_on }}"
         git status
-        go test -race ./...
+        go test -timeout 20m -race ./...
@@ -8,7 +8,7 @@ import (

 // DefaultTimeout is the default duration to wait for enqueuing/dequeuing
 // to/from routes.
-const DefaultTimeout = 30 * time.Second
+const DefaultTimeout = 120 * time.Second

 // ErrPeerWithSameIDExists signifies that a peer with the same ID already exists.
 var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")
@@ -4,6 +4,7 @@ import (
 	"github.com/kaspanet/kaspad/app/appmessage"
 	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
 	"github.com/kaspanet/kaspad/domain"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )

@@ -40,13 +41,13 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
 		return err
 	}

-	blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData()
+	pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
 	if err != nil {
 		return err
 	}

-	for _, block := range blocks {
-		err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block))
+	for _, blockHash := range pointAndItsAnticone {
+		err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
 		if err != nil {
 			return err
 		}

@@ -60,3 +61,17 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
 		log.Debugf("Sent pruning point and its anticone to %s", peer)
 	}
 }
+
+func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
+	blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
+	if err != nil {
+		return err
+	}
+
+	err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
@@ -70,7 +70,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO
 	}
 	msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
 	if !ok {
-		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
+		// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
+		return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
 			"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
 	}
 	return msgRequestPruningPointUTXOSet, nil

@@ -123,7 +124,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
 	}
 	_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
 	if !ok {
-		return protocolerrors.Errorf(true, "received unexpected message type. "+
+		// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
+		return protocolerrors.Errorf(false, "received unexpected message type. "+
 			"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
 	}
@@ -130,7 +130,7 @@ func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *exte
 		return err
 	}

-	log.Debugf("Headers downloaded from peer %s", flow.peer)
+	log.Infof("Headers downloaded from peer %s", flow.peer)

 	highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
 	if err != nil {
@@ -14,9 +14,14 @@ import (
 func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
 	submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)

-	if context.ProtocolManager.IsIBDRunning() {
+	isSynced, err := context.ProtocolManager.ShouldMine()
+	if err != nil {
+		return nil, err
+	}
+
+	if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
 		return &appmessage.SubmitBlockResponseMessage{
-			Error:        appmessage.RPCErrorf("Block not submitted - IBD is running"),
+			Error:        appmessage.RPCErrorf("Block not submitted - node is not synced"),
 			RejectReason: appmessage.RejectReasonIsInIBD,
 		}, nil
 	}
@@ -15,8 +15,6 @@ golint -set_exit_status ./...

 staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...

-go vet -composites=false $FLAGS ./...
-
 go build $FLAGS -o kaspad .

 if [ -n "${NO_PARALLEL}" ]
@@ -56,6 +56,7 @@ func create(conf *createConfig) error {
 	}

 	file := keys.File{
+		Version:            keys.LastVersion,
 		EncryptedMnemonics: encryptedMnemonics,
 		ExtendedPublicKeys: extendedPublicKeys,
 		MinimumSignatures:  conf.MinimumSignatures,
@@ -97,7 +97,7 @@ func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, erro
 		return nil, err
 	}

-	aead, err := getAEAD(password, salt)
+	aead, err := getAEAD(defaultNumThreads, password, salt)
 	if err != nil {
 		return nil, err
 	}
@@ -10,6 +10,7 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"strings"

 	"github.com/kaspanet/kaspad/domain/dagconfig"
 	"github.com/kaspanet/kaspad/util"
@@ -22,6 +23,9 @@ var (
 	defaultAppDir = util.AppDir("kaspawallet", false)
 )

+// LastVersion is the most up to date file format version
+const LastVersion = 1
+
 func defaultKeysFile(netParams *dagconfig.Params) string {
 	return filepath.Join(defaultAppDir, netParams.Name, "keys.json")
 }
@@ -32,6 +36,8 @@ type encryptedPrivateKeyJSON struct {
 }

 type keysFileJSON struct {
+	Version              uint32                     `json:"version"`
+	NumThreads           uint8                      `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`.
 	EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"`
 	ExtendedPublicKeys   []string                   `json:"publicKeys"`
 	MinimumSignatures    uint32                     `json:"minimumSignatures"`
@@ -49,6 +55,8 @@ type EncryptedMnemonic struct {

 // File holds all the data related to the wallet keys
 type File struct {
+	Version            uint32
+	NumThreads         uint8 // This field is ignored for versions different than 0
 	EncryptedMnemonics []*EncryptedMnemonic
 	ExtendedPublicKeys []string
 	MinimumSignatures  uint32
@@ -69,6 +77,8 @@ func (d *File) toJSON() *keysFileJSON {
 	}

 	return &keysFileJSON{
+		Version:              d.Version,
+		NumThreads:           d.NumThreads,
 		EncryptedPrivateKeys: encryptedPrivateKeysJSON,
 		ExtendedPublicKeys:   d.ExtendedPublicKeys,
 		MinimumSignatures:    d.MinimumSignatures,
@@ -80,6 +90,8 @@ func (d *File) toJSON() *keysFileJSON {
 }

 func (d *File) fromJSON(fileJSON *keysFileJSON) error {
+	d.Version = fileJSON.Version
+	d.NumThreads = fileJSON.NumThreads
 	d.MinimumSignatures = fileJSON.MinimumSignatures
 	d.ECDSA = fileJSON.ECDSA
 	d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys
@@ -181,10 +193,20 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
 	if len(password) == 0 {
 		password = getPassword("Password:")
 	}

+	var numThreads uint8
+	if len(d.EncryptedMnemonics) > 0 {
+		var err error
+		numThreads, err = d.numThreads(password)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	privateKeys := make([]string, len(d.EncryptedMnemonics))
 	for i, encryptedPrivateKey := range d.EncryptedMnemonics {
 		var err error
-		privateKeys[i], err = decryptMnemonic(encryptedPrivateKey, password)
+		privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password)
 		if err != nil {
 			return nil, err
 		}
@@ -278,13 +300,73 @@ func (d *File) Save() error {
 	return nil
 }

-func getAEAD(password, salt []byte) (cipher.AEAD, error) {
-	key := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32)
+const defaultNumThreads = 8
+
+func (d *File) numThreads(password []byte) (uint8, error) {
+	// There's a bug in v0 wallets where the number of threads
+	// was determined by the number of logical CPUs at the machine,
+	// which made the authentication non-deterministic across platforms.
+	// In order to solve it we introduce v1 where the number of threads
+	// is constant, and brute force the number of threads in v0. After we
+	// find the right amount via brute force we save the result to the file.
+
+	if d.Version != 0 {
+		return defaultNumThreads, nil
+	}
+
+	if d.NumThreads != 0 {
+		return d.NumThreads, nil
+	}
+
+	numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0].salt)
+	if err != nil {
+		return 0, err
+	}
+
+	d.NumThreads = numThreads
+	err = d.Save()
+	if err != nil {
+		return 0, err
+	}
+
+	return numThreads, nil
+}
+
+func (d *File) detectNumThreads(password, salt []byte) (uint8, error) {
+	numCPU := uint8(runtime.NumCPU())
+	_, err := getAEAD(numCPU, password, salt)
+	if err != nil {
+		if !strings.Contains(err.Error(), "message authentication failed") {
+			return 0, err
+		}
+	} else {
+		return numCPU, nil
+	}
+
+	for i := uint8(1); ; i++ {
+		if i == numCPU {
+			continue
+		}
+
+		_, err := getAEAD(i, password, salt)
+		if err != nil {
+			const maxTries = 32
+			if i > maxTries || !strings.Contains(err.Error(), "message authentication failed") {
+				return 0, err
+			}
+		} else {
+			return i, nil
+		}
+	}
+}
+
+func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) {
+	key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32)
 	return chacha20poly1305.NewX(key)
 }

-func decryptMnemonic(encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
-	aead, err := getAEAD(password, encryptedPrivateKey.salt)
+func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
+	aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt)
 	if err != nil {
 		return "", err
 	}
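The brute force in detectNumThreads works because Argon2id mixes its parallelism parameter into the derived key: the same password and salt hashed with a different thread count yield a different key, so the ChaCha20-Poly1305 open fails with "message authentication failed". A minimal standalone sketch of that property, using only golang.org/x/crypto (the password and salt values here are made up for illustration):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	password := []byte("hunter2")
	salt := []byte("0123456789abcdef")

	// Same inputs, different thread counts: Argon2id derives different keys.
	// This is why a v0 wallet encrypted on an 8-core machine cannot be opened
	// with a 4-thread key, and why the wallet must brute-force the count.
	key4 := argon2.IDKey(password, salt, 1, 64*1024, 4, 32)
	key8 := argon2.IDKey(password, salt, 1, 64*1024, 8, 32)

	fmt.Println("keys equal:", bytes.Equal(key4, key8)) // prints: keys equal: false
}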
|
@ -137,11 +137,18 @@ func (s *consensus) Init(skipAddingGenesis bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *consensus) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
|
func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
|
||||||
s.lock.Lock()
|
s.lock.Lock()
|
||||||
defer s.lock.Unlock()
|
defer s.lock.Unlock()
|
||||||
|
|
||||||
return s.pruningManager.PruningPointAndItsAnticoneWithTrustedData()
|
return s.pruningManager.PruningPointAndItsAnticone()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
|
||||||
|
s.lock.Lock()
|
||||||
|
defer s.lock.Unlock()
|
||||||
|
|
||||||
|
return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildBlock builds a block over the current state, with the transactions
|
// BuildBlock builds a block over the current state, with the transactions
|
||||||
@ -716,7 +723,10 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) {
|
|||||||
func (s *consensus) ResolveVirtual() error {
|
func (s *consensus) ResolveVirtual() error {
|
||||||
// In order to prevent a situation that the consensus lock is held for too much time, we
|
// In order to prevent a situation that the consensus lock is held for too much time, we
|
||||||
// release the lock each time resolve 100 blocks.
|
// release the lock each time resolve 100 blocks.
|
||||||
for {
|
for i := 0; ; i++ {
|
||||||
|
if i%10 == 0 {
|
||||||
|
log.Infof("Resolving virtual. This may take some time...")
|
||||||
|
}
|
||||||
var isCompletelyResolved bool
|
var isCompletelyResolved bool
|
||||||
var err error
|
var err error
|
||||||
func() {
|
func() {
|
||||||
@ -730,6 +740,7 @@ func (s *consensus) ResolveVirtual() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if isCompletelyResolved {
|
if isCompletelyResolved {
|
||||||
|
log.Infof("Resolved virtual")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
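The loop above resolves the virtual in batches so the consensus lock is released between batches rather than held for the whole operation: the lock is taken inside an immediately-invoked closure, so its deferred Unlock runs at the end of every batch. A simplified sketch of the pattern, with a hypothetical resolveBatch standing in for the real resolution step:

package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

// resolveBatch is a hypothetical stand-in for resolving up to maxBlocks
// blocks; it reports whether everything is now resolved.
func resolveBatch(maxBlocks int) (isCompletelyResolved bool) { return true }

func resolveVirtual() {
	for i := 0; ; i++ {
		if i%10 == 0 {
			fmt.Println("Resolving virtual. This may take some time...")
		}
		var isCompletelyResolved bool
		// Locking inside an immediately-invoked closure means the deferred
		// Unlock fires per batch, not at the end of the whole loop, so
		// other consensus users can interleave between batches.
		func() {
			mu.Lock()
			defer mu.Unlock()
			isCompletelyResolved = resolveBatch(100)
		}()
		if isCompletelyResolved {
			fmt.Println("Resolved virtual")
			return
		}
	}
}

func main() { resolveVirtual() }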
@@ -6,7 +6,9 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
+	"github.com/kaspanet/kaspad/infrastructure/db/database"
 	"github.com/kaspanet/kaspad/util/staging"
+	"github.com/pkg/errors"
 )

 var reachabilityDataBucketName = []byte("reachability-data")

@@ -50,6 +52,8 @@ func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool
 	return rds.stagingShard(stagingArea).isStaged()
 }

+var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found")
+
 // ReachabilityData returns the reachabilityData associated with the given blockHash
 func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
 	stagingShard := rds.stagingShard(stagingArea)

@@ -59,10 +63,16 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
 	}

 	if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
+		if reachabilityData == nil {
+			return nil, errNotFound
+		}
 		return reachabilityData.(model.ReachabilityData), nil
 	}

 	reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
+	if database.IsNotFoundError(err) {
+		rds.reachabilityDataCache.Add(blockHash, nil)
+	}
 	if err != nil {
 		return nil, err
 	}

@@ -76,17 +86,15 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
 }

 func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
-	stagingShard := rds.stagingShard(stagingArea)
-
-	if _, ok := stagingShard.reachabilityData[*blockHash]; ok {
-		return true, nil
+	_, err := rds.ReachabilityData(dbContext, stagingArea, blockHash)
+	if database.IsNotFoundError(err) {
+		return false, nil
 	}
-
-	if rds.reachabilityDataCache.Has(blockHash) {
-		return true, nil
+	if err != nil {
+		return false, err
 	}

-	return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash))
+	return true, nil
 }

 // ReachabilityReindexRoot returns the current reachability reindex root
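This is the "cache existence in reachability store" change from the commit message: the LRU now stores nil for hashes that were looked up and found missing, so both ReachabilityData and HasReachabilityData (which now delegates to it) can answer repeated misses without another database round-trip. A simplified sketch of the negative-caching pattern, using a plain map in place of kaspad's LRU and hypothetical names throughout:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type store struct {
	db    map[string]string  // backing store
	cache map[string]*string // nil value = cached "known absent"
}

func (s *store) get(key string) (string, error) {
	if v, ok := s.cache[key]; ok {
		if v == nil {
			return "", errNotFound // negative hit: skip the backing store
		}
		return *v, nil
	}
	v, ok := s.db[key]
	if !ok {
		s.cache[key] = nil // remember the miss
		return "", errNotFound
	}
	s.cache[key] = &v
	return v, nil
}

func (s *store) has(key string) bool {
	_, err := s.get(key) // existence checks share the same caches
	return err == nil
}

func main() {
	s := &store{db: map[string]string{"a": "1"}, cache: map[string]*string{}}
	// The second has("b") is answered from the negative cache.
	fmt.Println(s.has("a"), s.has("b"), s.has("b")) // true false false
}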
@@ -4,6 +4,7 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
+	parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
 	"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
 	"io/ioutil"

@@ -158,12 +159,17 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	dagTraversalManager := dagTraversalManagers[0]

 	// Processes
+	parentsManager := parentssanager.New(config.GenesisHash, config.HardForkOmitGenesisFromParentsDAAScore)
 	blockParentBuilder := blockparentbuilder.New(
 		dbManager,
 		blockHeaderStore,
 		dagTopologyManager,
+		parentsManager,
 		reachabilityDataStore,
 		pruningStore,
+
+		config.HardForkOmitGenesisFromParentsDAAScore,
+		config.GenesisHash,
 	)
 	pastMedianTimeManager := f.pastMedianTimeConsructor(
 		config.TimestampDeviationTolerance,

@@ -316,6 +322,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		finalityManager,
 		blockParentBuilder,
 		pruningManager,
+		parentsManager,

 		pruningStore,
 		blockStore,

@@ -403,6 +410,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		ghostdagManagers,
 		reachabilityManagers,
 		dagTraversalManagers,
+		parentsManager,

 		ghostdagDataStores,
 		pruningStore,

@@ -581,7 +589,7 @@ func dagStores(config *Config,
 			ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
 		} else {
 			blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false)
-			reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 200, false)
+			reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 86400, false)
 			ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false)
 		}
 	}
@@ -58,7 +58,6 @@ type BlockHeader interface {
 type BaseBlockHeader interface {
 	Version() uint16
 	Parents() []BlockLevelParents
-	ParentsAtLevel(level int) BlockLevelParents
 	DirectParents() BlockLevelParents
 	HashMerkleRoot() *DomainHash
 	AcceptedIDMerkleRoot() *DomainHash

@@ -70,6 +69,7 @@ type BaseBlockHeader interface {
 	BlueScore() uint64
 	BlueWork() *big.Int
 	PruningPoint() *DomainHash
+	BlockLevel() int
 	Equal(other BaseBlockHeader) bool
 }
@@ -25,7 +25,8 @@ type Consensus interface {
 	GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
 	PruningPoint() (*DomainHash, error)
 	PruningPointHeaders() ([]BlockHeader, error)
-	PruningPointAndItsAnticoneWithTrustedData() ([]*BlockWithTrustedData, error)
+	PruningPointAndItsAnticone() ([]*DomainHash, error)
+	BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error)
 	ClearImportedPruningPointData() error
 	AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error
 	ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error
@@ -5,5 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 // BlockParentBuilder exposes a method to build super-block parents for
 // a given set of direct parents
 type BlockParentBuilder interface {
-	BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
+	BuildParents(stagingArea *StagingArea,
+		daaScore uint64,
+		directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
 }
@@ -0,0 +1,9 @@
+package model
+
+import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
+// ParentsManager is a wrapper above header parents that replaces empty parents with genesis when needed.
+type ParentsManager interface {
+	ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents
+	Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents
+}
@@ -12,6 +12,7 @@ type PruningManager interface {
 	AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error
 	UpdatePruningPointIfRequired() error
 	PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error
-	PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error)
+	PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error)
 	ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
+	BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error)
 }
@@ -183,10 +183,16 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingAr
 func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction,
 	newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) {

-	parents, err := bb.newBlockParents(stagingArea)
+	daaScore, err := bb.newBlockDAAScore(stagingArea)
 	if err != nil {
 		return nil, err
 	}
+
+	parents, err := bb.newBlockParents(stagingArea, daaScore)
+	if err != nil {
+		return nil, err
+	}
+
 	timeInMilliseconds, err := bb.newBlockTime(stagingArea)
 	if err != nil {
 		return nil, err

@@ -204,10 +210,6 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
 	if err != nil {
 		return nil, err
 	}
-	daaScore, err := bb.newBlockDAAScore(stagingArea)
-	if err != nil {
-		return nil, err
-	}
 	blueWork, err := bb.newBlockBlueWork(stagingArea)
 	if err != nil {
 		return nil, err

@@ -233,12 +235,12 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
 	), nil
 }

-func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]externalapi.BlockLevelParents, error) {
+func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) {
 	virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash)
 	if err != nil {
 		return nil, err
 	}
-	return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
+	return bb.blockParentBuilder.BuildParents(stagingArea, daaScore, virtualBlockRelations.Parents)
 }

 func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {
@@ -83,7 +83,7 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
 		return nil, err
 	}

-	parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
+	parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes)
 	if err != nil {
 		return nil, err
 	}
@@ -5,7 +5,6 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"github.com/pkg/errors"
 )

@@ -13,8 +12,12 @@ type blockParentBuilder struct {
 	databaseContext       model.DBManager
 	blockHeaderStore      model.BlockHeaderStore
 	dagTopologyManager    model.DAGTopologyManager
+	parentsManager        model.ParentsManager
 	reachabilityDataStore model.ReachabilityDataStore
 	pruningStore          model.PruningStore
+
+	hardForkOmitGenesisFromParentsDAAScore uint64
+	genesisHash                            *externalapi.DomainHash
 }

 // New creates a new instance of a BlockParentBuilder

@@ -22,20 +25,29 @@ func New(
 	databaseContext model.DBManager,
 	blockHeaderStore model.BlockHeaderStore,
 	dagTopologyManager model.DAGTopologyManager,
+	parentsManager model.ParentsManager,
+
 	reachabilityDataStore model.ReachabilityDataStore,
 	pruningStore model.PruningStore,
+
+	hardForkOmitGenesisFromParentsDAAScore uint64,
+	genesisHash *externalapi.DomainHash,
 ) model.BlockParentBuilder {
 	return &blockParentBuilder{
 		databaseContext:    databaseContext,
 		blockHeaderStore:   blockHeaderStore,
 		dagTopologyManager: dagTopologyManager,
-		reachabilityDataStore: reachabilityDataStore,
-		pruningStore:          pruningStore,
+		parentsManager:     parentsManager,
+
+		reachabilityDataStore:                  reachabilityDataStore,
+		pruningStore:                           pruningStore,
+		hardForkOmitGenesisFromParentsDAAScore: hardForkOmitGenesisFromParentsDAAScore,
+		genesisHash:                            genesisHash,
 	}
 }

 func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
-	directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
+	daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {

 	// Later on we'll mutate direct parent hashes, so we first clone it.
 	directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))

@@ -93,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 	// all the block levels they occupy
 	for _, directParentHeader := range directParentHeaders {
 		directParentHash := consensushashing.HeaderHash(directParentHeader)
-		blockLevel := pow.BlockLevel(directParentHeader)
+		blockLevel := directParentHeader.BlockLevel()
 		for i := 0; i <= blockLevel; i++ {
 			if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
 				candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)

@@ -116,7 +128,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 	}

 	for _, directParentHeader := range directParentHeaders {
-		for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
+		for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) {
 			isEmptyLevel := false
 			if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
 				candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)

@@ -145,7 +157,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 			} else {
 				for childHash, childHeader := range virtualGenesisChildrenHeaders {
 					childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
-					if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
+					if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) {
 						referenceBlocks = append(referenceBlocks, &childHash)
 					}
 				}

@@ -203,14 +215,19 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 		}
 	}

-	parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
+	parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap))
 	for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
+		if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; daaScore >= bpb.hardForkOmitGenesisFromParentsDAAScore && ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 {
+			break
+		}
+
 		levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
 		for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
 			block := block // Assign to a new pointer to avoid `range` pointer reuse
 			levelBlocks = append(levelBlocks, &block)
 		}
-		parents[blockLevel] = levelBlocks
+
+		parents = append(parents, levelBlocks)
 	}
 	return parents, nil
 }
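The new break condition in the last hunk is where the header size reduction happens: once the fork DAA score is reached, the first level whose only candidate is genesis terminates the loop, so that level and everything above it are never serialized into the header. A simplified model of the truncation rule (plain strings stand in for domain hashes, and the pre/post-hardfork switch is a boolean here rather than a DAA-score comparison):

package main

import "fmt"

const genesis = "genesis"

// buildLevels keeps per-level parent sets, stopping at the first level whose
// only candidate is genesis (the post-hardfork rule sketched from the diff;
// pre-hardfork every level was emitted, genesis-only levels included).
func buildLevels(candidates []map[string]bool, postHardFork bool) [][]string {
	parents := make([][]string, 0, len(candidates))
	for level := 0; level < len(candidates); level++ {
		if postHardFork && len(candidates[level]) == 1 && candidates[level][genesis] {
			break // this level and all higher levels would only reference genesis
		}
		levelBlocks := make([]string, 0, len(candidates[level]))
		for block := range candidates[level] {
			levelBlocks = append(levelBlocks, block)
		}
		parents = append(parents, levelBlocks)
	}
	return parents
}

func main() {
	candidates := []map[string]bool{
		{"a": true, "b": true},
		{"c": true},
		{genesis: true}, // from here up, only genesis remains
		{genesis: true},
	}
	fmt.Println(len(buildLevels(candidates, false))) // 4
	fmt.Println(len(buildLevels(candidates, true)))  // 2: the header drops two levels
}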
@@ -87,13 +87,18 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
 		t.Fatalf("PruningPointHeaders: %+v", err)
 	}

-	pruningPointAndItsAnticoneWithTrustedData, err := tcSyncer.PruningPointAndItsAnticoneWithTrustedData()
+	pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone()
 	if err != nil {
-		t.Fatalf("PruningPointAndItsAnticoneWithTrustedData: %+v", err)
+		t.Fatalf("PruningPointAndItsAnticone: %+v", err)
 	}

-	for _, blockWithTrustedData := range pruningPointAndItsAnticoneWithTrustedData {
-		_, err := synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
+	for _, blockHash := range pruningPointAndItsAnticone {
+		blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash)
+		if err != nil {
+			return
+		}
+
+		_, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
 		if err != nil {
 			t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
 		}
@@ -6,7 +6,6 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/pkg/errors"
 )

@@ -63,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
 		return err
 	}
 	if !hasReachabilityData {
-		blockLevel := pow.BlockLevel(header)
+		blockLevel := header.BlockLevel()
 		for i := 0; i <= blockLevel; i++ {
 			err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
 			if err != nil {

@@ -195,7 +194,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
 }

 func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
-	expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
+	expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents())
 	if err != nil {
 		return err
 	}
@@ -37,6 +37,7 @@ type blockValidator struct {
 	finalityManager    model.FinalityManager
 	blockParentBuilder model.BlockParentBuilder
 	pruningManager     model.PruningManager
+	parentsManager     model.ParentsManager

 	blockStore         model.BlockStore
 	ghostdagDataStores []model.GHOSTDAGDataStore

@@ -72,6 +73,7 @@ func New(powMax *big.Int,
 	finalityManager model.FinalityManager,
 	blockParentBuilder model.BlockParentBuilder,
 	pruningManager model.PruningManager,
+	parentsManager model.ParentsManager,

 	pruningStore model.PruningStore,
 	blockStore model.BlockStore,

@@ -108,6 +110,7 @@ func New(powMax *big.Int,
 		finalityManager:    finalityManager,
 		blockParentBuilder: blockParentBuilder,
 		pruningManager:     pruningManager,
+		parentsManager:     parentsManager,

 		pruningStore: pruningStore,
 		blockStore:   blockStore,
@@ -67,9 +67,9 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
 	header externalapi.BlockHeader,
 	isBlockWithTrustedData bool) error {

-	for level := 0; level <= pow.BlockLevel(header); level++ {
+	for level := 0; level <= header.BlockLevel(); level++ {
 		var parents []*externalapi.DomainHash
-		for _, parent := range header.ParentsAtLevel(level) {
+		for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
 			_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
 			isNotFoundError := database.IsNotFoundError(err)
 			if !isNotFoundError && err != nil {

@@ -116,7 +116,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
 		return err
 	}

-	blockLevel := pow.BlockLevel(header)
+	blockLevel := header.BlockLevel()
 	for i := 1; i <= blockLevel; i++ {
 		err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
 		if err != nil {
@@ -35,7 +35,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
 	var selectedTip *externalapi.DomainHash
 	isCompletelyResolved := true
 	for _, tip := range tips {
-		log.Infof("Resolving tip %s", tip)
+		log.Debugf("Resolving tip %s", tip)
 		resolveStagingArea := model.NewStagingArea()
 		unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip)
 		if err != nil {

@@ -46,7 +46,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
 		hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve
 		if hasMoreUnverifiedThanMax {
 			resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve]
-			log.Infof("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
+			log.Debugf("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
 		}

 		blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true)
domain/consensus/processes/parentsmanager/parentsmanager.go (new file, 47 lines):

@@ -0,0 +1,47 @@
+package parentssanager
+
+import (
+	"github.com/kaspanet/kaspad/domain/consensus/model"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
+)
+
+type parentsManager struct {
+	hardForkOmitGenesisFromParentsDAAScore uint64
+	genesisHash                            *externalapi.DomainHash
+}
+
+// New instantiates a new ParentsManager
+func New(genesisHash *externalapi.DomainHash, hardForkOmitGenesisFromParentsDAAScore uint64) model.ParentsManager {
+	return &parentsManager{
+		genesisHash:                            genesisHash,
+		hardForkOmitGenesisFromParentsDAAScore: hardForkOmitGenesisFromParentsDAAScore,
+	}
+}
+
+func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
+	var parentsAtLevel externalapi.BlockLevelParents
+	if len(blockHeader.Parents()) > level {
+		parentsAtLevel = blockHeader.Parents()[level]
+	}
+
+	if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 && blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore {
+		return externalapi.BlockLevelParents{pm.genesisHash}
+	}
+
+	return parentsAtLevel
+}
+
+func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
+	numParents := len(blockHeader.Parents())
+	if blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore {
+		numParents = constants.MaxBlockLevel + 1
+	}
+
+	parents := make([]externalapi.BlockLevelParents, numParents)
+	for i := 0; i < numParents; i++ {
+		parents[i] = pm.ParentsAtLevel(blockHeader, i)
+	}
+
+	return parents
+}
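This manager is the reading-side counterpart of the truncation in BuildParents: levels omitted from a post-hardfork header are reconstructed as an implicit [genesis] for non-genesis blocks. A simplified model of ParentsAtLevel with plain types instead of the externalapi interfaces (the constant values here are made up for illustration):

package main

import "fmt"

const (
	genesis          = "genesis"
	hardForkDAAScore = 1000
)

type header struct {
	parents  [][]string // only the levels actually serialized in the header
	daaScore uint64
}

func parentsAtLevel(h header, level int) []string {
	var atLevel []string
	if len(h.parents) > level {
		atLevel = h.parents[level]
	}
	// Post-hardfork, a missing or empty level on a non-genesis block (here
	// approximated as "has at least one stored level") is an implicit
	// [genesis]: the level was omitted from the header, not actually empty.
	if len(atLevel) == 0 && len(h.parents) > 0 && h.daaScore >= hardForkDAAScore {
		return []string{genesis}
	}
	return atLevel
}

func main() {
	h := header{parents: [][]string{{"a", "b"}, {"c"}}, daaScore: 2000}
	fmt.Println(parentsAtLevel(h, 1)) // [c]       — a stored level
	fmt.Println(parentsAtLevel(h, 3)) // [genesis] — a reconstructed level
}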
@@ -2,7 +2,6 @@ package pruningmanager_test

 import (
 	"encoding/json"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/infrastructure/db/database"
 	"os"
 	"path/filepath"

@@ -40,7 +39,7 @@ func TestPruning(t *testing.T) {
 			dagconfig.MainnetParams.Name: "502",
 			dagconfig.TestnetParams.Name: "502",
 			dagconfig.DevnetParams.Name:  "502",
-			dagconfig.SimnetParams.Name:  "502",
+			dagconfig.SimnetParams.Name:  "503",
 		},
 	}

@@ -140,12 +139,11 @@ func TestPruning(t *testing.T) {
 	// We expect blocks that are within the difficulty adjustment window size of
 	// the pruning point and its anticone to not get pruned
 	unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
-	pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData()
+	pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
 	if err != nil {
 		t.Fatalf("pruningPointAndItsAnticone: %+v", err)
 	}
-	for _, block := range pruningPointAndItsAnticone {
-		blockHash := consensushashing.BlockHash(block.Block)
+	for _, blockHash := range pruningPointAndItsAnticone {
 		unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
 		blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
 		if err != nil {
@@ -907,8 +907,8 @@ func (pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pr
 	return nil
 }

-func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
-	onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticoneWithTrustedData")
+func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone")
 	defer onEnd()

 	stagingArea := model.NewStagingArea()
@@ -922,34 +922,32 @@ func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*extern
 		return nil, err
 	}

-	blocks := make([]*externalapi.BlockWithTrustedData, 0, len(pruningPointAnticone)+1)
-
-	pruningPointWithTrustedData, err := pm.blockWithTrustedData(stagingArea, pruningPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, blockHash := range pruningPointAnticone {
-		blockWithTrustedData, err := pm.blockWithTrustedData(stagingArea, blockHash)
-		if err != nil {
-			return nil, err
-		}
-
-		blocks = append(blocks, blockWithTrustedData)
-	}
-
 	// Sorting the blocks in topological order
-	sort.Slice(blocks, func(i, j int) bool {
-		return blocks[i].Block.Header.BlueWork().Cmp(blocks[j].Block.Header.BlueWork()) < 0
+	var sortErr error
+	sort.Slice(pruningPointAnticone, func(i, j int) bool {
+		headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i])
+		if err != nil {
+			sortErr = err
+			return false
+		}
+
+		headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j])
+		if err != nil {
+			sortErr = err
+			return false
+		}
+
+		return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0
 	})
+	if sortErr != nil {
+		return nil, sortErr
+	}

 	// The pruning point should always come first
-	blocks = append([]*externalapi.BlockWithTrustedData{pruningPointWithTrustedData}, blocks...)
-
-	return blocks, nil
+	return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
 }

-func (pm *pruningManager) blockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
+func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
 	block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash)
 	if err != nil {
 		return nil, err
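Note the error-capture idiom introduced here: sort.Slice's less function cannot return an error, so the first header-lookup failure is stashed in a closed-over sortErr and checked after the sort returns (blue work works as the sort key because it grows monotonically along the DAG, which is what makes the result a topological order). The same pattern in isolation, with illustrative names:

package sketch

import "sort"

// sortByScore orders keys by a score that must be looked up, where the
// lookup can fail. Since sort.Slice's less func can't return an error,
// the first failure is captured in a closed-over variable and checked
// after sorting finishes.
func sortByScore(keys []string, score func(string) (int, error)) error {
	var sortErr error
	sort.Slice(keys, func(i, j int) bool {
		scoreI, err := score(keys[i])
		if err != nil {
			sortErr = err
			return false
		}
		scoreJ, err := score(keys[j])
		if err != nil {
			sortErr = err
			return false
		}
		return scoreI < scoreJ
	})
	// On failure the slice may be partially reordered; callers should
	// discard it, just as the kaspad code returns nil on sortErr.
	return sortErr
}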
domain/consensus/processes/pruningproofmanager/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
+package pruningproofmanager
+
+import "github.com/kaspanet/kaspad/infrastructure/logger"
+
+var log = logger.RegisterSubSystem("PPMN")
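The new log.go follows kaspad's one-logger-per-package convention: RegisterSubSystem ties the "PPMN" tag to this package's log lines, and the package-level log is what the LogAndMeasureExecutionTime calls in the hunks below write to. A sketch of a typical call site, continuing the file above (the function name is illustrative, and logger/log come from the lines already shown):

// Continuing package pruningproofmanager from log.go above.
func measuredOperation() {
	// onEnd logs "measuredOperation" together with its elapsed time
	// when the deferred closure runs.
	onEnd := logger.LogAndMeasureExecutionTime(log, "measuredOperation")
	defer onEnd()

	// ... the measured work ...
}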
@@ -15,8 +15,8 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"github.com/kaspanet/kaspad/infrastructure/db/database"
+	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/pkg/errors"
 	"math/big"
 )
@@ -28,6 +28,7 @@ type pruningProofManager struct {
 	ghostdagManagers     []model.GHOSTDAGManager
 	reachabilityManagers []model.ReachabilityManager
 	dagTraversalManagers []model.DAGTraversalManager
+	parentsManager       model.ParentsManager

 	ghostdagDataStores []model.GHOSTDAGDataStore
 	pruningStore       model.PruningStore
@@ -39,6 +40,9 @@ type pruningProofManager struct {
 	genesisHash   *externalapi.DomainHash
 	k             externalapi.KType
 	pruningProofM uint64
+
+	cachedPruningPoint *externalapi.DomainHash
+	cachedProof        *externalapi.PruningPointProof
 }

 // New instantiates a new PruningManager
@@ -49,6 +53,7 @@ func New(
 	ghostdagManagers []model.GHOSTDAGManager,
 	reachabilityManagers []model.ReachabilityManager,
 	dagTraversalManagers []model.DAGTraversalManager,
+	parentsManager model.ParentsManager,

 	ghostdagDataStores []model.GHOSTDAGDataStore,
 	pruningStore model.PruningStore,
@@ -68,6 +73,7 @@ func New(
 		ghostdagManagers:     ghostdagManagers,
 		reachabilityManagers: reachabilityManagers,
 		dagTraversalManagers: dagTraversalManagers,
+		parentsManager:       parentsManager,

 		ghostdagDataStores: ghostdagDataStores,
 		pruningStore:       pruningStore,
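The new parentsManager dependency replaces direct calls to the header's ParentsAtLevel, which the blockheader hunks further down remove (per the commit note "Get rid of ParentsAtLevel header method"). A plausible minimal shape for such a component is sketched below; it reproduces the old header fallback, where a level that is not stored resolves to an empty set, while kaspad's real manager may additionally substitute the genesis hash for levels the header no longer stores ("Dont fill empty parents"). Types and names here are illustrative.

package sketch

// DomainHash stands in for externalapi.DomainHash.
type DomainHash [32]byte

// BlockLevelParents stands in for externalapi.BlockLevelParents: the
// parent hashes of a block at one level of the proof DAG.
type BlockLevelParents []*DomainHash

// levelHeader is the minimal header surface the manager needs.
type levelHeader interface {
	Parents() []BlockLevelParents
}

// parentsManager centralizes the level-parents lookup that used to live
// on the header itself.
type parentsManager struct{}

func (parentsManager) Parents(h levelHeader) []BlockLevelParents {
	return h.Parents()
}

func (parentsManager) ParentsAtLevel(h levelHeader, level int) BlockLevelParents {
	parents := h.Parents()
	if len(parents) <= level {
		// Fallback for levels the header does not store (the real
		// manager may return the genesis hash here instead).
		return BlockLevelParents{}
	}
	return parents[level]
}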
@@ -83,6 +89,33 @@ func New(
 }

 func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof")
+	defer onEnd()
+
+	pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
+	if err != nil {
+		return nil, err
+	}
+
+	if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) {
+		return ppm.cachedProof, nil
+	}
+
+	proof, err := ppm.buildPruningPointProof(stagingArea)
+	if err != nil {
+		return nil, err
+	}
+
+	ppm.cachedProof = proof
+	ppm.cachedPruningPoint = pruningPoint
+
+	return proof, nil
+}
+
+func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof")
+	defer onEnd()
+
 	pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
 	if err != nil {
 		return nil, err
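BuildPruningPointProof now keeps a one-slot cache keyed on the pruning point hash: while the pruning point hasn't moved, the previously built proof is returned as-is, and a new pruning point simply forces a rebuild. The same pattern in isolation (a sketch using Go 1.18+ generics with illustrative names; kaspad's version needs no mutex because it runs under the consensus lock):

package sketch

import "sync"

// proofCache memoizes the last built value for the key it was built
// against. The mutex only makes this standalone sketch safe to reuse.
type proofCache[K comparable, V any] struct {
	mu    sync.Mutex
	key   *K
	value V
	build func(K) (V, error)
}

func (c *proofCache[K, V]) get(key K) (V, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Cache hit: the value was built for exactly this key.
	if c.key != nil && *c.key == key {
		return c.value, nil
	}
	value, err := c.build(key)
	if err != nil {
		var zero V
		return zero, err
	}
	// Overwrite the single slot; an old key can never come back here,
	// just as the pruning point only ever moves forward.
	c.key, c.value = &key, value
	return value, nil
}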
@@ -97,17 +130,33 @@ func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.Stagin
 		return nil, err
 	}

-	maxLevel := len(pruningPointHeader.Parents()) - 1
+	maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
 	headersByLevel := make(map[int][]externalapi.BlockHeader)
 	selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
-	pruningPointLevel := pow.BlockLevel(pruningPointHeader)
+	pruningPointLevel := pruningPointHeader.BlockLevel()
 	for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
 		var selectedTip *externalapi.DomainHash
 		if blockLevel <= pruningPointLevel {
 			selectedTip = pruningPoint
 		} else {
-			blockLevelParents := pruningPointHeader.ParentsAtLevel(blockLevel)
-			selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, []*externalapi.DomainHash(blockLevelParents)...)
+			blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel)
+			selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents))
+
+			// In a pruned node, some pruning point parents might be missing, but we're guaranteed that its
+			// selected parent is not missing.
+			for _, parent := range blockLevelParents {
+				_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
+				if database.IsNotFoundError(err) {
+					continue
+				}
+				if err != nil {
+					return nil, err
+				}
+
+				selectedTipCandidates = append(selectedTipCandidates, parent)
+			}
+
+			selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...)
 			if err != nil {
 				return nil, err
 			}
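The load-bearing trick above is probing the GHOSTDAG store and treating not-found as "skip this parent" rather than "fail": a pruned node may legitimately lack some of the pruning point's parents, but never its selected parent. A generic standalone version of that filter (the error sentinel and names are illustrative):

package sketch

import "errors"

var errNotFound = errors.New("not found")

// filterKnown keeps only the candidates whose lookup succeeds, skipping
// not-found entries and failing fast on any other error - mirroring how
// the proof builder drops pruned-away parents before choosing a
// selected tip among the survivors.
func filterKnown[T any](candidates []T, lookup func(T) error) ([]T, error) {
	known := make([]T, 0, len(candidates))
	for _, candidate := range candidates {
		err := lookup(candidate)
		if errors.Is(err, errNotFound) {
			continue
		}
		if err != nil {
			return nil, err
		}
		known = append(known, candidate)
	}
	return known, nil
}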
@@ -248,6 +297,9 @@ func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, gho
 }

 func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof")
+	defer onEnd()
+
 	stagingArea := model.NewStagingArea()

 	if len(pruningPointProof.Headers) == 0 {
@@ -257,8 +309,8 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 	level0Headers := pruningPointProof.Headers[0]
 	pruningPointHeader := level0Headers[len(level0Headers)-1]
 	pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
-	pruningPointBlockLevel := pow.BlockLevel(pruningPointHeader)
-	maxLevel := len(pruningPointHeader.Parents()) - 1
+	pruningPointBlockLevel := pruningPointHeader.BlockLevel()
+	maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
 	if maxLevel >= len(pruningPointProof.Headers) {
 		return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
 			"has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1)
@@ -300,15 +352,15 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 		var selectedTip *externalapi.DomainHash
 		for i, header := range headers {
 			blockHash := consensushashing.HeaderHash(header)
-			if pow.BlockLevel(header) < blockLevel {
+			if header.BlockLevel() < blockLevel {
 				return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-					"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
+					"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
 			}

 			blockHeaderStore.Stage(stagingArea, blockHash, header)

 			var parents []*externalapi.DomainHash
-			for _, parent := range header.ParentsAtLevel(blockLevel) {
+			for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
 				_, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
 				if database.IsNotFoundError(err) {
 					continue
@@ -377,7 +429,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 			}
 		}

-		if !selectedTip.Equal(pruningPoint) && !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
+		if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
 			return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+
 				"level %d is not a parent of the pruning point", selectedTip, blockLevel)
 		}
@@ -395,7 +447,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 				return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+
 					"proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel)
 			}
-		} else if !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
+		} else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
 			return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+
 				"proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+
 				"level", selectedTip, blockLevel)
@@ -554,19 +606,22 @@ func (ppm *pruningProofManager) dagProcesses(
 }

 func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof")
+	defer onEnd()
+
 	for blockLevel, headers := range pruningPointProof.Headers {
 		var selectedTip *externalapi.DomainHash
 		for i, header := range headers {
 			blockHash := consensushashing.HeaderHash(header)
-			if pow.BlockLevel(header) < blockLevel {
+			if header.BlockLevel() < blockLevel {
 				return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-					"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
+					"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
 			}

 			ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)

 			var parents []*externalapi.DomainHash
-			for _, parent := range header.ParentsAtLevel(blockLevel) {
+			for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
 				_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
 				if database.IsNotFoundError(err) {
 					continue
@@ -2,6 +2,7 @@ package blockheader

 import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"math/big"
 )

@@ -18,6 +19,9 @@ type blockHeader struct {
 	blueScore    uint64
 	blueWork     *big.Int
 	pruningPoint *externalapi.DomainHash
+
+	isBlockLevelCached bool
+	blockLevel         int
 }

 func (bh *blockHeader) BlueScore() uint64 {
@@ -41,10 +45,12 @@ func (bh *blockHeader) ToImmutable() externalapi.BlockHeader {
 }

 func (bh *blockHeader) SetNonce(nonce uint64) {
+	bh.isBlockLevelCached = false
 	bh.nonce = nonce
 }

 func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) {
+	bh.isBlockLevelCached = false
 	bh.timeInMilliseconds = timeInMilliseconds
 }

@@ -56,16 +62,12 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
 	return bh.parents
 }

-func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
-	if len(bh.parents) <= level {
+func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
+	if len(bh.parents) == 0 {
 		return externalapi.BlockLevelParents{}
 	}

-	return bh.parents[level]
-}
-
-func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
-	return bh.ParentsAtLevel(0)
+	return bh.parents[0]
 }

 func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
@@ -177,6 +179,15 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
 	return bh.clone()
 }

+func (bh *blockHeader) BlockLevel() int {
+	if !bh.isBlockLevelCached {
+		bh.blockLevel = pow.BlockLevel(bh)
+		bh.isBlockLevelCached = true
+	}
+
+	return bh.blockLevel
+}
+
 // NewImmutableBlockHeader returns a new immutable header
 func NewImmutableBlockHeader(
 	version uint16,
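BlockLevel is derived from the header's proof-of-work hash, so it is worth caching, but the cache is only sound because every mutating setter (SetNonce and SetTimeInMilliseconds in the hunks above) clears isBlockLevelCached first. The invariant in miniature, with a cheap stand-in for the expensive derivation:

package sketch

import "hash/fnv"

// memoHeader memoizes a value derived from its mutable fields. Every
// setter must invalidate the cache, otherwise stale derived values leak.
type memoHeader struct {
	nonce         uint64
	isLevelCached bool
	cachedLevel   int
}

func (h *memoHeader) SetNonce(nonce uint64) {
	h.isLevelCached = false // mutation invalidates the derived value
	h.nonce = nonce
}

// Level recomputes the derived value only when the cached copy has been
// invalidated by a setter.
func (h *memoHeader) Level() int {
	if !h.isLevelCached {
		h.cachedLevel = h.computeLevel()
		h.isLevelCached = true
	}
	return h.cachedLevel
}

func (h *memoHeader) computeLevel() int {
	// Placeholder for the expensive derivation (in kaspad this is
	// pow.BlockLevel, computed from the header's PoW hash).
	f := fnv.New32a()
	f.Write([]byte{byte(h.nonce)})
	return int(f.Sum32() % 64)
}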
@@ -36,6 +36,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				8,
 				big.NewInt(9),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -55,6 +57,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			headersToCompareTo: []headerToCompare{
 				{
@@ -75,6 +79,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: true,
 		},
@@ -92,6 +98,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -111,6 +119,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -128,6 +138,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -145,6 +157,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -162,6 +176,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -179,6 +195,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -196,6 +214,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -213,6 +233,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -230,6 +252,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -247,6 +271,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -264,6 +290,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				100,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -281,6 +309,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(100),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -298,6 +328,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -186,6 +186,8 @@ type Params struct {
 	FixedSubsidySwitchPruningPointInterval uint64

 	FixedSubsidySwitchHashRateThreshold *big.Int
+
+	HardForkOmitGenesisFromParentsDAAScore uint64
 }

 // NormalizeRPCServerAddress returns addr with the current network default
@@ -264,6 +266,7 @@ var MainnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 1265814,
 }

 // TestnetParams defines the network parameters for the test Kaspa network.
@@ -326,6 +329,7 @@ var TestnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 2e6,
 }

 // SimnetParams defines the network parameters for the simulation test Kaspa
@@ -392,6 +396,7 @@ var SimnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 5,
 }

 // DevnetParams defines the network parameters for the development Kaspa network.
@@ -454,6 +459,7 @@ var DevnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 3000,
 }

 var (
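Each network gets its own HardForkOmitGenesisFromParentsDAAScore, so the fork activates at a different DAA score per net (and almost immediately on simnet, where it is 5). A hypothetical sketch of how such a threshold gates behavior; the function name is for illustration, not kaspad's actual code:

package sketch

// Params carries only the field relevant to this sketch.
type Params struct {
	HardForkOmitGenesisFromParentsDAAScore uint64
}

// shouldOmitGenesisFromParents gates the new behavior on a block's DAA
// score: below the fork score, genesis may still appear among the level
// parents; from the fork score onward it is omitted.
func shouldOmitGenesisFromParents(params *Params, blockDAAScore uint64) bool {
	return blockDAAScore >= params.HardForkOmitGenesisFromParentsDAAScore
}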
@@ -121,6 +121,7 @@ type Flags struct {
 	MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"`
 	UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"`
 	IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)'"`
+	AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"`
 	EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"`
 	NetworkFlags
 	ServiceOptions *ServiceOptions
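These struct tags are read by the go-flags library kaspad uses for its CLI; hidden:"true" keeps the flag parseable but out of the generated --help, which suits a testing-only escape hatch. A minimal standalone illustration, assuming github.com/jessevdk/go-flags:

package main

import (
	"fmt"

	flags "github.com/jessevdk/go-flags"
)

// options mirrors the tag style of kaspad's Flags struct: a hidden bool
// flag parses normally but never appears in the help text.
type options struct {
	AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Testing-only escape hatch"`
}

func main() {
	var opts options
	// Args would normally come from os.Args[1:]; hardcoded for the sketch.
	_, err := flags.ParseArgs(&opts, []string{"--allow-submit-block-when-not-synced"})
	if err != nil {
		panic(err)
	}
	fmt.Println(opts.AllowSubmitBlockWhenNotSynced) // true
}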
@@ -48,6 +48,7 @@ type overrideDAGParamsConfig struct {
 	EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"`
 	DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"`
 	SkipProofOfWork *bool `json:"skipProofOfWork"`
+	HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
 }

 // ResolveNetwork parses the network command line argument and sets NetParams accordingly.
@@ -66,7 +66,7 @@ func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
 		log.Debugf("Connecting to connection request %s", connReq.address)
 		err := c.initiateConnection(connReq.address)
 		if err != nil {
-			log.Infof("Couldn't connect to %s: %s", address, err)
+			log.Infof("Couldn't connect to requested connection %s: %s", address, err)
 			// if connection request is one try - remove from pending and ignore failure
 			if !connReq.isPermanent {
 				delete(c.pendingRequested, address)
@@ -41,7 +41,7 @@ func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {

 		err := c.initiateConnection(addressString)
 		if err != nil {
-			log.Infof("Couldn't connect to %s: %s", addressString, err)
+			log.Debugf("Couldn't connect to %s: %s", addressString, err)
 			c.addressManager.MarkConnectionFailure(netAddress)
 			continue
 		}
@@ -2,6 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

+//go:build !windows && !plan9
 // +build !windows,!plan9

 package limits
@@ -2,6 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris

 package signal
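Both files gain the Go 1.17 //go:build constraint line alongside the legacy // +build comment; during the transition gofmt keeps the two in sync, and they must select the same platforms. For example:

//go:build !windows && !plan9
// +build !windows,!plan9

// The two lines above encode the same constraint: the new expression
// syntax (&&, ||, !) and the old comma/space form.
package limits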
@@ -115,6 +115,7 @@ func startNode() (teardown func(), err error) {
 		"--logdir", dataDir,
 		"--rpclisten", rpcAddress,
 		"--loglevel", "debug",
+		"--allow-submit-block-when-not-synced",
 	)
 	if err != nil {
 		return nil, err
@@ -1 +1 @@
-{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000}
+{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505}
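This testdata JSON feeds the overrideDAGParamsConfig struct extended earlier; pointer fields make absent keys mean "keep the network default". A hedged sketch of the decode step (struct trimmed to two fields; the real config has many more):

package sketch

import "encoding/json"

// overrideDAGParams mirrors the pointer-field convention of kaspad's
// overrideDAGParamsConfig: a nil field means "not overridden".
type overrideDAGParams struct {
	SkipProofOfWork                        *bool   `json:"skipProofOfWork"`
	HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
}

// decodeOverrides parses the JSON override file; fields left nil keep
// the active network's defaults.
func decodeOverrides(raw []byte) (*overrideDAGParams, error) {
	var cfg overrideDAGParams
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}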
@@ -38,6 +38,7 @@ func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir st
 		"--listen", listen,
 		"--profile", profilePort,
 		"--loglevel", "debug",
+		"--allow-submit-block-when-not-synced",
 	}
 	if connect != "" {
 		args = append(args, "--connect", connect)
@@ -44,6 +44,7 @@ func startNodes() (teardown func(), err error) {
 		"--rpclisten", syncerRPCAddress,
 		"--listen", syncerListen,
 		"--loglevel", "debug",
+		"--allow-submit-block-when-not-synced",
 	)
 	if err != nil {
 		return nil, err
@@ -36,6 +36,7 @@ func setConfig(t *testing.T, harness *appHarness) {
 	harness.config.Listeners = []string{harness.p2pAddress}
 	harness.config.RPCListeners = []string{harness.rpcAddress}
 	harness.config.UTXOIndex = harness.utxoIndex
+	harness.config.AllowSubmitBlockWhenNotSynced = true

 	if harness.overrideDAGParams != nil {
 		harness.config.ActiveNetParams = harness.overrideDAGParams