Compare commits

...

9 Commits

Author SHA1 Message Date
stasatdaglabs
7078dada5c Add a missing space to make go fmt happy. 2021-11-26 09:46:34 +02:00
Elichai Turkel
606b781ca0 Fix bug in RequiredDifficulty and release another version 2021-11-25 21:49:15 +02:00
Elichai Turkel
dbf18d8052 Hard fork - new genesis with the utxo set of the last block (#1856)
* UTXO dump of block 0fca37ca667c2d550a6c4416dad9717e50927128c424fa4edbebc436ab13aeef

* Activate HF immediately and change reward to 1000

* Change protocol version and datadir location

* Delete comments

* Fix zero hash to muhash zero hash in genesis utxo dump check

* Don't omit genesis as direct parent

* Fix tests

* Change subsidy to 500

* Don't assume genesis multiset is empty

* Fix BlockReward test

* Fix TestValidateAndInsertImportedPruningPoint test

* Fix pruning point genesis utxo set

* Fix tests related to mainnet utxo set

* Don't change the difficulty before you have a full window

* Fix TestBlockWindow tests

* Remove global utxo set variable, and persist mainnet utxo deserialization between runs

* Fix last tests

* Make peer banning opt-in

* Small fix for a test

* Fix go lint

* Fix Ori's review comments

* Change DAA score of genesis to checkpoint DAA score and fix all tests

* Fix the BlockLevel bits counting

* Fix some tests and make them run a little faster

* Change datadir name back to kaspa-mainnet and change db path from /data to /datadir

* Last changes for the release and change the version to 0.11.5

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Ori Newman <>
Co-authored-by: msutton <mikisiton2@gmail.com>
2021-11-25 20:18:43 +02:00
Michael Sutton
2a1b38ce7a Fix mergeset limit violation bug (#1855)
* Added a test which reproduces a bug where virtual's mergeset exceeds the mergeset limit -- this test currently fails

* Added a simple condition to fix the mergeset limit bug

* Format issue
2021-11-24 19:34:59 +02:00
Ori Newman
29c410d123 Change HF time 2021-11-22 11:27:17 +02:00
Ori Newman
6e6fabf956 Change HF time 2021-11-22 11:24:55 +02:00
Ori Newman
b04292c97a Don't serve parallel PruningPointAndItsAnticoneRequests 2021-11-22 09:56:30 +02:00
Ori Newman
765dd170e4 Optimizations and header size reduce hardfork (#1853)
* Modify DefaultTimeout to 120 seconds

A temporary workaround for nodes having trouble syncing (currently the download of pruning-point-related data during IBD takes more than 30 seconds)

* Cache existence in reachability store

* Cache block level in the header

* Fix IBD indication on submit block

* Add hardForkOmitGenesisFromParentsDAAScore logic

* Fix NumThreads bug in the wallet

* Get rid of ParentsAtLevel header method

* Fix a bug in BuildPruningPointProof

* Increase race detector timeout

* Add cache to BuildPruningPointProof

* Add comments and temp comment out go vet

* Fix ParentsAtLevel

* Don't fill empty parents

* Change HardForkOmitGenesisFromParentsDAAScore in fast netsync test

* Add --allow-submit-block-when-not-synced in stability tests

* Fix TestPruning

* Return fast tests

* Fix off by one error on kaspawallet

* Fetch only one block with trusted data at a time

* Update fork DAA score

* Don't ban for unexpected message type

* Fix tests

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
Co-authored-by: Ori Newman <>
2021-11-22 09:00:39 +02:00
Ori Newman
8e362845b3 Update to version 0.11.4 2021-11-21 01:52:56 +02:00
75 changed files with 1023 additions and 575 deletions

View File

@@ -46,4 +46,4 @@ jobs:
run: |
git checkout "${{ env.run_on }}"
git status
go test -race ./...
go test -timeout 20m -race ./...

View File

@@ -20,7 +20,10 @@ import (
"github.com/kaspanet/kaspad/version"
)
const leveldbCacheSizeMiB = 256
const (
leveldbCacheSizeMiB = 256
defaultDataDirname = "datadir2"
)
var desiredLimits = &limits.DesiredLimits{
FileLimitWant: 2048,
@@ -159,7 +162,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
// dbPath returns the path to the block database given a database type.
func databasePath(cfg *config.Config) string {
return filepath.Join(cfg.AppDir, "data")
return filepath.Join(cfg.AppDir, defaultDataDirname)
}
func removeDatabase(cfg *config.Config) error {

View File

@@ -12,7 +12,7 @@ import (
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
ProtocolVersion uint32 = 3
// DefaultServices describes the default services that are supported by
// the server.

View File

@@ -8,7 +8,7 @@ import (
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 30 * time.Second
const DefaultTimeout = 120 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exists.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")

View File

@@ -3,8 +3,12 @@ package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"runtime"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
@@ -12,51 +16,80 @@ type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData()
if err != nil {
return err
}
for _, block := range blocks {
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block))
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
for _, blockHash := range pointAndItsAnticone {
err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
}
}
func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
if err != nil {
return err
}
runtime.GC()
return nil
}
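The hunk above serializes these requests with a package-level flag and atomic.CompareAndSwapUint32: a second request arriving while one is in flight fails fast with a non-banning protocol error instead of queueing behind the expensive anticone upload. A minimal, self-contained sketch of the same busy-guard pattern (names here are illustrative, not kaspad APIs):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

// isBusy is 0 while the handler is idle and 1 while a request is in flight.
var isBusy uint32

var errBusy = errors.New("handler is busy with another request")

// handleRequest serves at most one caller at a time; concurrent callers
// fail fast instead of waiting for the long-running request to finish.
func handleRequest(id int) error {
	// Atomically flip 0 -> 1; if another goroutine already did, bail out.
	if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
		return errBusy
	}
	// Always release the flag, even on early return.
	defer atomic.StoreUint32(&isBusy, 0)

	fmt.Printf("request %d: doing expensive work\n", id)
	return nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := handleRequest(id); err != nil {
				fmt.Printf("request %d: %v\n", id, err)
			}
		}(i)
	}
	wg.Wait()
}
```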

View File

@@ -70,7 +70,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
@@ -123,7 +124,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}

View File

@@ -130,7 +130,7 @@ func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *exte
return err
}
log.Debugf("Headers downloaded from peer %s", flow.peer)
log.Infof("Headers downloaded from peer %s", flow.peer)
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
if err != nil {

View File

@@ -15,7 +15,7 @@ import (
// maxProtocolVersion is the maximum protocol version
// this kaspad node supports
const maxProtocolVersion = 1
const maxProtocolVersion = 3
// Peer holds data about a peer.
type Peer struct {

View File

@@ -102,7 +102,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) {
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
if m.context.Config().EnableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
err := m.context.ConnectionManager().Ban(netConnection)

View File

@@ -14,9 +14,14 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)
if context.ProtocolManager.IsIBDRunning() {
isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil {
return nil, err
}
if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block not submitted - IBD is running"),
Error: appmessage.RPCErrorf("Block not submitted - node is not synced"),
RejectReason: appmessage.RejectReasonIsInIBD,
}, nil
}

View File

@@ -15,8 +15,6 @@ golint -set_exit_status ./...
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...
go vet -composites=false $FLAGS ./...
go build $FLAGS -o kaspad .
if [ -n "${NO_PARALLEL}" ]

View File

@@ -56,6 +56,7 @@ func create(conf *createConfig) error {
}
file := keys.File{
Version: keys.LastVersion,
EncryptedMnemonics: encryptedMnemonics,
ExtendedPublicKeys: extendedPublicKeys,
MinimumSignatures: conf.MinimumSignatures,

View File

@@ -97,7 +97,7 @@ func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, erro
return nil, err
}
aead, err := getAEAD(password, salt)
aead, err := getAEAD(defaultNumThreads, password, salt)
if err != nil {
return nil, err
}

View File

@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util"
@@ -22,6 +23,9 @@ var (
defaultAppDir = util.AppDir("kaspawallet", false)
)
// LastVersion is the most up to date file format version
const LastVersion = 1
func defaultKeysFile(netParams *dagconfig.Params) string {
return filepath.Join(defaultAppDir, netParams.Name, "keys.json")
}
@@ -32,6 +36,8 @@ type encryptedPrivateKeyJSON struct {
}
type keysFileJSON struct {
Version uint32 `json:"version"`
NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`.
EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"`
ExtendedPublicKeys []string `json:"publicKeys"`
MinimumSignatures uint32 `json:"minimumSignatures"`
@@ -49,6 +55,8 @@ type EncryptedMnemonic struct {
// File holds all the data related to the wallet keys
type File struct {
Version uint32
NumThreads uint8 // This field is ignored for versions different than 0
EncryptedMnemonics []*EncryptedMnemonic
ExtendedPublicKeys []string
MinimumSignatures uint32
@@ -69,6 +77,8 @@ func (d *File) toJSON() *keysFileJSON {
}
return &keysFileJSON{
Version: d.Version,
NumThreads: d.NumThreads,
EncryptedPrivateKeys: encryptedPrivateKeysJSON,
ExtendedPublicKeys: d.ExtendedPublicKeys,
MinimumSignatures: d.MinimumSignatures,
@@ -80,6 +90,8 @@ func (d *File) toJSON() *keysFileJSON {
}
func (d *File) fromJSON(fileJSON *keysFileJSON) error {
d.Version = fileJSON.Version
d.NumThreads = fileJSON.NumThreads
d.MinimumSignatures = fileJSON.MinimumSignatures
d.ECDSA = fileJSON.ECDSA
d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys
@@ -181,10 +193,20 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
if len(password) == 0 {
password = getPassword("Password:")
}
var numThreads uint8
if len(d.EncryptedMnemonics) > 0 {
var err error
numThreads, err = d.numThreads(password)
if err != nil {
return nil, err
}
}
privateKeys := make([]string, len(d.EncryptedMnemonics))
for i, encryptedPrivateKey := range d.EncryptedMnemonics {
var err error
privateKeys[i], err = decryptMnemonic(encryptedPrivateKey, password)
privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password)
if err != nil {
return nil, err
}
@@ -278,13 +300,73 @@ func (d *File) Save() error {
return nil
}
func getAEAD(password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32)
const defaultNumThreads = 8
func (d *File) numThreads(password []byte) (uint8, error) {
// There's a bug in v0 wallets where the number of threads
// was determined by the number of logical CPUs on the machine,
// which made the authentication non-deterministic across platforms.
// In order to solve it we introduce v1, where the number of threads
// is constant, and brute-force the number of threads in v0. After we
// find the right count via brute force we save the result to the file.
if d.Version != 0 {
return defaultNumThreads, nil
}
if d.NumThreads != 0 {
return d.NumThreads, nil
}
numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0].salt)
if err != nil {
return 0, err
}
d.NumThreads = numThreads
err = d.Save()
if err != nil {
return 0, err
}
return numThreads, nil
}
func (d *File) detectNumThreads(password, salt []byte) (uint8, error) {
numCPU := uint8(runtime.NumCPU())
_, err := getAEAD(numCPU, password, salt)
if err != nil {
if !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return numCPU, nil
}
for i := uint8(1); ; i++ {
if i == numCPU {
continue
}
_, err := getAEAD(i, password, salt)
if err != nil {
const maxTries = 32
if i > maxTries || !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return i, nil
}
}
}
func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32)
return chacha20poly1305.NewX(key)
}
func decryptMnemonic(encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(password, encryptedPrivateKey.salt)
func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt)
if err != nil {
return "", err
}
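The brute force works because argon2id output depends on its threads parameter: a key derived with the wrong thread count simply fails AEAD authentication, so trying counts until decryption succeeds recovers the value a v0 wallet used. A hedged, self-contained sketch of the idea, with made-up password and salt values:

```go
package main

import (
	"bytes"
	"crypto/cipher"
	"fmt"

	"golang.org/x/crypto/argon2"
	"golang.org/x/crypto/chacha20poly1305"
)

// deriveAEAD mirrors getAEAD from the diff: argon2id with an explicit
// thread count feeding an XChaCha20-Poly1305 AEAD.
func deriveAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) {
	key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32)
	return chacha20poly1305.NewX(key)
}

func main() {
	password := []byte("hunter2")
	salt := bytes.Repeat([]byte{0x01}, 16)
	nonce := make([]byte, chacha20poly1305.NonceSizeX)

	// Encrypt as a hypothetical v0 wallet would have, on a 4-CPU machine.
	aead, err := deriveAEAD(4, password, salt)
	if err != nil {
		panic(err)
	}
	ciphertext := aead.Seal(nil, nonce, []byte("secret mnemonic"), nil)

	// Later, on a different machine, recover the thread count by brute
	// force: only the matching count yields a key that authenticates.
	for threads := uint8(1); threads <= 8; threads++ {
		aead, err := deriveAEAD(threads, password, salt)
		if err != nil {
			continue
		}
		if plaintext, err := aead.Open(nil, nonce, ciphertext, nil); err == nil {
			fmt.Printf("threads=%d: %s\n", threads, plaintext)
			return
		}
	}
	fmt.Println("no thread count matched")
}
```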

View File

@@ -137,11 +137,18 @@ func (s *consensus) Init(skipAddingGenesis bool) error {
return nil
}
func (s *consensus) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.PruningPointAndItsAnticoneWithTrustedData()
return s.pruningManager.PruningPointAndItsAnticone()
}
func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash)
}
// BuildBlock builds a block over the current state, with the transactions
@@ -716,7 +723,10 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) {
func (s *consensus) ResolveVirtual() error {
// In order to prevent a situation where the consensus lock is held for too long, we
// release the lock after every 100 resolved blocks.
for {
for i := 0; ; i++ {
if i%10 == 0 {
log.Infof("Resolving virtual. This may take some time...")
}
var isCompletelyResolved bool
var err error
func() {
@@ -730,6 +740,7 @@ func (s *consensus) ResolveVirtual() error {
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
}
}
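Note the immediately invoked closure in ResolveVirtual: it scopes Lock and the deferred Unlock to a single iteration, so the consensus lock is released between chunks instead of being held for the entire resolution. A minimal sketch of the pattern (resolveChunk is a stand-in, not a kaspad API):

```go
package main

import (
	"fmt"
	"sync"
)

var lock sync.Mutex

// resolveChunk stands in for a bounded unit of work; it reports whether
// everything is resolved (here, after three chunks).
func resolveChunk(i int) (done bool) { return i >= 2 }

// resolveAll releases the lock between chunks so other callers can
// interleave, instead of deferring the unlock for the whole loop.
func resolveAll() {
	for i := 0; ; i++ {
		var isCompletelyResolved bool
		func() {
			lock.Lock()
			defer lock.Unlock() // runs at the end of each iteration
			isCompletelyResolved = resolveChunk(i)
		}()
		if isCompletelyResolved {
			fmt.Println("resolved after", i+1, "chunks")
			return
		}
	}
}

func main() { resolveAll() }
```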

View File

@@ -6,7 +6,9 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)
var reachabilityDataBucketName = []byte("reachability-data")
@@ -50,6 +52,8 @@ func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool
return rds.stagingShard(stagingArea).isStaged()
}
var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found")
// ReachabilityData returns the reachabilityData associated with the given blockHash
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
stagingShard := rds.stagingShard(stagingArea)
@@ -59,10 +63,16 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
}
if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
if reachabilityData == nil {
return nil, errNotFound
}
return reachabilityData.(model.ReachabilityData), nil
}
reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
if database.IsNotFoundError(err) {
rds.reachabilityDataCache.Add(blockHash, nil)
}
if err != nil {
return nil, err
}
@@ -76,17 +86,15 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
}
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
stagingShard := rds.stagingShard(stagingArea)
if _, ok := stagingShard.reachabilityData[*blockHash]; ok {
return true, nil
_, err := rds.ReachabilityData(dbContext, stagingArea, blockHash)
if database.IsNotFoundError(err) {
return false, nil
}
if err != nil {
return false, err
}
if rds.reachabilityDataCache.Has(blockHash) {
return true, nil
}
return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash))
return true, nil
}
// ReachabilityReindexRoot returns the current reachability reindex root
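The store now negative-caches misses: a nil entry in the LRU records that the database holds no reachability data for that hash, and HasReachabilityData is re-expressed in terms of ReachabilityData so both paths share the cache. A toy sketch of negative caching, with a plain map standing in for the LRU:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type store struct {
	db    map[string]string
	cache map[string]*string // nil value = cached "not found"
}

// get consults the cache first; a cached nil means we already know the
// database does not contain the key, so the expensive lookup is skipped.
func (s *store) get(key string) (string, error) {
	if v, ok := s.cache[key]; ok {
		if v == nil {
			return "", errNotFound
		}
		return *v, nil
	}
	v, ok := s.db[key]
	if !ok {
		s.cache[key] = nil // negative-cache the miss
		return "", errNotFound
	}
	s.cache[key] = &v
	return v, nil
}

// has is defined in terms of get, just as HasReachabilityData is defined
// in terms of ReachabilityData in the diff.
func (s *store) has(key string) (bool, error) {
	_, err := s.get(key)
	if errors.Is(err, errNotFound) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	s := &store{db: map[string]string{"a": "1"}, cache: map[string]*string{}}
	fmt.Println(s.has("a")) // true <nil>
	fmt.Println(s.has("b")) // false <nil> (second call served from cache)
}
```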

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil"
@@ -158,12 +159,16 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0]
// Processes
parentsManager := parentssanager.New(config.GenesisHash)
blockParentBuilder := blockparentbuilder.New(
dbManager,
blockHeaderStore,
dagTopologyManager,
parentsManager,
reachabilityDataStore,
pruningStore,
config.GenesisHash,
)
pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance,
@@ -316,6 +321,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityManager,
blockParentBuilder,
pruningManager,
parentsManager,
pruningStore,
blockStore,
@@ -403,6 +409,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
ghostdagManagers,
reachabilityManagers,
dagTraversalManagers,
parentsManager,
ghostdagDataStores,
pruningStore,
@@ -581,7 +588,7 @@ func dagStores(config *Config,
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
} else {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 200, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 86400, false)
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false)
}
}

View File

@@ -16,8 +16,8 @@ import (
func TestFinality(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Set finalityInterval to 50 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 50 * consensusConfig.TargetTimePerBlock
// Set finalityInterval to 20 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 20 * consensusConfig.TargetTimePerBlock
factory := consensus.NewFactory()
consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestFinality")
@@ -180,7 +180,8 @@ func TestFinality(t *testing.T) {
func TestBoundedMergeDepth(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Set finalityInterval to 50 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 50 * consensusConfig.TargetTimePerBlock
consensusConfig.K = 5
consensusConfig.FinalityDuration = 7 * consensusConfig.TargetTimePerBlock
finalityInterval := int(consensusConfig.FinalityDepth())
if int(consensusConfig.K) >= finalityInterval {

View File

@@ -58,7 +58,6 @@ type BlockHeader interface {
type BaseBlockHeader interface {
Version() uint16
Parents() []BlockLevelParents
ParentsAtLevel(level int) BlockLevelParents
DirectParents() BlockLevelParents
HashMerkleRoot() *DomainHash
AcceptedIDMerkleRoot() *DomainHash
@@ -70,6 +69,7 @@ type BaseBlockHeader interface {
BlueScore() uint64
BlueWork() *big.Int
PruningPoint() *DomainHash
BlockLevel() int
Equal(other BaseBlockHeader) bool
}

View File

@@ -25,7 +25,8 @@ type Consensus interface {
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
PruningPoint() (*DomainHash, error)
PruningPointHeaders() ([]BlockHeader, error)
PruningPointAndItsAnticoneWithTrustedData() ([]*BlockWithTrustedData, error)
PruningPointAndItsAnticone() ([]*DomainHash, error)
BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error)
ClearImportedPruningPointData() error
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error
ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error

View File

@@ -5,5 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// BlockParentBuilder exposes a method to build super-block parents for
// a given set of direct parents
type BlockParentBuilder interface {
BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
BuildParents(stagingArea *StagingArea,
daaScore uint64,
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
}

View File

@@ -0,0 +1,9 @@
package model
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// ParentsManager is a wrapper around header parents that replaces empty parents with genesis when needed.
type ParentsManager interface {
ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents
Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents
}
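For illustration only, here is a sketch of an implementation consistent with this interface and with the `parentssanager.New(config.GenesisHash)` call in the factory diff: any level whose parent list is missing or empty resolves to a genesis-only list. The method bodies are assumptions, not the actual kaspad code:

```go
package parentssanager

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type parentsManager struct {
	genesisHash *externalapi.DomainHash
}

// New constructs a ParentsManager that pads empty levels with genesis.
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
	return &parentsManager{genesisHash: genesisHash}
}

func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
	var parentsAtLevel externalapi.BlockLevelParents
	if len(blockHeader.Parents()) > level {
		parentsAtLevel = blockHeader.Parents()[level]
	}
	if len(parentsAtLevel) == 0 {
		// An empty or missing level implicitly points at genesis.
		return externalapi.BlockLevelParents{pm.genesisHash}
	}
	return parentsAtLevel
}

func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
	numParents := blockHeader.BlockLevel() + 1
	parents := make([]externalapi.BlockLevelParents, numParents)
	for i := 0; i < numParents; i++ {
		parents[i] = pm.ParentsAtLevel(blockHeader, i)
	}
	return parents
}
```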

View File

@@ -12,6 +12,7 @@ type PruningManager interface {
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error
UpdatePruningPointIfRequired() error
PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error
PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error)
PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error)
ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error)
}

View File

@@ -183,10 +183,16 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingAr
func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction,
newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) {
parents, err := bb.newBlockParents(stagingArea)
daaScore, err := bb.newBlockDAAScore(stagingArea)
if err != nil {
return nil, err
}
parents, err := bb.newBlockParents(stagingArea, daaScore)
if err != nil {
return nil, err
}
timeInMilliseconds, err := bb.newBlockTime(stagingArea)
if err != nil {
return nil, err
@@ -204,10 +210,6 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
if err != nil {
return nil, err
}
daaScore, err := bb.newBlockDAAScore(stagingArea)
if err != nil {
return nil, err
}
blueWork, err := bb.newBlockBlueWork(stagingArea)
if err != nil {
return nil, err
@@ -233,12 +235,12 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
), nil
}
func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]externalapi.BlockLevelParents, error) {
func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) {
virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash)
if err != nil {
return nil, err
}
return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
return bb.blockParentBuilder.BuildParents(stagingArea, daaScore, virtualBlockRelations.Parents)
}
func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {

View File

@@ -83,7 +83,7 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
return nil, err
}
parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes)
if err != nil {
return nil, err
}

View File

@@ -5,7 +5,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/pkg/errors"
)
@@ -13,8 +12,11 @@ type blockParentBuilder struct {
databaseContext model.DBManager
blockHeaderStore model.BlockHeaderStore
dagTopologyManager model.DAGTopologyManager
parentsManager model.ParentsManager
reachabilityDataStore model.ReachabilityDataStore
pruningStore model.PruningStore
genesisHash *externalapi.DomainHash
}
// New creates a new instance of a BlockParentBuilder
@@ -22,20 +24,27 @@ func New(
databaseContext model.DBManager,
blockHeaderStore model.BlockHeaderStore,
dagTopologyManager model.DAGTopologyManager,
parentsManager model.ParentsManager,
reachabilityDataStore model.ReachabilityDataStore,
pruningStore model.PruningStore,
genesisHash *externalapi.DomainHash,
) model.BlockParentBuilder {
return &blockParentBuilder{
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
parentsManager: parentsManager,
reachabilityDataStore: reachabilityDataStore,
pruningStore: pruningStore,
genesisHash: genesisHash,
}
}
func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
// Later on we'll mutate direct parent hashes, so we first clone them.
directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
@@ -93,7 +102,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
// all the block levels they occupy
for _, directParentHeader := range directParentHeaders {
directParentHash := consensushashing.HeaderHash(directParentHeader)
blockLevel := pow.BlockLevel(directParentHeader)
blockLevel := directParentHeader.BlockLevel()
for i := 0; i <= blockLevel; i++ {
if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -116,7 +125,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
for _, directParentHeader := range directParentHeaders {
for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) {
isEmptyLevel := false
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -145,7 +154,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
} else {
for childHash, childHeader := range virtualGenesisChildrenHeaders {
childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) {
referenceBlocks = append(referenceBlocks, &childHash)
}
}
@@ -203,14 +212,21 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
}
parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap))
for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
if blockLevel > 0 {
if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 {
break
}
}
levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
block := block // Assign to a new pointer to avoid `range` pointer reuse
levelBlocks = append(levelBlocks, &block)
}
parents[blockLevel] = levelBlocks
parents = append(parents, levelBlocks)
}
return parents, nil
}

View File

@@ -1,12 +1,20 @@
package blockprocessor
import (
"bytes"
"compress/gzip"
"github.com/kaspanet/go-muhash"
// we need to embed the utxoset of mainnet genesis here
_ "embed"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/kaspanet/kaspad/util/staging"
"io"
"sync"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -16,6 +24,9 @@ import (
"github.com/pkg/errors"
)
//go:embed resources/utxos.gz
var utxoDumpFile []byte
func (bp *blockProcessor) setBlockStatusAfterBlockValidation(
stagingArea *model.StagingArea, block *externalapi.DomainBlock, isPruningPoint bool) error {
@@ -128,6 +139,11 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
}
}
err = bp.ifGenesisSetUtxoSet(block)
if err != nil {
return nil, err
}
var selectedParentChainChanges *externalapi.SelectedChainPath
var virtualUTXODiff externalapi.UTXODiff
var reversalData *model.UTXODiffReversalData
@@ -215,6 +231,93 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
}, nil
}
var mainnetGenesisUTXOSet externalapi.UTXODiff
var mainnetGenesisMultiSet model.Multiset
var mainnetGenesisOnce sync.Once
var mainnetGenesisErr error
func deserializeMainnetUTXOSet() (externalapi.UTXODiff, model.Multiset, error) {
mainnetGenesisOnce.Do(func() {
toAdd := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
mainnetGenesisMultiSet = multiset.New()
file, err := gzip.NewReader(bytes.NewReader(utxoDumpFile))
if err != nil {
mainnetGenesisErr = err
return
}
for i := 0; ; i++ {
size := make([]byte, 1)
_, err = io.ReadFull(file, size)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
mainnetGenesisErr = err
return
}
serializedUTXO := make([]byte, size[0])
_, err = io.ReadFull(file, serializedUTXO)
if err != nil {
mainnetGenesisErr = err
return
}
mainnetGenesisMultiSet.Add(serializedUTXO)
entry, outpoint, err := utxo.DeserializeUTXO(serializedUTXO)
if err != nil {
mainnetGenesisErr = err
return
}
toAdd[*outpoint] = entry
}
mainnetGenesisUTXOSet, mainnetGenesisErr = utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(toAdd), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)))
})
return mainnetGenesisUTXOSet, mainnetGenesisMultiSet, mainnetGenesisErr
}
func (bp *blockProcessor) ifGenesisSetUtxoSet(block *externalapi.DomainBlock) error {
isGenesis := len(block.Header.DirectParents()) == 0
if !isGenesis {
return nil
}
blockHash := consensushashing.BlockHash(block)
if !block.Header.UTXOCommitment().Equal(externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray())) {
log.Infof("Loading checkpoint UTXO set")
diff, utxoSetMultiset, err := deserializeMainnetUTXOSet()
if err != nil {
return err
}
log.Infof("Finished loading checkpoint UTXO set")
utxoSetHash := utxoSetMultiset.Hash()
if !utxoSetHash.Equal(block.Header.UTXOCommitment()) {
return errors.New("Invalid UTXO set dump")
}
area := model.NewStagingArea()
bp.consensusStateStore.StageVirtualUTXODiff(area, diff)
bp.utxoDiffStore.Stage(area, blockHash, diff, nil)
// commit the multiset of genesis
bp.multisetStore.Stage(area, blockHash, utxoSetMultiset)
err = staging.CommitAllChanges(bp.databaseContext, area)
if err != nil {
return err
}
} else {
// if it's genesis but has an empty muhash then commit an empty multiset and an empty diff
area := model.NewStagingArea()
bp.consensusStateStore.StageVirtualUTXODiff(area, utxo.NewUTXODiff())
bp.utxoDiffStore.Stage(area, blockHash, utxo.NewUTXODiff(), nil)
bp.multisetStore.Stage(area, blockHash, multiset.New())
err := staging.CommitAllChanges(bp.databaseContext, area)
if err != nil {
return err
}
}
return nil
}
func isHeaderOnlyBlock(block *externalapi.DomainBlock) bool {
return len(block.Transactions) == 0
}
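The embedded resources/utxos.gz dump is read as a gzip stream of length-prefixed records: one size byte, then that many bytes of serialized UTXO, repeated until EOF (and parsed only once, guarded by sync.Once). A self-contained sketch of that framing with toy payloads:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
)

// writeRecords builds a gzip stream of 1-byte-length-prefixed records,
// the same framing the embedded utxos.gz dump uses in the diff.
// Records must be shorter than 256 bytes to fit the single size byte.
func writeRecords(records [][]byte) []byte {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	for _, r := range records {
		zw.Write([]byte{byte(len(r))}) // size prefix
		zw.Write(r)                    // payload
	}
	zw.Close()
	return buf.Bytes()
}

// readRecords mirrors the deserialization loop: read one size byte,
// then exactly that many payload bytes, until a clean EOF.
func readRecords(data []byte) ([][]byte, error) {
	zr, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	var records [][]byte
	for {
		size := make([]byte, 1)
		if _, err := io.ReadFull(zr, size); errors.Is(err, io.EOF) {
			return records, nil
		} else if err != nil {
			return nil, err
		}
		payload := make([]byte, size[0])
		if _, err := io.ReadFull(zr, payload); err != nil {
			return nil, err
		}
		records = append(records, payload)
	}
}

func main() {
	data := writeRecords([][]byte{[]byte("utxo-1"), []byte("utxo-22")})
	records, err := readRecords(data)
	if err != nil {
		panic(err)
	}
	for _, r := range records {
		fmt.Printf("%s\n", r)
	}
}
```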

View File

@@ -87,13 +87,18 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("PruningPointHeaders: %+v", err)
}
pruningPointAndItsAnticoneWithTrustedData, err := tcSyncer.PruningPointAndItsAnticoneWithTrustedData()
pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone()
if err != nil {
t.Fatalf("PruningPointAndItsAnticoneWithTrustedData: %+v", err)
t.Fatalf("PruningPointAndItsAnticone: %+v", err)
}
for _, blockWithTrustedData := range pruningPointAndItsAnticoneWithTrustedData {
_, err := synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
for _, blockHash := range pruningPointAndItsAnticone {
blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash)
if err != nil {
return
}
_, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
}
@@ -135,10 +140,21 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
}
}
pruningPointUTXOs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, nil, 1000)
if err != nil {
t.Fatalf("GetPruningPointUTXOs: %+v", err)
var fromOutpoint *externalapi.DomainOutpoint
var pruningPointUTXOs []*externalapi.OutpointAndUTXOEntryPair
const step = 100_000
for {
outpointAndUTXOEntryPairs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step)
if err != nil {
t.Fatalf("GetPruningPointUTXOs: %+v", err)
}
fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint
pruningPointUTXOs = append(pruningPointUTXOs, outpointAndUTXOEntryPairs...)
if len(outpointAndUTXOEntryPairs) < step {
break
}
}
err = synceeStaging.AppendImportedPruningPointUTXOs(pruningPointUTXOs)
if err != nil {
t.Fatalf("AppendImportedPruningPointUTXOs: %+v", err)
@@ -502,7 +518,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
// Get pruning point UTXOs in a loop
var allOutpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair
step := 100
const step = 100_000
var fromOutpoint *externalapi.DomainOutpoint
for {
outpointAndUTXOEntryPairs, err := testConsensus.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step)
@@ -517,11 +533,17 @@ func TestGetPruningPointUTXOs(t *testing.T) {
}
}
const mainnetUTXOSize = 1232643
expected := len(outputs) + 1
if consensusConfig.Name == "kaspa-mainnet" {
expected += mainnetUTXOSize
}
// Make sure the length of the UTXOs is exactly spendingTransaction.Outputs + 1 coinbase
// output (includingBlock's coinbase)
if len(allOutpointAndUTXOEntryPairs) != len(outputs)+1 {
if len(allOutpointAndUTXOEntryPairs) != expected {
t.Fatalf("Returned an unexpected amount of UTXOs. "+
"Want: %d, got: %d", len(outputs)+2, len(allOutpointAndUTXOEntryPairs))
"Want: %d, got: %d", expected, len(allOutpointAndUTXOEntryPairs))
}
// Make sure all spendingTransaction.Outputs are in the returned UTXOs
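The test now pages through the UTXO set with a cursor: fetch up to `step` entries, remember the last outpoint as the next cursor, and stop when a page comes back short. A toy sketch of the same cursor-pagination loop over an in-memory slice:

```go
package main

import "fmt"

// fetchPage stands in for GetPruningPointUTXOs: it returns up to limit
// items starting at the cursor (hypothetical in-memory source).
func fetchPage(all []int, cursor, limit int) []int {
	end := cursor + limit
	if end > len(all) {
		end = len(all)
	}
	return all[cursor:end]
}

func main() {
	all := make([]int, 250)
	for i := range all {
		all[i] = i
	}

	const step = 100
	var result []int
	cursor := 0
	for {
		page := fetchPage(all, cursor, step)
		result = append(result, page...)
		// A short page means the source is exhausted.
		if len(page) < step {
			break
		}
		// Advance the cursor past the last item received, just as the
		// test advances fromOutpoint to the last returned outpoint.
		cursor += len(page)
	}
	fmt.Println("fetched", len(result), "items")
}
```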

View File

@@ -4,6 +4,8 @@ import (
"bytes"
"math"
"math/big"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -21,6 +23,31 @@ import (
"github.com/pkg/errors"
)
func TestBlockValidator_ValidateBodyInIsolation(t *testing.T) {
tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){
CheckBlockSanity,
CheckBlockHashMerkleRoot,
BlockMass,
CheckBlockDuplicateTransactions,
CheckBlockContainsOnlyOneCoinbase,
CheckBlockDoubleSpends,
CheckFirstBlockTransactionIsCoinbase,
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestChainedTransactions")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for _, test := range tests {
testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
t.Run(testName, func(t *testing.T) {
test(t, tc, consensusConfig)
})
}
})
}
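The refactor replaces per-function consensus setup with one shared harness: a slice of test functions run as subtests against the same consensus instance, with subtest names derived from the function values themselves via runtime.FuncForPC. A compact sketch of that technique:

```go
package demo

import (
	"reflect"
	"runtime"
	"testing"
)

// fixture is set up once and shared by every subtest, the way the
// refactor shares a single TestConsensus across checks.
type fixture struct{ name string }

func checkA(t *testing.T, f *fixture) { t.Log("A on", f.name) }
func checkB(t *testing.T, f *fixture) { t.Log("B on", f.name) }

// TestAll runs a table of test funcs as subtests; runtime.FuncForPC
// turns each function value into its name, so no string keys are needed.
func TestAll(t *testing.T) {
	tests := []func(*testing.T, *fixture){checkA, checkB}
	f := &fixture{name: "shared-consensus"}
	for _, test := range tests {
		testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
		t.Run(testName, func(t *testing.T) {
			test(t, f)
		})
	}
}
```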
func TestChainedTransactions(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.BlockCoinbaseMaturity = 0
@@ -89,47 +116,39 @@ func TestChainedTransactions(t *testing.T) {
})
}
// TestCheckBlockSanity tests the CheckBlockSanity function to ensure it works
// CheckBlockSanity tests the CheckBlockSanity function to ensure it works
// as expected.
func TestCheckBlockSanity(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockSanity")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
blockHash := consensushashing.BlockHash(&exampleValidBlock)
if len(exampleValidBlock.Transactions) < 3 {
t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
}
func CheckBlockSanity(t *testing.T, tc testapi.TestConsensus, _ *consensus.Config) {
blockHash := consensushashing.BlockHash(&exampleValidBlock)
if len(exampleValidBlock.Transactions) < 3 {
t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
}
stagingArea := model.NewStagingArea()
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Fatalf("Failed validating block in isolation: %v", err)
}
err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Fatalf("Failed validating block in isolation: %v", err)
}
// Test with block with wrong transactions sorting order
blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder)
tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
}
// Test with block with wrong transactions sorting order
blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder)
tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
}
// Test a block with invalid parents order
// We no longer require blocks to have ordered parents
blockHash = consensushashing.BlockHash(&unOrderedParentsBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)
}
})
// Test a block with invalid parents order
// We no longer require blocks to have ordered parents
blockHash = consensushashing.BlockHash(&unOrderedParentsBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)
}
}
var unOrderedParentsBlock = externalapi.DomainBlock{
@@ -1025,59 +1044,41 @@ var blockWithWrongTxOrder = externalapi.DomainBlock{
},
}
func TestCheckBlockHashMerkleRoot(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockHashMerkleRoot")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
func CheckBlockHashMerkleRoot(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
})
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
}
func TestBlockMass(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockMass")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
func BlockMass(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) {
t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) {
t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err)
}
}
func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1113,30 +1114,20 @@ func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx})
}
func TestCheckBlockDuplicateTransactions(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckBlockDuplicateTransactions(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockDuplicateTransactions")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err)
}
}
func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1170,30 +1161,20 @@ func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc tes
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx, tx})
}
func TestCheckBlockContainsOnlyOneCoinbase(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckBlockContainsOnlyOneCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockContainsOnlyOneCoinbase")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err)
}
}
func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1227,30 +1208,20 @@ func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc test
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx})
}
func TestCheckBlockDoubleSpends(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckBlockDoubleSpends(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithDoubleSpends(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockDoubleSpends")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithDoubleSpends(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err)
}
}
func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1303,27 +1274,18 @@ func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.Tes
&emptyCoinbase, []*externalapi.DomainTransaction{tx, txSameOutpoint})
}
func TestCheckFirstBlockTransactionIsCoinbase(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckFirstBlockTransactionIsCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckFirstBlockTransactionIsCoinbase")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig)
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig)
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err)
}
})
err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err)
}
}
func initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig *consensus.Config) *externalapi.DomainBlock {

View File

@@ -6,7 +6,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
@@ -63,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
return err
}
if !hasReachabilityData {
blockLevel := pow.BlockLevel(header)
blockLevel := header.BlockLevel()
for i := 0; i <= blockLevel; i++ {
err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
if err != nil {
@@ -195,7 +194,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
}
func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents())
if err != nil {
return err
}

View File

@@ -67,8 +67,8 @@ func TestValidateMedianTime(t *testing.T) {
blockTime := tip.Header.TimeInMilliseconds()
for i := 0; i < 100; i++ {
blockTime += 1000
for i := 0; i < 10; i++ {
blockTime += 100
_, tipHash = addBlock(blockTime, []*externalapi.DomainHash{tipHash}, nil)
}
@@ -163,16 +163,17 @@ func TestCheckParentsIncest(t *testing.T) {
func TestCheckMergeSizeLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K)
consensusConfig.MergeSetSizeLimit = 5
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsIncest")
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckMergeSizeLimit")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
chain1TipHash := consensusConfig.GenesisHash
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+2; i++ {
// We add a chain longer by one block than chain2 below, to make this one the selected chain
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ {
chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
@@ -180,7 +181,9 @@ func TestCheckMergeSizeLimit(t *testing.T) {
}
chain2TipHash := consensusConfig.GenesisHash
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ {
// We add a merge set of size exactly MergeSetSizeLimit (to violate the limit),
// since the selected parent is also counted
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ {
chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
@@ -193,3 +196,56 @@ func TestCheckMergeSizeLimit(t *testing.T) {
}
})
}
func TestVirtualSelectionViolatingMergeSizeLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestVirtualSelectionViolatingMergeSizeLimit")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
chain1TipHash := consensusConfig.GenesisHash
// We add a chain longer than chain2 below, to make this one the selected chain
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ {
chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
chain2TipHash := consensusConfig.GenesisHash
// We add a merge set of size exactly MergeSetSizeLimit-1 (to still not violate the limit)
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit-1; i++ {
chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
// We now add a single block over genesis, which is expected to exceed the limit
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
stagingArea := model.NewStagingArea()
virtualSelectedParent, err := tc.GetVirtualSelectedParent()
if err != nil {
t.Fatalf("GetVirtualSelectedParent: %+v", err)
}
selectedParentAnticone, err := tc.DAGTraversalManager().AnticoneFromVirtualPOV(stagingArea, virtualSelectedParent)
if err != nil {
t.Fatalf("AnticoneFromVirtualPOV: %+v", err)
}
// Test if Virtual's mergeset is too large
// Note: the selected parent itself is also counted in the mergeset limit
if len(selectedParentAnticone)+1 > int(consensusConfig.MergeSetSizeLimit) {
t.Fatalf("Virtual's mergeset size (%d) exceeds merge set limit (%d)",
len(selectedParentAnticone)+1, consensusConfig.MergeSetSizeLimit)
}
})
}
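
The invariant this test asserts — virtual's mergeset is the selected parent plus the selected parent's anticone from virtual's point of view, and must stay within the configured limit — can be summarized in a minimal sketch (illustrative names, not the consensus API):

package sketch

import "fmt"

// checkVirtualMergeSetLimit mirrors the assertion above: the mergeset size is
// the selected parent's anticone size plus one for the selected parent itself.
func checkVirtualMergeSetLimit(selectedParentAnticoneSize int, mergeSetSizeLimit uint64) error {
	mergeSetSize := uint64(selectedParentAnticoneSize) + 1
	if mergeSetSize > mergeSetSizeLimit {
		return fmt.Errorf("virtual's mergeset size (%d) exceeds merge set limit (%d)", mergeSetSize, mergeSetSizeLimit)
	}
	return nil
}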

View File

@@ -1,6 +1,9 @@
package blockvalidator_test
import (
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -13,73 +16,74 @@ import (
"github.com/pkg/errors"
)
func TestCheckParentsLimit(t *testing.T) {
func TestBlockValidator_ValidateHeaderInIsolation(t *testing.T) {
tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){
CheckParentsLimit,
CheckBlockVersion,
CheckBlockTimestampInIsolation,
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsLimit")
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestBlockValidator_ValidateHeaderInIsolation")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ {
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
tips, err := tc.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
_, _, err = tc.AddBlock(tips, nil, nil)
if !errors.Is(err, ruleerrors.ErrTooManyParents) {
t.Fatalf("Unexpected error: %+v", err)
for _, test := range tests {
testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
t.Run(testName, func(t *testing.T) {
test(t, tc, consensusConfig)
})
}
})
}
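
The table-driven refactor above derives each subtest's name from the test function itself rather than from a separate name field; a small sketch of that reflection pattern:

package sketch

import (
	"reflect"
	"runtime"
)

// funcName returns the fully qualified name of any function value, which is
// what the refactored driver uses as the subtest name.
func funcName(fn interface{}) string {
	return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
}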
func TestCheckBlockVersion(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockVersion")
func CheckParentsLimit(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ {
_, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
t.Fatalf("AddBlock: %+v", err)
}
defer teardown(false)
}
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
tips, err := tc.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
block.Header = blockheader.NewImmutableBlockHeader(
constants.MaxBlockVersion+1,
block.Header.Parents(),
block.Header.HashMerkleRoot(),
block.Header.AcceptedIDMerkleRoot(),
block.Header.UTXOCommitment(),
block.Header.TimeInMilliseconds(),
block.Header.Bits(),
block.Header.Nonce(),
block.Header.DAAScore(),
block.Header.BlueScore(),
block.Header.BlueWork(),
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrBlockVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
})
_, _, err = tc.AddBlock(tips, nil, nil)
if !errors.Is(err, ruleerrors.ErrTooManyParents) {
t.Fatalf("Unexpected error: %+v", err)
}
}
func TestCheckBlockTimestampInIsolation(t *testing.T) {
func CheckBlockVersion(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
block.Header = blockheader.NewImmutableBlockHeader(
constants.MaxBlockVersion+1,
block.Header.Parents(),
block.Header.HashMerkleRoot(),
block.Header.AcceptedIDMerkleRoot(),
block.Header.UTXOCommitment(),
block.Header.TimeInMilliseconds(),
block.Header.Bits(),
block.Header.Nonce(),
block.Header.DAAScore(),
block.Header.BlueScore(),
block.Header.BlueWork(),
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrBlockVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
}
func CheckBlockTimestampInIsolation(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()

View File

@@ -37,6 +37,7 @@ type blockValidator struct {
finalityManager model.FinalityManager
blockParentBuilder model.BlockParentBuilder
pruningManager model.PruningManager
parentsManager model.ParentsManager
blockStore model.BlockStore
ghostdagDataStores []model.GHOSTDAGDataStore
@@ -72,6 +73,7 @@ func New(powMax *big.Int,
finalityManager model.FinalityManager,
blockParentBuilder model.BlockParentBuilder,
pruningManager model.PruningManager,
parentsManager model.ParentsManager,
pruningStore model.PruningStore,
blockStore model.BlockStore,
@@ -108,6 +110,7 @@ func New(powMax *big.Int,
finalityManager: finalityManager,
blockParentBuilder: blockParentBuilder,
pruningManager: pruningManager,
parentsManager: parentsManager,
pruningStore: pruningStore,
blockStore: blockStore,

View File

@@ -49,9 +49,11 @@ func (v *blockValidator) ValidatePruningPointViolationAndProofOfWorkAndDifficult
}
}
err = v.checkProofOfWork(header)
if err != nil {
return err
if !blockHash.Equal(v.genesisHash) {
err = v.checkProofOfWork(header)
if err != nil {
return err
}
}
err = v.validateDifficulty(stagingArea, blockHash, isBlockWithTrustedData)
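
A minimal sketch of the new control flow (stand-in types, not the validator's real signature): the hard-fork genesis is constructed rather than mined, so it alone skips the proof-of-work check while still going through difficulty validation.

package sketch

// validateProofOfWorkUnlessGenesis sketches the guard above; checkProofOfWork
// stands in for the real validator method.
func validateProofOfWorkUnlessGenesis(isGenesis bool, checkProofOfWork func() error) error {
	if !isGenesis {
		return checkProofOfWork()
	}
	return nil
}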
@@ -67,9 +69,9 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
header externalapi.BlockHeader,
isBlockWithTrustedData bool) error {
for level := 0; level <= pow.BlockLevel(header); level++ {
for level := 0; level <= header.BlockLevel(); level++ {
var parents []*externalapi.DomainHash
for _, parent := range header.ParentsAtLevel(level) {
for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
isNotFoundError := database.IsNotFoundError(err)
if !isNotFoundError && err != nil {
@@ -116,7 +118,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
return err
}
blockLevel := pow.BlockLevel(header)
blockLevel := header.BlockLevel()
for i := 1; i <= blockLevel; i++ {
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
if err != nil {

View File

@@ -101,10 +101,13 @@ func TestPOW(t *testing.T) {
t.Fatal(err)
}
random := rand.New(rand.NewSource(0))
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
// Difficulty is too high on mainnet to actually mine.
if consensusConfig.Name != "kaspa-mainnet" {
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
}
}
})
}
@@ -296,7 +299,7 @@ func TestCheckPruningPointViolation(t *testing.T) {
func TestValidateDifficulty(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
mocDifficulty := &mocDifficultyManager{}
mocDifficulty := &mocDifficultyManager{genesisDaaScore: consensusConfig.GenesisBlock.Header.DAAScore()}
factory.SetTestDifficultyManager(func(_ model.DBReader, _ model.GHOSTDAGManager, _ model.GHOSTDAGDataStore,
_ model.BlockHeaderStore, daaBlocksStore model.DAABlocksStore, _ model.DAGTopologyManager,
_ model.DAGTraversalManager, _ *big.Int, _ int, _ bool, _ time.Duration,
@@ -342,6 +345,7 @@ type mocDifficultyManager struct {
testDifficulty uint32
testGenesisBits uint32
daaBlocksStore model.DAABlocksStore
genesisDaaScore uint64
}
// RequiredDifficulty returns the difficulty required for the test
@@ -352,7 +356,7 @@ func (dm *mocDifficultyManager) RequiredDifficulty(*model.StagingArea, *external
// StageDAADataAndReturnRequiredDifficulty returns the difficulty required for the test
func (dm *mocDifficultyManager) StageDAADataAndReturnRequiredDifficulty(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (uint32, error) {
// Populate daaBlocksStore with fake values
dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, 0)
dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, dm.genesisDaaScore)
dm.daaBlocksStore.StageBlockDAAAddedBlocks(stagingArea, blockHash, nil)
return dm.testDifficulty, nil

View File

@@ -86,36 +86,5 @@ func TestBlockRewardSwitch(t *testing.T) {
t.Fatalf("Subsidy has unexpected value. Want: %d, got: %d", consensusConfig.MinSubsidy, subsidy)
}
}
// Add another block. We expect it to be another pruning point
lastPruningPointHash, _, err := tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
// Make sure that another pruning point had been added
pruningPointHeaders, err = tc.PruningPointHeaders()
if err != nil {
t.Fatalf("PruningPointHeaders: %+v", pruningPointHeaders)
}
expectedPruningPointHeaderAmount = expectedPruningPointHeaderAmount + 1
if uint64(len(pruningPointHeaders)) != expectedPruningPointHeaderAmount {
t.Fatalf("Unexpected amount of pruning point headers. "+
"Want: %d, got: %d", expectedPruningPointHeaderAmount, len(pruningPointHeaders))
}
// Make sure that the last pruning point has a post-switch subsidy
lastPruningPoint, err := tc.GetBlock(lastPruningPointHash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}
lastPruningPointCoinbase := lastPruningPoint.Transactions[transactionhelper.CoinbaseTransactionIndex]
_, _, subsidy, err := tc.CoinbaseManager().ExtractCoinbaseDataBlueScoreAndSubsidy(lastPruningPointCoinbase)
if err != nil {
t.Fatalf("ExtractCoinbaseDataBlueScoreAndSubsidy: %+v", err)
}
if subsidy != consensusConfig.SubsidyGenesisReward {
t.Fatalf("Subsidy has unexpected value. Want: %d, got: %d", consensusConfig.SubsidyGenesisReward, subsidy)
}
})
}

View File

@@ -2,6 +2,8 @@ package coinbasemanager
import (
"encoding/binary"
"math/big"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
@@ -10,7 +12,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
"math/big"
)
type coinbaseManager struct {
@@ -190,51 +191,7 @@ func (c *coinbaseManager) CalcBlockSubsidy(stagingArea *model.StagingArea,
return c.subsidyGenesisReward, nil
}
isBlockRewardFixed, err := c.isBlockRewardFixed(stagingArea, blockPruningPoint)
if err != nil {
return 0, err
}
if isBlockRewardFixed {
return c.subsidyGenesisReward, nil
}
averagePastSubsidy, err := c.calculateAveragePastSubsidy(stagingArea, blockHash)
if err != nil {
return 0, err
}
mergeSetSubsidySum, err := c.calculateMergeSetSubsidySum(stagingArea, blockHash)
if err != nil {
return 0, err
}
subsidyRandomVariable, err := c.calculateSubsidyRandomVariable(stagingArea, blockHash)
if err != nil {
return 0, err
}
pastSubsidy := new(big.Rat).Mul(averagePastSubsidy, c.subsidyPastRewardMultiplier)
mergeSetSubsidy := new(big.Rat).Mul(mergeSetSubsidySum, c.subsidyMergeSetRewardMultiplier)
// In order to avoid unsupported negative exponents in powInt64, flip
// the numerator and the denominator manually
subsidyRandom := new(big.Rat)
if subsidyRandomVariable >= 0 {
subsidyRandom = subsidyRandom.SetInt64(1 << subsidyRandomVariable)
} else {
subsidyRandom = subsidyRandom.SetFrac64(1, 1<<(-subsidyRandomVariable))
}
blockSubsidyBigRat := new(big.Rat).Add(mergeSetSubsidy, new(big.Rat).Mul(pastSubsidy, subsidyRandom))
blockSubsidyBigInt := new(big.Int).Div(blockSubsidyBigRat.Num(), blockSubsidyBigRat.Denom())
blockSubsidyUint64 := blockSubsidyBigInt.Uint64()
clampedBlockSubsidy := blockSubsidyUint64
if clampedBlockSubsidy < c.minSubsidy {
clampedBlockSubsidy = c.minSubsidy
} else if clampedBlockSubsidy > c.maxSubsidy {
clampedBlockSubsidy = c.maxSubsidy
}
return clampedBlockSubsidy, nil
return c.maxSubsidy, nil
}
func (c *coinbaseManager) calculateAveragePastSubsidy(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*big.Rat, error) {
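
The removed subsidy code's comment about flipping the numerator and denominator refers to computing 2^x as an exact rational when x may be negative; a hedged sketch of that trick:

package sketch

import "math/big"

// ratPow2 returns 2^x as an exact big.Rat. big.Rat has no negative-exponent
// helper, so for negative x the numerator and denominator are flipped manually.
// Assumes |x| < 63 so the shift fits in an int64.
func ratPow2(x int64) *big.Rat {
	if x >= 0 {
		return new(big.Rat).SetInt64(1 << x)
	}
	return new(big.Rat).SetFrac64(1, 1<<(-x))
}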

View File

@@ -75,7 +75,7 @@ func TestVirtualDiff(t *testing.T) {
blockB.Transactions[0].Outputs[0].Value,
blockB.Transactions[0].Outputs[0].ScriptPublicKey,
true,
2, //Expected virtual DAA score
consensusConfig.GenesisBlock.Header.DAAScore()+2, //Expected virtual DAA score
)) {
t.Fatalf("Unexpected entry %s", entry)
}

View File

@@ -2,7 +2,6 @@ package consensusstatemanager
import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
@@ -23,8 +22,16 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(stagingArea
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Block %s is the genesis. By definition, "+
"it has an empty UTXO diff, empty acceptance data, and a blank multiset", blockHash)
return utxo.NewUTXODiff(), externalapi.AcceptanceData{}, multiset.New(), nil
"it has a predefined UTXO diff, empty acceptance data, and a predefined multiset", blockHash)
multiset, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}
return utxoDiff, externalapi.AcceptanceData{}, multiset, nil
}
blockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, blockHash, false)

View File

@@ -4,7 +4,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)
@@ -19,8 +18,8 @@ func (csm *consensusStateManager) calculateMultiset(stagingArea *model.StagingAr
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Selected parent is nil, which could only happen for the genesis. " +
"The genesis, by definition, has an empty multiset")
return multiset.New(), nil
"The genesis has a predefined multiset")
return csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash)
}
ms, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockGHOSTDAGData.SelectedParent())

View File

@@ -59,7 +59,8 @@ func (csm *consensusStateManager) pickVirtualParents(stagingArea *model.StagingA
selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent}
mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet
for len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
// First condition implies that no point in searching since limit was already reached
for mergeSetSize < csm.mergeSetSizeLimit && len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
candidate := candidates[0]
candidates = candidates[1:]

View File

@@ -35,7 +35,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
var selectedTip *externalapi.DomainHash
isCompletelyResolved := true
for _, tip := range tips {
log.Infof("Resolving tip %s", tip)
log.Debugf("Resolving tip %s", tip)
resolveStagingArea := model.NewStagingArea()
unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip)
if err != nil {
@@ -46,7 +46,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve
if hasMoreUnverifiedThanMax {
resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve]
log.Infof("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
log.Debugf("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
}
blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true)

View File

@@ -3,8 +3,6 @@ package consensusstatemanager
import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/util/staging"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -129,7 +127,11 @@ func (csm *consensusStateManager) selectedParentInfo(
if lastUnverifiedBlock.Equal(csm.genesisHash) {
log.Debugf("the most recent unverified block is the genesis block, "+
"which by definition has status: %s", externalapi.StatusUTXOValid)
return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxo.NewUTXODiff(), nil
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, lastUnverifiedBlock)
if err != nil {
return nil, 0, nil, err
}
return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxoDiff, nil
}
lastUnverifiedBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, lastUnverifiedBlock, false)
if err != nil {

View File

@@ -285,7 +285,7 @@ func TestTransactionAcceptance(t *testing.T) {
if err != nil {
t.Fatalf("Error getting blockF: %+v", err)
}
updatedDAAScoreVirtualBlock := 26
updatedDAAScoreVirtualBlock := consensusConfig.GenesisBlock.Header.DAAScore() + 26
// We expect the second transaction in the "blue block" (blueChildOfRedBlock) to be accepted because the merge set is ordered topologically
// and the red block is ordered topologically before the "blue block", so the input is known in the UTXOSet.
expectedAcceptanceData := externalapi.AcceptanceData{

View File

@@ -40,12 +40,12 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"C", "D"},
id: "E",
expectedWindow: []string{"C", "D", "B"},
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindow: []string{"C", "D", "B"},
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"A"},
@@ -60,37 +60,38 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "H", "C", "D", "B", "G"},
expectedWindow: []string{"F", "D", "H", "C", "G", "B"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "H", "C", "D", "B", "G"},
expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"},
},
//
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "H", "C", "D", "B", "G"},
expectedWindow: []string{"J", "I", "F", "D", "H", "C", "G", "B"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "H", "C", "D", "B", "G"},
expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "G", "B"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "H", "C", "D", "B", "G"},
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "G", "B"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "C", "D", "B"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "G"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "C", "D"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
},
},
dagconfig.TestnetParams.Name: {
@@ -132,37 +133,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "H", "C", "D", "G", "B"},
expectedWindow: []string{"F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "H", "C", "D", "G", "B"},
expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "H", "C", "D", "G", "B"},
expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "H", "C", "D", "G", "B"},
expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "H", "C", "D", "G", "B"},
expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "C", "D", "G"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "C", "D"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
},
},
dagconfig.DevnetParams.Name: {
@@ -204,37 +205,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "D", "C", "H", "B", "G"},
expectedWindow: []string{"F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "D", "C", "H", "B", "G"},
expectedWindow: []string{"I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "D", "C", "H", "B", "G"},
expectedWindow: []string{"J", "I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "B", "G"},
expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "B", "G"},
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "B"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
},
},
dagconfig.SimnetParams.Name: {
@@ -276,37 +277,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "D", "H", "C", "G", "B"},
expectedWindow: []string{"F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"},
expectedWindow: []string{"I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "D", "H", "C", "G", "B"},
expectedWindow: []string{"J", "I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "G", "B"},
expectedWindow: []string{"K", "J", "I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "G", "B"},
expectedWindow: []string{"L", "K", "J", "I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "G"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "D", "C", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "D", "C"},
},
},
}

View File

@@ -105,10 +105,14 @@ func (dm *difficultyManager) requiredDifficultyFromTargetsWindow(targetsWindow b
return dm.genesisBits, nil
}
// In the past this was < 2, as the comment explains; we changed it to be under the window size to
// make the hashrate (which is ~1.5GH/s) constant in the first 2641 blocks, so that we won't have a lot of tips
// We need at least 2 blocks to get a timestamp interval
// We could instead clamp the timestamp difference to `targetTimePerBlock`,
// but then everything will cancel out and we'll get the target from the last block, which will be the same as genesis.
if len(targetsWindow) < 2 {
// We add 64 as a safety margin
if len(targetsWindow) < 2 || len(targetsWindow) < dm.difficultyAdjustmentWindowSize {
return dm.genesisBits, nil
}
windowMinTimestamp, windowMaxTimeStamp, windowsMinIndex, _ := targetsWindow.minMaxTimestamps()
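
In effect the retarget now falls back to the genesis difficulty until a full adjustment window exists; a hedged sketch of the guard (stand-in signature, not the difficulty manager's API):

package sketch

// requiredBits sketches the guard above: with fewer than two blocks there is
// no timestamp interval at all, and with less than a full window the early
// hashrate is deliberately pinned to the genesis difficulty.
func requiredBits(windowLen, adjustmentWindowSize int, genesisBits uint32, retarget func() uint32) uint32 {
	if windowLen < 2 || windowLen < adjustmentWindowSize {
		return genesisBits
	}
	return retarget()
}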
@@ -157,7 +161,11 @@ func (dm *difficultyManager) calculateDaaScoreAndAddedBlocks(stagingArea *model.
isBlockWithTrustedData bool) (uint64, []*externalapi.DomainHash, error) {
if blockHash.Equal(dm.genesisHash) {
return 0, nil, nil
genesisHeader, err := dm.headerStore.BlockHeader(dm.databaseContext, stagingArea, dm.genesisHash)
if err != nil {
return 0, nil, err
}
return genesisHeader.DAAScore(), nil, nil
}
ghostdagData, err := dm.ghostdagStore.Get(dm.databaseContext, stagingArea, blockHash, false)

View File

@@ -19,12 +19,6 @@ import (
func TestDifficulty(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Mainnet's genesis is too new, so if we'll build on it we'll get to the future very quickly.
// TODO: Once it gets older, we should unskip this test.
if consensusConfig.Name == "kaspa-mainnet" {
return
}
if consensusConfig.DisableDifficultyAdjustment {
return
}
@@ -37,7 +31,7 @@ func TestDifficulty(t *testing.T) {
}
consensusConfig.K = 1
consensusConfig.DifficultyAdjustmentWindowSize = 265
consensusConfig.DifficultyAdjustmentWindowSize = 140
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDifficulty")
@@ -114,7 +108,7 @@ func TestDifficulty(t *testing.T) {
"window size, the difficulty should be the same as genesis'")
}
}
for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+100; i++ {
for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+10; i++ {
tip, tipHash = addBlock(0, tipHash)
if tip.Header.Bits() != consensusConfig.GenesisBlock.Header.Bits() {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
@@ -136,9 +130,9 @@ func TestDifficulty(t *testing.T) {
var expectedBits uint32
switch consensusConfig.Name {
case dagconfig.TestnetParams.Name, dagconfig.DevnetParams.Name:
expectedBits = uint32(0x1e7f83df)
expectedBits = uint32(0x1e7f1441)
case dagconfig.MainnetParams.Name:
expectedBits = uint32(0x1e7f83df)
expectedBits = uint32(0x1d02c50f)
}
if tip.Header.Bits() != expectedBits {
@@ -237,7 +231,7 @@ func TestDifficulty(t *testing.T) {
func TestDAAScore(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.DifficultyAdjustmentWindowSize = 265
consensusConfig.DifficultyAdjustmentWindowSize = 86
stagingArea := model.NewStagingArea()
@@ -269,9 +263,9 @@ func TestDAAScore(t *testing.T) {
t.Fatalf("DAAScore: %+v", err)
}
blockBlueScore3ExpectedDAAScore := uint64(2)
blockBlueScore3ExpectedDAAScore := uint64(2) + consensusConfig.GenesisBlock.Header.DAAScore()
if blockBlueScore3DAAScore != blockBlueScore3ExpectedDAAScore {
t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3ExpectedDAAScore)
t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3DAAScore)
}
tipDAAScore := blockBlueScore3ExpectedDAAScore

View File

@@ -0,0 +1,41 @@
package parentssanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)
type parentsManager struct {
genesisHash *externalapi.DomainHash
}
// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
return &parentsManager{
genesisHash: genesisHash,
}
}
func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
var parentsAtLevel externalapi.BlockLevelParents
if len(blockHeader.Parents()) > level {
parentsAtLevel = blockHeader.Parents()[level]
}
if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 {
return externalapi.BlockLevelParents{pm.genesisHash}
}
return parentsAtLevel
}
func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
numParents := constants.MaxBlockLevel + 1
parents := make([]externalapi.BlockLevelParents, numParents)
for i := 0; i < numParents; i++ {
parents[i] = pm.ParentsAtLevel(blockHeader, i)
}
return parents
}
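
The fallback the new manager implements can be summarized in a simplified sketch (string hashes stand in for DomainHash): a non-genesis header with no recorded parents at some level is treated as pointing at genesis on that level, since post-hardfork headers no longer list genesis explicitly.

package sketch

// parentsAtLevel sketches the logic above. parents[0] holds the direct
// parents; an empty level on a non-genesis block implies the genesis hash.
func parentsAtLevel(parents [][]string, level int, genesisHash string) []string {
	var atLevel []string
	if len(parents) > level {
		atLevel = parents[level]
	}
	if len(atLevel) == 0 && len(parents) > 0 && len(parents[0]) > 0 {
		return []string{genesisHash}
	}
	return atLevel
}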

View File

@@ -2,7 +2,6 @@ package pruningmanager_test
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"os"
"path/filepath"
@@ -37,7 +36,7 @@ func TestPruning(t *testing.T) {
dagconfig.SimnetParams.Name: "1582",
},
"dag-for-test-pruning.json": {
dagconfig.MainnetParams.Name: "502",
dagconfig.MainnetParams.Name: "503",
dagconfig.TestnetParams.Name: "502",
dagconfig.DevnetParams.Name: "502",
dagconfig.SimnetParams.Name: "502",
@@ -45,6 +44,8 @@ func TestPruning(t *testing.T) {
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Improve the performance of the test a little
consensusConfig.DisableDifficultyAdjustment = true
err := filepath.Walk("./testdata", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -72,6 +73,7 @@ func TestPruning(t *testing.T) {
consensusConfig.DifficultyAdjustmentWindowSize = 400
factory := consensus.NewFactory()
factory.SetTestLevelDBCacheSize(128)
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPruning")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
@@ -140,12 +142,11 @@ func TestPruning(t *testing.T) {
// We expect blocks that are within the difficulty adjustment window size of
// the pruning point and its anticone to not get pruned
unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData()
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
if err != nil {
t.Fatalf("pruningPointAndItsAnticone: %+v", err)
}
for _, block := range pruningPointAndItsAnticone {
blockHash := consensushashing.BlockHash(block.Block)
for _, blockHash := range pruningPointAndItsAnticone {
unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
if err != nil {

View File

@@ -675,7 +675,19 @@ func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPoints(st
onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.calculateDiffBetweenPreviousAndCurrentPruningPoints")
defer onEnd()
if currentPruningHash.Equal(pm.genesisHash) {
return utxo.NewUTXODiff(), nil
iter, err := pm.consensusStateManager.RestorePastUTXOSetIterator(stagingArea, currentPruningHash)
if err != nil {
return nil, err
}
set := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
for ok := iter.First(); ok; ok = iter.Next() {
outpoint, entry, err := iter.Get()
if err != nil {
return nil, err
}
set[*outpoint] = entry
}
return utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(set), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)))
}
pruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea)
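
Since the hard fork, genesis carries a full UTXO set, so the "diff since genesis" is simply the whole restored set as additions; a simplified sketch of that collection step (map types stand in for the real UTXO stores and iterator):

package sketch

// utxoSetAsDiff sketches the genesis branch above: every entry the iterator
// yields goes into the to-add collection, and nothing is removed.
func utxoSetAsDiff(next func() (outpoint, entry string, ok bool)) (toAdd, toRemove map[string]string) {
	toAdd = make(map[string]string)
	toRemove = make(map[string]string) // genesis removes nothing
	for {
		outpoint, entry, ok := next()
		if !ok {
			return toAdd, toRemove
		}
		toAdd[outpoint] = entry
	}
}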
@@ -907,8 +919,8 @@ func (pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pr
return nil
}
func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticoneWithTrustedData")
func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone")
defer onEnd()
stagingArea := model.NewStagingArea()
@@ -922,34 +934,32 @@ func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*extern
return nil, err
}
blocks := make([]*externalapi.BlockWithTrustedData, 0, len(pruningPointAnticone)+1)
pruningPointWithTrustedData, err := pm.blockWithTrustedData(stagingArea, pruningPoint)
if err != nil {
return nil, err
}
for _, blockHash := range pruningPointAnticone {
blockWithTrustedData, err := pm.blockWithTrustedData(stagingArea, blockHash)
// Sorting the blocks in topological order
var sortErr error
sort.Slice(pruningPointAnticone, func(i, j int) bool {
headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i])
if err != nil {
return nil, err
sortErr = err
return false
}
blocks = append(blocks, blockWithTrustedData)
headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j])
if err != nil {
sortErr = err
return false
}
return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0
})
if sortErr != nil {
return nil, sortErr
}
// Sorting the blocks in topological order
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].Block.Header.BlueWork().Cmp(blocks[j].Block.Header.BlueWork()) < 0
})
// The pruning point should always come first
blocks = append([]*externalapi.BlockWithTrustedData{pruningPointWithTrustedData}, blocks...)
return blocks, nil
return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
}
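
Sorting by ascending blue work is what makes this order topological: a block's blue work strictly exceeds that of everything in its past. A hedged sketch with a stand-in header type:

package sketch

import (
	"math/big"
	"sort"
)

type sketchHeader struct {
	hash     string
	blueWork *big.Int
}

// sortByBlueWork sketches the ordering above; after it runs, every header
// appears before all headers that have it in their past.
func sortByBlueWork(headers []sketchHeader) {
	sort.Slice(headers, func(i, j int) bool {
		return headers[i].blueWork.Cmp(headers[j].blueWork) < 0
	})
}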
func (pm *pruningManager) blockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, err

View File

@@ -0,0 +1,5 @@
package pruningproofmanager
import "github.com/kaspanet/kaspad/infrastructure/logger"
var log = logger.RegisterSubSystem("PPMN")

View File

@@ -15,8 +15,8 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"math/big"
)
@@ -28,6 +28,7 @@ type pruningProofManager struct {
ghostdagManagers []model.GHOSTDAGManager
reachabilityManagers []model.ReachabilityManager
dagTraversalManagers []model.DAGTraversalManager
parentsManager model.ParentsManager
ghostdagDataStores []model.GHOSTDAGDataStore
pruningStore model.PruningStore
@@ -39,6 +40,9 @@ type pruningProofManager struct {
genesisHash *externalapi.DomainHash
k externalapi.KType
pruningProofM uint64
cachedPruningPoint *externalapi.DomainHash
cachedProof *externalapi.PruningPointProof
}
// New instantiates a new PruningManager
@@ -49,6 +53,7 @@ func New(
ghostdagManagers []model.GHOSTDAGManager,
reachabilityManagers []model.ReachabilityManager,
dagTraversalManagers []model.DAGTraversalManager,
parentsManager model.ParentsManager,
ghostdagDataStores []model.GHOSTDAGDataStore,
pruningStore model.PruningStore,
@@ -68,6 +73,7 @@ func New(
ghostdagManagers: ghostdagManagers,
reachabilityManagers: reachabilityManagers,
dagTraversalManagers: dagTraversalManagers,
parentsManager: parentsManager,
ghostdagDataStores: ghostdagDataStores,
pruningStore: pruningStore,
@@ -83,6 +89,33 @@ func New(
}
func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof")
defer onEnd()
pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
if err != nil {
return nil, err
}
if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) {
return ppm.cachedProof, nil
}
proof, err := ppm.buildPruningPointProof(stagingArea)
if err != nil {
return nil, err
}
ppm.cachedProof = proof
ppm.cachedPruningPoint = pruningPoint
return proof, nil
}
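
The memoization added above rebuilds the proof only when the pruning point moves; a generic sketch of the pattern (stand-in types, not the pruning proof manager's fields):

package sketch

// proofCache sketches the caching above: the expensive build runs only when
// the key (the pruning point) differs from the one the cached value was built for.
type proofCache struct {
	key   string
	proof []byte
}

func (c *proofCache) get(key string, build func() ([]byte, error)) ([]byte, error) {
	if c.proof != nil && c.key == key {
		return c.proof, nil
	}
	proof, err := build()
	if err != nil {
		return nil, err
	}
	c.proof, c.key = proof, key
	return proof, nil
}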
func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof")
defer onEnd()
pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
if err != nil {
return nil, err
@@ -97,17 +130,33 @@ func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.Stagin
return nil, err
}
maxLevel := len(pruningPointHeader.Parents()) - 1
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
headersByLevel := make(map[int][]externalapi.BlockHeader)
selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
pruningPointLevel := pow.BlockLevel(pruningPointHeader)
pruningPointLevel := pruningPointHeader.BlockLevel()
for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
var selectedTip *externalapi.DomainHash
if blockLevel <= pruningPointLevel {
selectedTip = pruningPoint
} else {
blockLevelParents := pruningPointHeader.ParentsAtLevel(blockLevel)
selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, []*externalapi.DomainHash(blockLevelParents)...)
blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel)
selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents))
// In a pruned node, some pruning point parents might be missing, but we're guaranteed that its
// selected parent is not missing.
for _, parent := range blockLevelParents {
_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue
}
if err != nil {
return nil, err
}
selectedTipCandidates = append(selectedTipCandidates, parent)
}
selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...)
if err != nil {
return nil, err
}
@@ -248,6 +297,9 @@ func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, gho
}
func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof")
defer onEnd()
stagingArea := model.NewStagingArea()
if len(pruningPointProof.Headers) == 0 {
@@ -257,8 +309,8 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
level0Headers := pruningPointProof.Headers[0]
pruningPointHeader := level0Headers[len(level0Headers)-1]
pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
pruningPointBlockLevel := pow.BlockLevel(pruningPointHeader)
maxLevel := len(pruningPointHeader.Parents()) - 1
pruningPointBlockLevel := pruningPointHeader.BlockLevel()
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
if maxLevel >= len(pruningPointProof.Headers) {
return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
"has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1)
@@ -300,15 +352,15 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if pow.BlockLevel(header) < blockLevel {
if header.BlockLevel() < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
}
blockHeaderStore.Stage(stagingArea, blockHash, header)
var parents []*externalapi.DomainHash
for _, parent := range header.ParentsAtLevel(blockLevel) {
for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
_, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue
@@ -377,7 +429,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
}
}
if !selectedTip.Equal(pruningPoint) && !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+
"level %d is not a parent of the pruning point", selectedTip, blockLevel)
}
@@ -395,7 +447,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+
"proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel)
}
} else if !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
} else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+
"proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+
"level", selectedTip, blockLevel)
@@ -554,19 +606,22 @@ func (ppm *pruningProofManager) dagProcesses(
}
func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof")
defer onEnd()
for blockLevel, headers := range pruningPointProof.Headers {
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if pow.BlockLevel(header) < blockLevel {
if header.BlockLevel() < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
}
ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)
var parents []*externalapi.DomainHash
for _, parent := range header.ParentsAtLevel(blockLevel) {
for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue

View File

@@ -49,7 +49,7 @@ func TestCheckLockTimeVerifyConditionedByDAAScore(t *testing.T) {
}
fees := uint64(1)
// Create a CLTV script:
targetDAAScore := uint64(30)
targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30)
redeemScriptCLTV, err := createScriptCLTV(targetDAAScore)
if err != nil {
t.Fatalf("Failed to create a script using createScriptCLTV: %v", err)
@@ -156,7 +156,7 @@ func TestCheckLockTimeVerifyConditionedByDAAScoreWithWrongLockTime(t *testing.T)
}
fees := uint64(1)
// Create a CLTV script:
targetDAAScore := uint64(30)
targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30)
redeemScriptCLTV, err := createScriptCLTV(targetDAAScore)
if err != nil {
t.Fatalf("Failed to create a script using createScriptCLTV: %v", err)

View File

@@ -2,6 +2,7 @@ package blockheader
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"math/big"
)
@@ -18,6 +19,9 @@ type blockHeader struct {
blueScore uint64
blueWork *big.Int
pruningPoint *externalapi.DomainHash
isBlockLevelCached bool
blockLevel int
}
func (bh *blockHeader) BlueScore() uint64 {
@@ -41,10 +45,12 @@ func (bh *blockHeader) ToImmutable() externalapi.BlockHeader {
}
func (bh *blockHeader) SetNonce(nonce uint64) {
bh.isBlockLevelCached = false
bh.nonce = nonce
}
func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) {
bh.isBlockLevelCached = false
bh.timeInMilliseconds = timeInMilliseconds
}
@@ -56,16 +62,12 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
return bh.parents
}
func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
if len(bh.parents) <= level {
func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
if len(bh.parents) == 0 {
return externalapi.BlockLevelParents{}
}
return bh.parents[level]
}
func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
return bh.ParentsAtLevel(0)
return bh.parents[0]
}
func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
@@ -177,6 +179,15 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
return bh.clone()
}
func (bh *blockHeader) BlockLevel() int {
if !bh.isBlockLevelCached {
bh.blockLevel = pow.BlockLevel(bh)
bh.isBlockLevelCached = true
}
return bh.blockLevel
}
// NewImmutableBlockHeader returns a new immutable header
func NewImmutableBlockHeader(
version uint16,
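
The header now caches its block level and invalidates the cache whenever a PoW-affecting field changes; a minimal sketch of that lazy-compute-with-invalidation pattern (simplified struct, not the real blockHeader):

package sketch

// levelCache sketches the caching above: BlockLevel is derived from the PoW
// hash, so it is computed lazily and the cache is dropped when the nonce (or,
// in the real header, the timestamp) changes.
type levelCache struct {
	nonce       uint64
	level       int
	levelCached bool
}

func (h *levelCache) SetNonce(nonce uint64) {
	h.levelCached = false // the PoW hash changes, so the level may too
	h.nonce = nonce
}

func (h *levelCache) BlockLevel(compute func(nonce uint64) int) int {
	if !h.levelCached {
		h.level = compute(h.nonce)
		h.levelCached = true
	}
	return h.level
}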

View File

@@ -36,6 +36,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
8,
big.NewInt(9),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}),
false,
0,
},
expectedResult: false,
},
@@ -55,6 +57,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
headersToCompareTo: []headerToCompare{
{
@@ -75,6 +79,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: true,
},
@@ -92,6 +98,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -111,6 +119,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -128,6 +138,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -145,6 +157,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -162,6 +176,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -179,6 +195,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -196,6 +214,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -213,6 +233,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -230,6 +252,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -247,6 +271,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -264,6 +290,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
100,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -281,6 +309,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(100),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -298,6 +328,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}),
false,
0,
},
expectedResult: false,
},

View File

@@ -37,5 +37,7 @@ const (
LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
// MaxBlockLevel is the maximum possible block level.
MaxBlockLevel = 255
// This is technically 255, but we clamp it at 256 minus the block level of the mainnet genesis.
// This means that any block with a level lower than or equal to genesis' will be level 0.
MaxBlockLevel = 225
)

View File

@@ -104,9 +104,10 @@ func BlockLevel(header externalapi.BlockHeader) int {
}
proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
for blockLevel := 0; ; blockLevel++ {
if blockLevel == constants.MaxBlockLevel || proofOfWorkValue.Bit(blockLevel+1) != 0 {
return blockLevel
}
level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
// If the block has a level lower than genesis, make it zero.
if level < 0 {
level = 0
}
return level
}
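
The bit-by-bit loop is replaced with a single bit-length computation: the level is how far the PoW value falls below the clamped maximum. A sketch of the arithmetic:

package sketch

import "math/big"

// blockLevel sketches the new calculation: MaxBlockLevel minus the bit length
// of the 256-bit proof-of-work value, floored at zero so blocks at or below
// the genesis level map to level 0.
func blockLevel(proofOfWorkValue *big.Int, maxBlockLevel int) int {
	level := maxBlockLevel - proofOfWorkValue.BitLen()
	if level < 0 {
		return 0
	}
	return level
}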

View File

@@ -22,7 +22,7 @@ import (
//
const (
defaultMaxCoinbasePayloadLength = 172
defaultMaxCoinbasePayloadLength = 204
// defaultMaxBlockMass is a bound on the mass of a block, larger values increase the bound d
// on the round trip time of a block, which affects the other parameters as described below
defaultMaxBlockMass = 500_000
@@ -49,7 +49,7 @@ const (
defaultMergeSetSizeLimit = defaultGHOSTDAGK * 10
defaultSubsidyGenesisReward = 1 * constants.SompiPerKaspa
defaultMinSubsidy = 1 * constants.SompiPerKaspa
defaultMaxSubsidy = 1000 * constants.SompiPerKaspa
defaultMaxSubsidy = 500 * constants.SompiPerKaspa
defaultBaseSubsidy = 50 * constants.SompiPerKaspa
defaultFixedSubsidySwitchPruningPointInterval uint64 = 7
defaultCoinbasePayloadScriptPublicKeyMaxLength = 150

View File

@@ -36,10 +36,14 @@ var genesisTxPayload = []byte{
0x20, 0xd7, 0x90, 0xd7, 0x9c, 0xd7, 0x94, 0xd7,
0x9b, 0xd7, 0x9d, 0x20, 0xd7, 0xaa, 0xd7, 0xa2,
0xd7, 0x91, 0xd7, 0x93, 0xd7, 0x95, 0xd7, 0x9f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Bitcoin block hash 00000000000000000001733c62adb19f1b77fa0735d0e11f25af36fc9ca908a5
0x00, 0x01, 0x73, 0x3c, 0x62, 0xad, 0xb1, 0x9f,
0x1b, 0x77, 0xfa, 0x07, 0x35, 0xd0, 0xe1, 0x1f,
0x25, 0xaf, 0x36, 0xfc, 0x9c, 0xa9, 0x08, 0xa5,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Bitcoin block hash 0000000000000000000b1f8e1c17b0133d439174e52efbb0c41c3583a8aa66b0
0x00, 0x0b, 0x1f, 0x8e, 0x1c, 0x17, 0xb0, 0x13,
0x3d, 0x43, 0x91, 0x74, 0xe5, 0x2e, 0xfb, 0xb0,
0xc4, 0x1c, 0x35, 0x83, 0xa8, 0xaa, 0x66, 0xb0,
0x0f, 0xca, 0x37, 0xca, 0x66, 0x7c, 0x2d, 0x55, // Checkpoint block hash 0fca37ca667c2d550a6c4416dad9717e50927128c424fa4edbebc436ab13aeef
0x0a, 0x6c, 0x44, 0x16, 0xda, 0xd9, 0x71, 0x7e,
0x50, 0x92, 0x71, 0x28, 0xc4, 0x24, 0xfa, 0x4e,
0xdb, 0xeb, 0xc4, 0x36, 0xab, 0x13, 0xae, 0xef,
}
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
@@ -50,19 +54,13 @@ var genesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, []*externa
// genesisHash is the hash of the first block in the block DAG for the main
// network (genesis block).
var genesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0xca, 0xeb, 0x97, 0x96, 0x0a, 0x16, 0x0c, 0x21,
0x1a, 0x6b, 0x21, 0x96, 0xbd, 0x78, 0x39, 0x9f,
0xd4, 0xc4, 0xcc, 0x5b, 0x50, 0x9f, 0x55, 0xc1,
0x2c, 0x8a, 0x7d, 0x81, 0x5f, 0x75, 0x36, 0xea,
0x58, 0xc2, 0xd4, 0x19, 0x9e, 0x21, 0xf9, 0x10, 0xd1, 0x57, 0x1d, 0x11, 0x49, 0x69, 0xce, 0xce, 0xf4, 0x8f, 0x9, 0xf9, 0x34, 0xd4, 0x2c, 0xcb, 0x6a, 0x28, 0x1a, 0x15, 0x86, 0x8f, 0x29, 0x99,
})
// genesisMerkleRoot is the hash of the first transaction in the genesis block
// for the main network.
var genesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0xca, 0xed, 0xaf, 0x7d, 0x4a, 0x08, 0xbb, 0xe8,
0x90, 0x11, 0x64, 0x0c, 0x48, 0x41, 0xb6, 0x6d,
0x5b, 0xba, 0x67, 0xd7, 0x28, 0x8c, 0xe6, 0xd6,
0x72, 0x28, 0xdb, 0x00, 0x09, 0x66, 0xe9, 0x74,
0x8e, 0xc8, 0x98, 0x56, 0x8c, 0x68, 0x1, 0xd1, 0x3d, 0xf4, 0xee, 0x6e, 0x2a, 0x1b, 0x54, 0xb7, 0xe6, 0x23, 0x6f, 0x67, 0x1f, 0x20, 0x95, 0x4f, 0x5, 0x30, 0x64, 0x10, 0x51, 0x8e, 0xeb, 0x32,
})
// genesisBlock defines the genesis block of the block DAG which serves as the
@@ -73,11 +71,13 @@ var genesisBlock = externalapi.DomainBlock{
[]externalapi.BlockLevelParents{},
genesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17cfb020c02,
0x1e7fffff,
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x71, 0x0f, 0x27, 0xdf, 0x42, 0x3e, 0x63, 0xaa, 0x6c, 0xdb, 0x72, 0xb8, 0x9e, 0xa5, 0xa0, 0x6c, 0xff, 0xa3, 0x99, 0xd6, 0x6f, 0x16, 0x77, 0x04, 0x45, 0x5b, 0x5a, 0xf5, 0x9d, 0xef, 0x8e, 0x20,
}),
1637609671037,
486722099,
0x3392c,
0,
1312860, // Checkpoint DAA score
0,
big.NewInt(0),
&externalapi.DomainHash{},

View File

@@ -211,7 +211,15 @@ var MainnetParams = Params{
Net: appmessage.Mainnet,
RPCPort: "16110",
DefaultPort: "16111",
DNSSeeds: []string{"mainnet-dnsseed.daglabs-dev.com"},
DNSSeeds: []string{
"mainnet-dnsseed.daglabs-dev.com",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-1.kaspanet.org",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-2.kaspanet.org",
// This DNS seeder is run by Elichai Turkel
"kaspa.turkel.in",
},
// DAG parameters
GenesisBlock: &genesisBlock,
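Three seeders join the single daglabs-dev.com entry. Each seed domain resolves to addresses of reachable peers; a standalone sketch of the underlying mechanism using only the standard library (the node's actual seeding code is more involved):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Query one of the seeders listed above for candidate peers.
	ips, err := net.LookupIP("mainnet-dnsseed-1.kaspanet.org")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	for _, ip := range ips {
		fmt.Printf("%s:16111\n", ip) // mainnet DefaultPort from the params above
	}
}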

View File

@@ -40,7 +40,7 @@ func TestValidateAndInsertTransaction(t *testing.T) {
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionsToInsert := make([]*externalapi.DomainTransaction, 10)
for i := range transactionsToInsert {
transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i)
transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i, 0)
_, err = miningManager.ValidateAndInsertTransaction(transactionsToInsert[i], false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
@@ -89,7 +89,7 @@ func TestImmatureSpend(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
tx := createTransactionWithUTXOEntry(t, 0)
tx := createTransactionWithUTXOEntry(t, 0, consensusConfig.GenesisBlock.Header.DAAScore())
_, err = miningManager.ValidateAndInsertTransaction(tx, false, false)
txRuleError := &mempool.TxRuleError{}
if !errors.As(err, txRuleError) || txRuleError.RejectCode != mempool.RejectImmatureSpend {
@@ -119,7 +119,7 @@ func TestInsertDoubleTransactionsToMempool(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transaction := createTransactionWithUTXOEntry(t, 0)
transaction := createTransactionWithUTXOEntry(t, 0, 0)
_, err = miningManager.ValidateAndInsertTransaction(transaction, false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
@@ -186,7 +186,7 @@ func TestHandleNewBlockTransactions(t *testing.T) {
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionsToInsert := make([]*externalapi.DomainTransaction, 10)
for i := range transactionsToInsert {
transaction := createTransactionWithUTXOEntry(t, i)
transaction := createTransactionWithUTXOEntry(t, i, 0)
transactionsToInsert[i] = transaction
_, err = miningManager.ValidateAndInsertTransaction(transaction, false, true)
if err != nil {
@@ -253,12 +253,12 @@ func TestDoubleSpendWithBlock(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionInTheMempool := createTransactionWithUTXOEntry(t, 0)
transactionInTheMempool := createTransactionWithUTXOEntry(t, 0, 0)
_, err = miningManager.ValidateAndInsertTransaction(transactionInTheMempool, false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
}
doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0)
doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0, 0)
doubleSpendTransactionInTheBlock.Inputs[0].PreviousOutpoint = transactionInTheMempool.Inputs[0].PreviousOutpoint
blockTransactions := []*externalapi.DomainTransaction{nil, doubleSpendTransactionInTheBlock}
_, err = miningManager.HandleNewBlockTransactions(blockTransactions)
@@ -571,7 +571,7 @@ func TestRevalidateHighPriorityTransactions(t *testing.T) {
})
}
func createTransactionWithUTXOEntry(t *testing.T, i int) *externalapi.DomainTransaction {
func createTransactionWithUTXOEntry(t *testing.T, i int, daaScore uint64) *externalapi.DomainTransaction {
prevOutTxID := externalapi.DomainTransactionID{}
prevOutPoint := externalapi.DomainOutpoint{TransactionID: prevOutTxID, Index: uint32(i)}
scriptPublicKey, redeemScript := testutils.OpTrueScript()
@@ -587,7 +587,7 @@ func createTransactionWithUTXOEntry(t *testing.T, i int) *externalapi.DomainTran
100000000, // 1 KAS
scriptPublicKey,
true,
uint64(0)),
daaScore),
}
txOut := externalapi.DomainTransactionOutput{
Value: 10000,
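createTransactionWithUTXOEntry now takes the DAA score to stamp on the fake UTXO entry. TestImmatureSpend passes the genesis header's DAA score (the checkpoint score after the hard fork), which presumably makes the entry too recent to spend. A sketch of the maturity rule being exercised, assuming maturity is measured in DAA score (hypothetical helper, not mempool code):

// A coinbase output recorded at entryDAAScore is spendable only once the
// virtual DAA score has advanced past it by the coinbase-maturity window.
func isMatureCoinbase(entryDAAScore, virtualDAAScore, coinbaseMaturity uint64) bool {
	return virtualDAAScore >= entryDAAScore+coinbaseMaturity
}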

View File

@@ -29,7 +29,6 @@ import (
const (
defaultConfigFilename = "kaspad.conf"
defaultDataDirname = "data"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "kaspad.log"
@@ -86,7 +85,7 @@ type Flags struct {
Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 16111, testnet: 16211)"`
TargetOutboundPeers int `long:"outpeers" description:"Target number of outbound peers"`
MaxInboundPeers int `long:"maxinpeers" description:"Max number of inbound peers"`
DisableBanning bool `long:"nobanning" description:"Disable banning of misbehaving peers"`
EnableBanning bool `long:"enablebanning" description:"Enable banning of misbehaving peers"`
BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`
Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 192.168.1.0/24 or ::1)"`
@@ -121,6 +120,7 @@ type Flags struct {
MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"`
UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"`
IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)"`
AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"`
EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"`
NetworkFlags
ServiceOptions *ServiceOptions
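Peer banning becomes opt-in: the opt-out --nobanning flag is replaced by --enablebanning, and a hidden --allow-submit-block-when-not-synced flag is added for the stability tests further down. The `long:` struct tags follow the jessevdk/go-flags convention; a trimmed, hypothetical parse showing how the new flag is read:

package main

import (
	"fmt"

	flags "github.com/jessevdk/go-flags"
)

type cfg struct {
	EnableBanning bool `long:"enablebanning" description:"Enable banning of misbehaving peers"`
}

func main() {
	c := &cfg{}
	if _, err := flags.ParseArgs(c, []string{"--enablebanning"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("banning enabled:", c.EnableBanning) // true
}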

View File

@@ -48,6 +48,7 @@ type overrideDAGParamsConfig struct {
EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"`
DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"`
SkipProofOfWork *bool `json:"skipProofOfWork"`
HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
}
// ResolveNetwork parses the network command line argument and sets NetParams accordingly.
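The override config gains the hard-fork activation score. Using a *uint64 lets the node tell "field absent" apart from an explicit zero; a self-contained round-trip with the value the stability-test JSON below uses (2505):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed to the new field for illustration.
type overrideDAGParamsConfig struct {
	HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
}

func main() {
	var cfg overrideDAGParamsConfig
	raw := []byte(`{"hardForkOmitGenesisFromParentsDaaScore": 2505}`)
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	if cfg.HardForkOmitGenesisFromParentsDAAScore != nil {
		fmt.Println(*cfg.HardForkOmitGenesisFromParentsDAAScore) // 2505
	}
}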

View File

@@ -66,7 +66,7 @@ func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
log.Debugf("Connecting to connection request %s", connReq.address)
err := c.initiateConnection(connReq.address)
if err != nil {
log.Infof("Couldn't connect to %s: %s", address, err)
log.Infof("Couldn't connect to requested connection %s: %s", address, err)
// if connection request is one try - remove from pending and ignore failure
if !connReq.isPermanent {
delete(c.pendingRequested, address)

View File

@@ -41,7 +41,7 @@ func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {
err := c.initiateConnection(addressString)
if err != nil {
log.Infof("Couldn't connect to %s: %s", addressString, err)
log.Debugf("Couldn't connect to %s: %s", addressString, err)
c.addressManager.MarkConnectionFailure(netAddress)
continue
}

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
// +build !windows,!plan9
package limits

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package signal
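Both hunks add the Go 1.17 //go:build constraint above the legacy // +build line; gofmt keeps the two forms in sync. A minimal file using the same paired constraints (hypothetical, license header omitted):

//go:build !windows && !plan9
// +build !windows,!plan9

package limits

// Code in this file compiles only where both constraint lines hold,
// e.g. on Linux and Darwin but not on Windows or Plan 9.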

View File

@@ -115,6 +115,7 @@ func startNode() (teardown func(), err error) {
"--logdir", dataDir,
"--rpclisten", rpcAddress,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err

View File

@@ -1 +1 @@
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000}
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505}

View File

@@ -38,6 +38,7 @@ func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir st
"--listen", listen,
"--profile", profilePort,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
}
if connect != "" {
args = append(args, "--connect", connect)

View File

@@ -44,6 +44,7 @@ func startNodes() (teardown func(), err error) {
"--rpclisten", syncerRPCAddress,
"--listen", syncerListen,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err

View File

@@ -36,6 +36,7 @@ func setConfig(t *testing.T, harness *appHarness) {
harness.config.Listeners = []string{harness.p2pAddress}
harness.config.RPCListeners = []string{harness.rpcAddress}
harness.config.UTXOIndex = harness.utxoIndex
harness.config.AllowSubmitBlockWhenNotSynced = true
if harness.overrideDAGParams != nil {
harness.config.ActiveNetParams = harness.overrideDAGParams

View File

@@ -14,7 +14,7 @@ import (
func TestGetHashrateString(t *testing.T) {
var results = map[string]string{
dagconfig.MainnetParams.Name: "131.07 KH/s",
dagconfig.MainnetParams.Name: "1.53 GH/s",
dagconfig.TestnetParams.Name: "131.07 KH/s",
dagconfig.DevnetParams.Name: "131.07 KH/s",
dagconfig.SimnetParams.Name: "2.00 KH/s",
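The expected mainnet string grows from 131.07 KH/s to 1.53 GH/s because the new genesis bits (486722099 = 0x1d02ca33) encode a much smaller target than the old 0x1e7fffff; the decoded targets differ by a factor of about 11,700, matching the two strings up to display rounding. A generic sketch of Bitcoin-style compact-bits decoding (sign bit ignored; not kaspad's difficulty package):

package main

import (
	"fmt"
	"math/big"
)

// compactToBig expands a compact "bits" value into the full 256-bit target;
// a smaller target means higher difficulty and hence a higher hashrate string.
func compactToBig(compact uint32) *big.Int {
	mantissa := int64(compact & 0x007fffff)
	exponent := uint(compact >> 24)
	if exponent <= 3 {
		return big.NewInt(mantissa >> (8 * (3 - exponent)))
	}
	return new(big.Int).Lsh(big.NewInt(mantissa), 8*(exponent-3))
}

func main() {
	oldTarget := compactToBig(0x1e7fffff)
	newTarget := compactToBig(486722099)
	fmt.Println(new(big.Int).Div(oldTarget, newTarget)) // ~11745
}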

View File

@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 11
appPatch uint = 3
appPatch uint = 6
)
// appBuild is defined as a variable so it can be overridden during the build
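With the patch bump the constants describe release 0.11.6. A hypothetical rendering of how such components conventionally combine (the real formatting, including appBuild, lives elsewhere in this package):

package version

import "fmt"

// versionString is an illustrative helper, not the package's actual API.
func versionString(major, minor, patch uint) string {
	return fmt.Sprintf("%d.%d.%d", major, minor, patch) // "0.11.6" for the values above
}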