Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-11-27 07:48:44 +00:00)

Commit 4d36f2c561
Merge remote-tracking branch 'origin/v0.11.0-dev' into testsLoctTimeAndSequence
@@ -4,6 +4,7 @@ package appmessage
 // its respective RPC message
 type EstimateNetworkHashesPerSecondRequestMessage struct {
 	baseMessage
+	StartHash  string
 	WindowSize uint32
 }

@@ -13,8 +14,9 @@ func (msg *EstimateNetworkHashesPerSecondRequestMessage) Command() MessageComman
 }

 // NewEstimateNetworkHashesPerSecondRequestMessage returns a instance of the message
-func NewEstimateNetworkHashesPerSecondRequestMessage(windowSize uint32) *EstimateNetworkHashesPerSecondRequestMessage {
+func NewEstimateNetworkHashesPerSecondRequestMessage(startHash string, windowSize uint32) *EstimateNetworkHashesPerSecondRequestMessage {
 	return &EstimateNetworkHashesPerSecondRequestMessage{
+		StartHash:  startHash,
 		WindowSize: windowSize,
 	}
 }
@@ -130,7 +130,7 @@ type fakeRelayInvsContext struct {
 	rwLock sync.RWMutex
 }

-func (f *fakeRelayInvsContext) EstimateNetworkHashesPerSecond(windowSize int) (uint64, error) {
+func (f *fakeRelayInvsContext) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
 	panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
 }

@@ -3,19 +3,35 @@ package rpchandlers
 import (
 	"github.com/kaspanet/kaspad/app/appmessage"
 	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
+	"github.com/kaspanet/kaspad/domain/consensus/model"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )

 // HandleEstimateNetworkHashesPerSecond handles the respectively named RPC command
-func HandleEstimateNetworkHashesPerSecond(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+func HandleEstimateNetworkHashesPerSecond(
+	context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+
 	estimateNetworkHashesPerSecondRequest := request.(*appmessage.EstimateNetworkHashesPerSecondRequestMessage)

 	windowSize := int(estimateNetworkHashesPerSecondRequest.WindowSize)
-	networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(windowSize)
+	startHash := model.VirtualBlockHash
+	if estimateNetworkHashesPerSecondRequest.StartHash != "" {
+		var err error
+		startHash, err = externalapi.NewDomainHashFromString(estimateNetworkHashesPerSecondRequest.StartHash)
+		if err != nil {
+			response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
+			response.Error = appmessage.RPCErrorf("StartHash '%s' is not a valid block hash",
+				estimateNetworkHashesPerSecondRequest.StartHash)
+			return response, nil
+		}
+	}
+
+	networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
 	if err != nil {
 		response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
 		response.Error = appmessage.RPCErrorf("could not resolve network hashes per "+
-			"second for window size %d: %s", windowSize, err)
+			"second for startHash %s and window size %d: %s", startHash, windowSize, err)
 		return response, nil
 	}

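For context on the two hunks above: the request now carries an optional start hash, and the handler falls back to the virtual block when it is empty. Below is a minimal, hypothetical caller-side sketch; only NewEstimateNetworkHashesPerSecondRequestMessage, StartHash and WindowSize come from the diff, the surrounding program is illustrative and the hash string is a placeholder.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// An empty StartHash makes HandleEstimateNetworkHashesPerSecond fall back to
	// model.VirtualBlockHash, i.e. the estimate is taken from the window ending at the virtual.
	fromVirtual := appmessage.NewEstimateNetworkHashesPerSecondRequestMessage("", 1000)

	// A non-empty StartHash is parsed with externalapi.NewDomainHashFromString; an invalid
	// string makes the handler return an RPC error instead of an estimate.
	fromBlock := appmessage.NewEstimateNetworkHashesPerSecondRequestMessage(
		"0000000000000000000000000000000000000000000000000000000000000000", 1000)

	fmt.Println(fromVirtual.WindowSize, fromBlock.StartHash)
}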
@@ -48,6 +48,10 @@ func setField(commandValue reflect.Value, parameterValue reflect.Value, paramete
 }

 func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflect.Value, error) {
+	if valueStr == "-" {
+		return reflect.Zero(parameterDesc.typeof), nil
+	}
+
 	var value interface{}
 	var err error
 	switch parameterDesc.typeof.Kind() {
@@ -27,7 +27,8 @@ func parseConfig() (*configFlags, error) {
 	}
 	parser := flags.NewParser(cfg, flags.HelpFlag)
 	parser.Usage = "kaspactl [OPTIONS] [COMMAND] [COMMAND PARAMETERS].\n\nCommand can be supplied only if --json is not used." +
-		"\n\nUse `kaspactl --list-commands` to get a list of all commands and their parameters"
+		"\n\nUse `kaspactl --list-commands` to get a list of all commands and their parameters." +
+		"\nFor optional parameters- use '-' without quotes to not pass the parameter.\n"
 	remainingArgs, err := parser.Parse()
 	if err != nil {
 		return nil, err
@@ -1,6 +1,12 @@
 package server

 import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/kaspanet/kaspad/util"
+
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/kaspanet/kaspad/util/panics"
 )
@@ -9,4 +15,33 @@ var (
 	backendLog = logger.NewBackend()
 	log        = backendLog.Logger("KSWD")
 	spawn      = panics.GoroutineWrapperFunc(log)
+
+	defaultAppDir     = util.AppDir("kaspawallet", false)
+	defaultLogFile    = filepath.Join(defaultAppDir, "daemon.log")
+	defaultErrLogFile = filepath.Join(defaultAppDir, "daemon_err.log")
 )
+
+func initLog(logFile, errLogFile string) {
+	log.SetLevel(logger.LevelDebug)
+	err := backendLog.AddLogFile(logFile, logger.LevelTrace)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logger.LevelTrace, err)
+		os.Exit(1)
+	}
+	err = backendLog.AddLogFile(errLogFile, logger.LevelWarn)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logger.LevelWarn, err)
+		os.Exit(1)
+	}
+	err = backendLog.AddLogWriter(os.Stdout, logger.LevelInfo)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", logger.LevelWarn, err)
+		os.Exit(1)
+	}
+	err = backendLog.Run()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err)
+		os.Exit(1)
+	}
+
+}
@@ -2,6 +2,11 @@ package server

 import (
 	"fmt"
+	"net"
+	"os"
+	"sync"
+	"time"
+
 	"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
 	"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -10,10 +15,6 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/os/signal"
 	"github.com/kaspanet/kaspad/util/panics"
 	"github.com/pkg/errors"
-	"net"
-	"os"
-	"sync"
-	"time"

 	"google.golang.org/grpc"
 )
@@ -33,6 +34,8 @@ type server struct {

 // Start starts the kaspawalletd server
 func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath string) error {
+	initLog(defaultLogFile, defaultErrLogFile)
+
 	defer panics.HandlePanic(log, "MAIN", nil)
 	interrupt := signal.InterruptListener()

@@ -40,6 +43,7 @@ func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath stri
 	if err != nil {
 		return (errors.Wrapf(err, "Error listening to tcp at %s", listen))
 	}
+	log.Infof("Listening on %s", listen)

 	rpcClient, err := connectToRPC(params, rpcServer)
 	if err != nil {
@@ -2,6 +2,7 @@ package libkaspawallet

 import (
 	"fmt"
+
 	"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/bip32"
 	"github.com/kaspanet/kaspad/domain/dagconfig"
 	"github.com/pkg/errors"
@@ -15,23 +16,23 @@ func CreateMnemonic() (string, error) {
 	return bip39.NewMnemonic(entropy)
 }

+// Purpose and CoinType constants
+const (
+	SingleSignerPurpose = 44
+	// Note: this is not entirely compatible to BIP 45 since
+	// BIP 45 doesn't have a coin type in its derivation path.
+	MultiSigPurpose = 45
+	// TODO: Register the coin type in https://github.com/satoshilabs/slips/blob/master/slip-0044.md
+	CoinType = 111111
+)
+
 func defaultPath(isMultisig bool) string {
-	const (
-		singleSignerPurpose = 44
-
-		// Note: this is not entirely compatible to BIP 45 since
-		// BIP 45 doesn't have a coin type in its derivation path.
-		multiSigPurpose = 45
-	)
-
-	purpose := singleSignerPurpose
+	purpose := SingleSignerPurpose
 	if isMultisig {
-		purpose = multiSigPurpose
+		purpose = MultiSigPurpose
 	}

-	// TODO: Register the coin type in https://github.com/satoshilabs/slips/blob/master/slip-0044.md
-	const coinType = 111111
-	return fmt.Sprintf("m/%d'/%d'/0'", coinType, purpose)
+	return fmt.Sprintf("m/%d'/%d'/0'", purpose, CoinType)
 }

 // MasterPublicKeyFromMnemonic returns the master public key with the correct derivation for the given mnemonic.
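As an aside on the derivation-path hunk above, the rewritten defaultPath now formats the path as m/purpose'/coinType'/0' (the old code passed coinType before purpose and therefore produced m/111111'/44'/0'). A small stand-alone sketch of the resulting paths; the constants and format string are taken from the diff, everything else is illustrative.

package main

import "fmt"

// Constants as introduced in the hunk above.
const (
	SingleSignerPurpose = 44
	MultiSigPurpose     = 45
	CoinType            = 111111
)

func main() {
	fmt.Printf("m/%d'/%d'/0'\n", SingleSignerPurpose, CoinType) // m/44'/111111'/0' (single signer)
	fmt.Printf("m/%d'/%d'/0'\n", MultiSigPurpose, CoinType)     // m/45'/111111'/0' (multisig)
}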
@@ -1,14 +1,15 @@
 package libkaspawallet

 import (
+	"math"
+	"sort"
+	"strings"
+
 	"github.com/kaspanet/go-secp256k1"
 	"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/bip32"
 	"github.com/kaspanet/kaspad/domain/dagconfig"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/pkg/errors"
-	"math"
-	"sort"
-	"strings"
 )

 // CreateKeyPair generates a private-public key pair
@@ -3,13 +3,15 @@ package utils
 import (
 	"bufio"
 	"strings"
+
+	"github.com/pkg/errors"
 )

 // ReadLine reads one line from the given reader with trimmed white space.
 func ReadLine(reader *bufio.Reader) (string, error) {
 	line, err := reader.ReadBytes('\n')
 	if err != nil {
-		return "", err
+		return "", errors.WithStack(err)
 	}

 	return strings.TrimSpace(string(line)), nil
@@ -536,9 +536,9 @@ func (s *consensus) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.
 	return s.dagTraversalManager.Anticone(stagingArea, blockHash)
 }

-func (s *consensus) EstimateNetworkHashesPerSecond(windowSize int) (uint64, error) {
+func (s *consensus) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
 	s.lock.Lock()
 	defer s.lock.Unlock()

-	return s.difficultyManager.EstimateNetworkHashesPerSecond(windowSize)
+	return s.difficultyManager.EstimateNetworkHashesPerSecond(startHash, windowSize)
 }
@@ -33,5 +33,5 @@ type Consensus interface {
 	IsInSelectedParentChainOf(blockHashA *DomainHash, blockHashB *DomainHash) (bool, error)
 	GetHeadersSelectedTip() (*DomainHash, error)
 	Anticone(blockHash *DomainHash) ([]*DomainHash, error)
-	EstimateNetworkHashesPerSecond(windowSize int) (uint64, error)
+	EstimateNetworkHashesPerSecond(startHash *DomainHash, windowSize int) (uint64, error)
 }
@@ -67,13 +67,3 @@ func (bgd *BlockGHOSTDAGData) MergeSetReds() []*externalapi.DomainHash {
 func (bgd *BlockGHOSTDAGData) BluesAnticoneSizes() map[externalapi.DomainHash]KType {
 	return bgd.bluesAnticoneSizes
 }
-
-// MergeSet returns the whole MergeSet of the block (equivalent to MergeSetBlues+MergeSetReds)
-func (bgd *BlockGHOSTDAGData) MergeSet() []*externalapi.DomainHash {
-	mergeSet := make([]*externalapi.DomainHash, len(bgd.mergeSetBlues)+len(bgd.mergeSetReds))
-	copy(mergeSet, bgd.mergeSetBlues)
-	if len(bgd.mergeSetReds) > 0 {
-		copy(mergeSet[len(bgd.mergeSetBlues):], bgd.mergeSetReds)
-	}
-	return mergeSet
-}
@@ -9,5 +9,5 @@ import (
 type DifficultyManager interface {
 	StageDAADataAndReturnRequiredDifficulty(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (uint32, error)
 	RequiredDifficulty(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (uint32, error)
-	EstimateNetworkHashesPerSecond(windowSize int) (uint64, error)
+	EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error)
 }
@@ -8,4 +8,5 @@ type GHOSTDAGManager interface {
 	ChooseSelectedParent(stagingArea *StagingArea, blockHashes ...*externalapi.DomainHash) (*externalapi.DomainHash, error)
 	Less(blockHashA *externalapi.DomainHash, ghostdagDataA *BlockGHOSTDAGData,
 		blockHashB *externalapi.DomainHash, ghostdagDataB *BlockGHOSTDAGData) bool
+	GetSortedMergeSet(stagingArea *StagingArea, current *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
 }
@@ -147,7 +147,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
 		return err
 	}

-	mergeSetSize := len(ghostdagData.MergeSet())
+	mergeSetSize := len(ghostdagData.MergeSetBlues()) + len(ghostdagData.MergeSetReds())

 	if uint64(mergeSetSize) > v.mergeSetSizeLimit {
 		return errors.Wrapf(ruleerrors.ErrViolatingMergeLimit,
@@ -313,6 +313,6 @@ func (dm *mocDifficultyManager) StageDAADataAndReturnRequiredDifficulty(stagingA
 	return dm.testDifficulty, nil
 }

-func (dm *mocDifficultyManager) EstimateNetworkHashesPerSecond(windowSize int) (uint64, error) {
+func (dm *mocDifficultyManager) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
 	return 0, nil
 }
@@ -39,8 +39,9 @@ func (c *coinbaseManager) ExpectedCoinbaseTransaction(stagingArea *model.Staging
 	}

 	txOuts := make([]*externalapi.DomainTransactionOutput, 0, len(ghostdagData.MergeSetBlues()))
-	for i, blue := range ghostdagData.MergeSetBlues() {
-		txOut, hasReward, err := c.coinbaseOutputForBlueBlock(stagingArea, blue, acceptanceData[i], daaAddedBlocksSet)
+	acceptanceDataMap := acceptanceDataFromArrayToMap(acceptanceData)
+	for _, blue := range ghostdagData.MergeSetBlues() {
+		txOut, hasReward, err := c.coinbaseOutputForBlueBlock(stagingArea, blue, acceptanceDataMap[*blue], daaAddedBlocksSet)
 		if err != nil {
 			return nil, err
 		}
@@ -120,10 +121,10 @@ func (c *coinbaseManager) coinbaseOutputForRewardFromRedBlocks(stagingArea *mode
 	ghostdagData *model.BlockGHOSTDAGData, acceptanceData externalapi.AcceptanceData, daaAddedBlocksSet hashset.HashSet,
 	coinbaseData *externalapi.DomainCoinbaseData) (*externalapi.DomainTransactionOutput, bool, error) {

+	acceptanceDataMap := acceptanceDataFromArrayToMap(acceptanceData)
 	totalReward := uint64(0)
-	mergeSetBluesCount := len(ghostdagData.MergeSetBlues())
-	for i, red := range ghostdagData.MergeSetReds() {
-		reward, err := c.calcMergedBlockReward(stagingArea, red, acceptanceData[mergeSetBluesCount+i], daaAddedBlocksSet)
+	for _, red := range ghostdagData.MergeSetReds() {
+		reward, err := c.calcMergedBlockReward(stagingArea, red, acceptanceDataMap[*red], daaAddedBlocksSet)
 		if err != nil {
 			return nil, false, err
 		}
@@ -141,6 +142,14 @@ func (c *coinbaseManager) coinbaseOutputForRewardFromRedBlocks(stagingArea *mode
 	}, true, nil
 }

+func acceptanceDataFromArrayToMap(acceptanceData externalapi.AcceptanceData) map[externalapi.DomainHash]*externalapi.BlockAcceptanceData {
+	acceptanceDataMap := make(map[externalapi.DomainHash]*externalapi.BlockAcceptanceData, len(acceptanceData))
+	for _, blockAcceptanceData := range acceptanceData {
+		acceptanceDataMap[*blockAcceptanceData.BlockHash] = blockAcceptanceData
+	}
+	return acceptanceDataMap
+}
+
 // calcBlockSubsidy returns the subsidy amount a block at the provided blue score
 // should have. This is mainly used for determining how much the coinbase for
 // newly generated blocks awards as well as validating the coinbase for blocks
@@ -129,7 +129,8 @@ func (csm *consensusStateManager) calculateNewTips(
 	if err != nil {
 		return nil, err
 	}
-	log.Debugf("The current tips are: %s", currentTips)
+	log.Debugf("The number of tips is: %d", len(currentTips))
+	log.Tracef("The current tips are: %s", currentTips)

 	newTipParents, err := csm.dagTopologyManager.Parents(stagingArea, newTipHash)
 	if err != nil {
@@ -151,7 +152,8 @@ func (csm *consensusStateManager) calculateNewTips(
 			newTips = append(newTips, currentTip)
 		}
 	}
-	log.Debugf("The calculated new tips are: %s", newTips)
+	log.Debugf("The new number of tips is: %d", len(newTips))
+	log.Tracef("The new tips are: %s", newTips)

 	return newTips, nil
 }
@@ -61,8 +61,7 @@ func (csm *consensusStateManager) calculatePastUTXOAndAcceptanceDataWithSelected
 	}

 	log.Debugf("Applying blue blocks to the selected parent past UTXO of block %s", blockHash)
-	acceptanceData, utxoDiff, err := csm.applyMergeSetBlocks(
-		stagingArea, blockHash, selectedParentPastUTXO, blockGHOSTDAGData, daaScore)
+	acceptanceData, utxoDiff, err := csm.applyMergeSetBlocks(stagingArea, blockHash, selectedParentPastUTXO, daaScore)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -136,13 +135,16 @@ func (csm *consensusStateManager) restorePastUTXO(
 }

 func (csm *consensusStateManager) applyMergeSetBlocks(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
-	selectedParentPastUTXODiff externalapi.UTXODiff, ghostdagData *model.BlockGHOSTDAGData, daaScore uint64) (
+	selectedParentPastUTXODiff externalapi.UTXODiff, daaScore uint64) (
 	externalapi.AcceptanceData, externalapi.MutableUTXODiff, error) {

 	log.Debugf("applyMergeSetBlocks start for block %s", blockHash)
 	defer log.Debugf("applyMergeSetBlocks end for block %s", blockHash)

-	mergeSetHashes := ghostdagData.MergeSet()
+	mergeSetHashes, err := csm.ghostdagManager.GetSortedMergeSet(stagingArea, blockHash)
+	if err != nil {
+		return nil, nil, err
+	}
 	log.Debugf("Merge set for block %s is %v", blockHash, mergeSetHashes)
 	mergeSetBlocks, err := csm.blockStore.Blocks(csm.databaseContext, stagingArea, mergeSetHashes)
 	if err != nil {
@@ -266,8 +268,7 @@ func (csm *consensusStateManager) maybeAcceptTransaction(stagingArea *model.Stag
 	return true, accumulatedMassAfter, nil
 }

-func (csm *consensusStateManager) checkTransactionMass(
-	transaction *externalapi.DomainTransaction, accumulatedMassBefore uint64) (
+func (csm *consensusStateManager) checkTransactionMass(transaction *externalapi.DomainTransaction, accumulatedMassBefore uint64) (
 	isAccepted bool, accumulatedMassAfter uint64) {

 	transactionID := consensushashing.TransactionID(transaction)
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
|
"github.com/kaspanet/kaspad/util/math"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||||
@ -34,7 +35,16 @@ func (csm *consensusStateManager) pickVirtualParents(stagingArea *model.StagingA
|
|||||||
}
|
}
|
||||||
log.Debugf("The selected parent of the virtual is: %s", virtualSelectedParent)
|
log.Debugf("The selected parent of the virtual is: %s", virtualSelectedParent)
|
||||||
|
|
||||||
candidates := candidatesHeap.ToSlice()
|
// Limit to maxBlockParents*3 candidates, that way we don't go over thousands of tips when the network isn't healthy.
|
||||||
|
// There's no specific reason for a factor of 3, and its not a consensus rule, just an estimation saying we probably
|
||||||
|
// don't want to consider and calculate 3 times the amount of candidates for the set of parents.
|
||||||
|
maxCandidates := int(csm.maxBlockParents) * 3
|
||||||
|
candidateAllocationSize := math.MinInt(maxCandidates, candidatesHeap.Len())
|
||||||
|
candidates := make([]*externalapi.DomainHash, 0, candidateAllocationSize)
|
||||||
|
for len(candidates) < maxCandidates && candidatesHeap.Len() > 0 {
|
||||||
|
candidates = append(candidates, candidatesHeap.Pop())
|
||||||
|
}
|
||||||
|
|
||||||
// prioritize half the blocks with highest blueWork and half with lowest, so the network will merge splits faster.
|
// prioritize half the blocks with highest blueWork and half with lowest, so the network will merge splits faster.
|
||||||
if len(candidates) >= int(csm.maxBlockParents) {
|
if len(candidates) >= int(csm.maxBlockParents) {
|
||||||
// We already have the selectedParent, so we're left with csm.maxBlockParents-1.
|
// We already have the selectedParent, so we're left with csm.maxBlockParents-1.
|
||||||
@ -45,12 +55,6 @@ func (csm *consensusStateManager) pickVirtualParents(stagingArea *model.StagingA
|
|||||||
end--
|
end--
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Limit to maxBlockParents*3 candidates, that way we don't go over thousands of tips when the network isn't healthy.
|
|
||||||
// There's no specific reason for a factor of 3, and its not a consensus rule, just an estimation saying we probably
|
|
||||||
// don't want to consider and calculate 3 times the amount of candidates for the set of parents.
|
|
||||||
if len(candidates) > int(csm.maxBlockParents)*3 {
|
|
||||||
candidates = candidates[:int(csm.maxBlockParents)*3]
|
|
||||||
}
|
|
||||||
|
|
||||||
selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent}
|
selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent}
|
||||||
mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet
|
mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet
|
||||||
|
|||||||
@@ -2,11 +2,12 @@ package consensusstatemanager_test

 import (
 	"errors"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
 	"testing"

 	"github.com/kaspanet/kaspad/domain/consensus/model"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"

 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"

@@ -157,14 +158,15 @@ func TestDoubleSpends(t *testing.T) {
 	})
 }

-// TestTransactionAcceptance checks that blue blocks transactions are favoured above
-// red blocks transactions, and that the block reward is paid only for blue blocks.
+// TestTransactionAcceptance checks that block transactions are accepted correctly when the merge set is sorted topologically.
+// DAG diagram:
+// genesis <- blockA <- blockB <- blockC <- ..(chain of k-blocks).. lastBlockInChain <- blockD <- blockE <- blockF
+//                                   ^                                      ^                                   |
+//                                   |      redBlock <------------------------ blueChildOfRedBlock <--------------------
 func TestTransactionAcceptance(t *testing.T) {
 	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
 		stagingArea := model.NewStagingArea()

 		consensusConfig.BlockCoinbaseMaturity = 0

 		factory := consensus.NewFactory()
 		testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestTransactionAcceptance")
 		if err != nil {
@@ -172,221 +174,199 @@ func TestTransactionAcceptance(t *testing.T) {
 		}
 		defer teardown(false)

-		fundingBlock1Hash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
+		blockHashA, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
 		if err != nil {
-			t.Fatalf("Error creating fundingBlock1: %+v", err)
+			t.Fatalf("Error creating blockA: %+v", err)
 		}
-
-		fundingBlock2Hash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{fundingBlock1Hash}, nil, nil)
+		blockHashB, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashA}, nil, nil)
 		if err != nil {
-			t.Fatalf("Error creating fundingBlock2: %+v", err)
+			t.Fatalf("Error creating blockB: %+v", err)
 		}
-
-		// Generate fundingBlock3 to pay for fundingBlock2
-		fundingBlock3Hash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{fundingBlock2Hash}, nil, nil)
+		blockHashC, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashB}, nil, nil)
 		if err != nil {
-			t.Fatalf("Error creating fundingBlock3: %+v", err)
+			t.Fatalf("Error creating blockC: %+v", err)
 		}
-
-		// Add a chain of K blocks above fundingBlock3 so we'll
+		// Add a chain of K blocks above blockC so we'll
 		// be able to mine a red block on top of it.
-		tipHash := fundingBlock3Hash
+		chainTipHash := blockHashC
 		for i := model.KType(0); i < consensusConfig.K; i++ {
 			var err error
-			tipHash, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil)
+			chainTipHash, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{chainTipHash}, nil, nil)
 			if err != nil {
-				t.Fatalf("Error creating fundingBlock1: %+v", err)
+				t.Fatalf("Error creating a block: %+v", err)
 			}
 		}
-
-		fundingBlock2, err := testConsensus.GetBlock(fundingBlock2Hash)
+		lastBlockInChain := chainTipHash
+		blockC, err := testConsensus.GetBlock(blockHashC)
 		if err != nil {
-			t.Fatalf("Error getting fundingBlock: %+v", err)
+			t.Fatalf("Error getting blockC: %+v", err)
 		}
-
-		fundingTransaction1 := fundingBlock2.Transactions[transactionhelper.CoinbaseTransactionIndex]
-
-		fundingBlock3, err := testConsensus.GetBlock(fundingBlock3Hash)
+		fees := uint64(1)
+		transactionFromBlockC := blockC.Transactions[transactionhelper.CoinbaseTransactionIndex]
+		// transactionFromRedBlock is spending TransactionFromBlockC.
+		transactionFromRedBlock, err := testutils.CreateTransaction(transactionFromBlockC, fees)
 		if err != nil {
-			t.Fatalf("Error getting fundingBlock: %+v", err)
+			t.Fatalf("Error creating a transactionFromRedBlock: %+v", err)
 		}
-
-		fundingTransaction2 := fundingBlock3.Transactions[transactionhelper.CoinbaseTransactionIndex]
-
-		spendingTransaction1, err := testutils.CreateTransaction(fundingTransaction1, 1)
+		transactionFromRedBlockInput0UTXOEntry, err := testConsensus.ConsensusStateStore().
+			UTXOByOutpoint(testConsensus.DatabaseContext(), stagingArea, &transactionFromRedBlock.Inputs[0].PreviousOutpoint)
 		if err != nil {
-			t.Fatalf("Error creating spendingTransaction1: %+v", err)
+			t.Fatalf("Error getting UTXOEntry for transactionFromRedBlockInput: %s", err)
 		}
-		spendingTransaction1UTXOEntry, err := testConsensus.ConsensusStateStore().
-			UTXOByOutpoint(testConsensus.DatabaseContext(), stagingArea, &spendingTransaction1.Inputs[0].PreviousOutpoint)
-		if err != nil {
-			t.Fatalf("Error getting UTXOEntry for spendingTransaction1: %s", err)
-		}
-
-		spendingTransaction2, err := testutils.CreateTransaction(fundingTransaction2, 1)
-		if err != nil {
-			t.Fatalf("Error creating spendingTransaction1: %+v", err)
-		}
-		spendingTransaction2UTXOEntry, err := testConsensus.ConsensusStateStore().
-			UTXOByOutpoint(testConsensus.DatabaseContext(), stagingArea, &spendingTransaction2.Inputs[0].PreviousOutpoint)
-		if err != nil {
-			t.Fatalf("Error getting UTXOEntry for spendingTransaction2: %s", err)
-		}
-
-		redHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{fundingBlock3Hash}, nil,
-			[]*externalapi.DomainTransaction{spendingTransaction1, spendingTransaction2})
+		redHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashC}, nil,
+			[]*externalapi.DomainTransaction{transactionFromRedBlock})
 		if err != nil {
 			t.Fatalf("Error creating redBlock: %+v", err)
 		}

-		blueScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{1}, Version: 0}
-		blueHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, &externalapi.DomainCoinbaseData{
-			ScriptPublicKey: blueScriptPublicKey,
-			ExtraData:       nil,
-		},
-			[]*externalapi.DomainTransaction{spendingTransaction1})
+		transactionFromBlueChildOfRedBlock, err := testutils.CreateTransaction(transactionFromRedBlock, fees)
 		if err != nil {
-			t.Fatalf("Error creating blue: %+v", err)
+			t.Fatalf("Error creating transactionFromBlueChildOfRedBlock: %+v", err)
 		}
-
-		// Mining two blocks so tipHash will definitely be the selected tip.
-		tipHash, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil)
+		transactionFromBlueChildOfRedBlockInput0UTXOEntry, err := testConsensus.ConsensusStateStore().
+			UTXOByOutpoint(testConsensus.DatabaseContext(), stagingArea, &transactionFromBlueChildOfRedBlock.Inputs[0].PreviousOutpoint)
 		if err != nil {
-			t.Fatalf("Error creating tip: %+v", err)
+			t.Fatalf("Error getting UTXOEntry for transactionFromBlueChildOfRedBlockInput: %s", err)
 		}
-
-		finalTipSelectedParentScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{3}, Version: 0}
-		finalTipSelectedParentHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{tipHash},
+		blueChildOfRedBlockScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{3}, Version: 0}
+		// The blueChildOfRedBlock contains a transaction that spent an output from the red block.
+		hashBlueChildOfRedBlock, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{lastBlockInChain, redHash},
 			&externalapi.DomainCoinbaseData{
-				ScriptPublicKey: finalTipSelectedParentScriptPublicKey,
+				ScriptPublicKey: blueChildOfRedBlockScriptPublicKey,
+				ExtraData:       nil,
+			}, []*externalapi.DomainTransaction{transactionFromBlueChildOfRedBlock})
+		if err != nil {
+			t.Fatalf("Error creating blueChildOfRedBlock: %+v", err)
+		}
+
+		// K blocks minded between blockC and blockD.
+		blockHashD, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{lastBlockInChain}, nil, nil)
+		if err != nil {
+			t.Fatalf("Error creating blockD : %+v", err)
+		}
+		blockEScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{4}, Version: 0}
+		blockHashE, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashD},
+			&externalapi.DomainCoinbaseData{
+				ScriptPublicKey: blockEScriptPublicKey,
 				ExtraData:       nil,
 			}, nil)
 		if err != nil {
-			t.Fatalf("Error creating tip: %+v", err)
+			t.Fatalf("Error creating blockE: %+v", err)
 		}
-
-		finalTipScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{4}, Version: 0}
-		finalTipHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{finalTipSelectedParentHash, redHash, blueHash},
+		blockFScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{5}, Version: 0}
+		blockHashF, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashE, hashBlueChildOfRedBlock},
 			&externalapi.DomainCoinbaseData{
-				ScriptPublicKey: finalTipScriptPublicKey,
+				ScriptPublicKey: blockFScriptPublicKey,
 				ExtraData:       nil,
-			},
-			nil)
+			}, nil)
 		if err != nil {
-			t.Fatalf("Error creating finalTip: %+v", err)
+			t.Fatalf("Error creating blockF: %+v", err)
 		}

-		acceptanceData, err := testConsensus.AcceptanceDataStore().Get(testConsensus.DatabaseContext(), stagingArea, finalTipHash)
+		acceptanceData, err := testConsensus.AcceptanceDataStore().Get(testConsensus.DatabaseContext(), stagingArea, blockHashF)
 		if err != nil {
 			t.Fatalf("Error getting acceptance data: %+v", err)
 		}
-
-		finalTipSelectedParent, err := testConsensus.GetBlock(finalTipSelectedParentHash)
+		blueChildOfRedBlock, err := testConsensus.GetBlock(hashBlueChildOfRedBlock)
 		if err != nil {
-			t.Fatalf("Error getting finalTipSelectedParent: %+v", err)
+			t.Fatalf("Error getting blueChildOfRedBlock: %+v", err)
 		}
-
-		blue, err := testConsensus.GetBlock(blueHash)
+		blockE, err := testConsensus.GetBlock(blockHashE)
 		if err != nil {
-			t.Fatalf("Error getting blue: %+v", err)
+			t.Fatalf("Error getting blockE: %+v", err)
 		}
-
-		red, err := testConsensus.GetBlock(redHash)
+		redBlock, err := testConsensus.GetBlock(redHash)
 		if err != nil {
-			t.Fatalf("Error getting red: %+v", err)
+			t.Fatalf("Error getting redBlock: %+v", err)
 		}
-
-		// We expect spendingTransaction1 to be accepted by the blue block and not by the red one, because
-		// blue blocks in the merge set should always be ordered before red blocks in the merge set.
-		// We also expect spendingTransaction2 to be accepted by the red because nothing conflicts it.
+		blockF, err := testConsensus.GetBlock(blockHashF)
+		if err != nil {
+			t.Fatalf("Error getting blockF: %+v", err)
+		}
+		updatedDAAScoreVirtualBlock := 25
+		//We expect the second transaction in the "blue block" (blueChildOfRedBlock) to be accepted because the merge set is ordered topologically
+		//and the red block is ordered topologically before the "blue block" so the input is known in the UTXOSet.
 		expectedAcceptanceData := externalapi.AcceptanceData{
 			{
-				BlockHash: finalTipSelectedParentHash,
+				BlockHash: blockHashE,
 				TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{
 					{
-						Transaction:                 finalTipSelectedParent.Transactions[0],
+						Transaction:                 blockE.Transactions[0],
 						Fee:                         0,
 						IsAccepted:                  true,
 						TransactionInputUTXOEntries: []externalapi.UTXOEntry{},
 					},
 				},
 			},
 			{
-				BlockHash: blueHash,
-				TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{
-					{
-						Transaction:                 blue.Transactions[0],
-						Fee:                         0,
-						IsAccepted:                  false,
-						TransactionInputUTXOEntries: []externalapi.UTXOEntry{},
-					},
-					{
-						Transaction:                 spendingTransaction1,
-						Fee:                         1,
-						IsAccepted:                  true,
-						TransactionInputUTXOEntries: []externalapi.UTXOEntry{spendingTransaction1UTXOEntry},
-					},
-				},
-			},
-			{
 				BlockHash: redHash,
 				TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{
-					{
-						Transaction:                 red.Transactions[0],
+					{ //Coinbase transaction outputs are added to the UTXO-set only if they are in the selected parent chain,
+						// and this block isn't.
+						Transaction:                 redBlock.Transactions[0],
 						Fee:                         0,
 						IsAccepted:                  false,
 						TransactionInputUTXOEntries: []externalapi.UTXOEntry{},
 					},
 					{
-						Transaction:                 spendingTransaction1,
-						Fee:                         0,
-						IsAccepted:                  false,
-						TransactionInputUTXOEntries: []externalapi.UTXOEntry{},
-					},
-					{
-						Transaction:                 spendingTransaction2,
-						Fee:                         1,
+						Transaction:                 redBlock.Transactions[1],
+						Fee:                         fees,
 						IsAccepted:                  true,
-						TransactionInputUTXOEntries: []externalapi.UTXOEntry{spendingTransaction2UTXOEntry},
+						TransactionInputUTXOEntries: []externalapi.UTXOEntry{transactionFromRedBlockInput0UTXOEntry},
+					},
+				},
+			},
+			{
+				BlockHash: hashBlueChildOfRedBlock,
+				TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{
+					{ //Coinbase transaction outputs are added to the UTXO-set only if they are in the selected parent chain,
+						// and this block isn't.
+						Transaction:                 blueChildOfRedBlock.Transactions[0],
+						Fee:                         0,
+						IsAccepted:                  false,
+						TransactionInputUTXOEntries: []externalapi.UTXOEntry{},
+					},
+					{ // The DAAScore was calculated by the virtual block pov. The DAAScore has changed since more blocks were added to the DAG.
+						// So we will change the DAAScore in the UTXOEntryInput to the updated virtual DAAScore.
+						Transaction: blueChildOfRedBlock.Transactions[1],
+						Fee:         fees,
+						IsAccepted:  true,
+						TransactionInputUTXOEntries: []externalapi.UTXOEntry{
+							utxo.NewUTXOEntry(transactionFromBlueChildOfRedBlockInput0UTXOEntry.Amount(),
+								transactionFromBlueChildOfRedBlockInput0UTXOEntry.ScriptPublicKey(),
+								transactionFromBlueChildOfRedBlockInput0UTXOEntry.IsCoinbase(), uint64(updatedDAAScoreVirtualBlock))},
 					},
 				},
 			},
 		}

 		if !acceptanceData.Equal(expectedAcceptanceData) {
 			t.Fatalf("The acceptance data is not the expected acceptance data")
 		}

-		finalTip, err := testConsensus.GetBlock(finalTipHash)
-		if err != nil {
-			t.Fatalf("Error getting finalTip: %+v", err)
-		}
-
-		// We expect the coinbase transaction to pay reward for the selected parent, the
-		// blue block, and bestow the red block reward to the merging block.
+		// We expect the coinbase transaction to pay reward for the selected parent(block E), the
+		// blueChildOfRedBlock, and bestow the red block reward to the merging block.
 		expectedCoinbase := &externalapi.DomainTransaction{
 			Version: constants.MaxTransactionVersion,
 			Inputs:  nil,
 			Outputs: []*externalapi.DomainTransactionOutput{
 				{
 					Value:           50 * constants.SompiPerKaspa,
-					ScriptPublicKey: finalTipSelectedParentScriptPublicKey,
+					ScriptPublicKey: blockEScriptPublicKey,
 				},
 				{
-					Value:           50*constants.SompiPerKaspa + 1, // testutils.CreateTransaction pays a fee of 1 sompi
-					ScriptPublicKey: blueScriptPublicKey,
+					Value:           50*constants.SompiPerKaspa + fees, // testutils.CreateTransaction pays fees
+					ScriptPublicKey: blueChildOfRedBlockScriptPublicKey,
 				},
 				{
-					Value:           50*constants.SompiPerKaspa + 1,
-					ScriptPublicKey: finalTipScriptPublicKey,
+					Value:           50*constants.SompiPerKaspa + fees, // testutils.CreateTransaction pays fees
+					ScriptPublicKey: blockFScriptPublicKey,
 				},
 			},
 			LockTime:     0,
 			SubnetworkID: subnetworks.SubnetworkIDCoinbase,
 			Gas:          0,
-			Payload:      finalTip.Transactions[0].Payload,
+			Payload:      blockF.Transactions[0].Payload,
 		}
-		if !finalTip.Transactions[transactionhelper.CoinbaseTransactionIndex].Equal(expectedCoinbase) {
+		if !blockF.Transactions[transactionhelper.CoinbaseTransactionIndex].Equal(expectedCoinbase) {
 			t.Fatalf("Unexpected coinbase transaction")
 		}
 	})
@@ -167,9 +167,13 @@ func (dm *difficultyManager) calculateDaaScoreAndAddedBlocks(stagingArea *model.
 	if err != nil {
 		return 0, nil, err
 	}
+	mergeSetLength := len(ghostdagData.MergeSetBlues()) + len(ghostdagData.MergeSetReds())
+	mergeSet := make(map[externalapi.DomainHash]struct{}, mergeSetLength)
+	for _, hash := range ghostdagData.MergeSetBlues() {
+		mergeSet[*hash] = struct{}{}
+	}

-	mergeSet := make(map[externalapi.DomainHash]struct{}, len(ghostdagData.MergeSet()))
-	for _, hash := range ghostdagData.MergeSet() {
+	for _, hash := range ghostdagData.MergeSetReds() {
 		mergeSet[*hash] = struct{}{}
 	}

@@ -1,26 +1,31 @@
 package difficultymanager

 import (
+	"math/big"
+
 	"github.com/kaspanet/kaspad/domain/consensus/model"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/pkg/errors"
-	"math/big"
 )

-func (dm *difficultyManager) EstimateNetworkHashesPerSecond(windowSize int) (uint64, error) {
+func (dm *difficultyManager) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
 	onEnd := logger.LogAndMeasureExecutionTime(log, "EstimateNetworkHashesPerSecond")
 	defer onEnd()

 	stagingArea := model.NewStagingArea()
-	return dm.estimateNetworkHashesPerSecond(stagingArea, windowSize)
+	return dm.estimateNetworkHashesPerSecond(stagingArea, startHash, windowSize)
 }

-func (dm *difficultyManager) estimateNetworkHashesPerSecond(stagingArea *model.StagingArea, windowSize int) (uint64, error) {
-	if windowSize < 2 {
-		return 0, errors.Errorf("windowSize must be equal to or greater than 2")
+func (dm *difficultyManager) estimateNetworkHashesPerSecond(stagingArea *model.StagingArea,
+	startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
+
+	const minWindowSize = 1000
+	if windowSize < minWindowSize {
+		return 0, errors.Errorf("windowSize must be equal to or greater than %d", minWindowSize)
 	}

-	blockWindow, windowHashes, err := dm.blockWindow(stagingArea, model.VirtualBlockHash, windowSize)
+	blockWindow, windowHashes, err := dm.blockWindow(stagingArea, startHash, windowSize)
 	if err != nil {
 		return 0, err
 	}
@@ -57,8 +62,13 @@ func (dm *difficultyManager) estimateNetworkHashesPerSecond(stagingArea *model.S
 		}
 	}

+	windowsDiff := (maxWindowTimestamp - minWindowTimestamp) / 1000 // Divided by 1000 to convert milliseconds to seconds
+	if windowsDiff == 0 {
+		return 0, nil
+	}
+
 	nominator := new(big.Int).Sub(maxWindowBlueWork, minWindowBlueWork)
-	denominator := big.NewInt((maxWindowTimestamp - minWindowTimestamp) / 1000) // Divided by 1000 to convert milliseconds to seconds
+	denominator := big.NewInt(windowsDiff)
 	networkHashesPerSecondBigInt := new(big.Int).Div(nominator, denominator)
 	return networkHashesPerSecondBigInt.Uint64(), nil
 }
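For orientation, the estimator above boils down to dividing the blue work gained across the window by the window's time span in seconds, with the newly added guard against a zero time span. A stand-alone sketch of that arithmetic (function and variable names here are illustrative, not part of the codebase):

package main

import (
	"fmt"
	"math/big"
)

// estimateHashesPerSecond mirrors the arithmetic of the hunk above:
// (maxBlueWork - minBlueWork) / ((maxTimestampMillis - minTimestampMillis) / 1000).
func estimateHashesPerSecond(minBlueWork, maxBlueWork *big.Int, minTimestampMillis, maxTimestampMillis int64) uint64 {
	windowsDiff := (maxTimestampMillis - minTimestampMillis) / 1000 // milliseconds to seconds
	if windowsDiff == 0 {
		return 0 // same guard the diff adds to avoid dividing by zero
	}
	nominator := new(big.Int).Sub(maxBlueWork, minBlueWork)
	denominator := big.NewInt(windowsDiff)
	return new(big.Int).Div(nominator, denominator).Uint64()
}

func main() {
	// Example: 3 * 10^12 additional blue work over 1,000 seconds is roughly 3 GH/s.
	minWork := big.NewInt(1_000_000_000_000)
	maxWork := big.NewInt(4_000_000_000_000)
	fmt.Println(estimateHashesPerSecond(minWork, maxWork, 0, 1_000_000)) // 3000000000
}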
@@ -409,3 +409,7 @@ func (gh *ghostdagHelper) ChooseSelectedParent(stagingArea *model.StagingArea, b
 func (gh *ghostdagHelper) Less(blockHashA *externalapi.DomainHash, ghostdagDataA *model.BlockGHOSTDAGData, blockHashB *externalapi.DomainHash, ghostdagDataB *model.BlockGHOSTDAGData) bool {
 	panic("implement me")
 }
+
+func (gh *ghostdagHelper) GetSortedMergeSet(*model.StagingArea, *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
+	panic("implement me")
+}
@@ -82,3 +82,47 @@ func (gm *ghostdagManager) sortMergeSet(stagingArea *model.StagingArea, mergeSet
 	})
 	return err
 }
+
+// GetSortedMergeSet return the merge set sorted in a toplogical order.
+func (gm *ghostdagManager) GetSortedMergeSet(stagingArea *model.StagingArea,
+	current *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
+
+	currentGhostdagData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, current)
+	if err != nil {
+		return nil, err
+	}
+
+	blueMergeSet := currentGhostdagData.MergeSetBlues()
+	redMergeSet := currentGhostdagData.MergeSetReds()
+	sortedMergeSet := make([]*externalapi.DomainHash, 0, len(blueMergeSet)+len(redMergeSet))
+	// If the current block is the genesis block:
+	if len(blueMergeSet) == 0 {
+		return sortedMergeSet, nil
+	}
+	selectedParent, blueMergeSet := blueMergeSet[0], blueMergeSet[1:]
+	sortedMergeSet = append(sortedMergeSet, selectedParent)
+	i, j := 0, 0
+	for i < len(blueMergeSet) && j < len(redMergeSet) {
+		currentBlue := blueMergeSet[i]
+		currentBlueGhostdagData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, currentBlue)
+		if err != nil {
+			return nil, err
+		}
+		currentRed := redMergeSet[j]
+		currentRedGhostdagData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, currentRed)
+		if err != nil {
+			return nil, err
+		}
+		if gm.Less(currentBlue, currentBlueGhostdagData, currentRed, currentRedGhostdagData) {
+			sortedMergeSet = append(sortedMergeSet, currentBlue)
+			i++
+		} else {
+			sortedMergeSet = append(sortedMergeSet, currentRed)
+			j++
+		}
+	}
+	sortedMergeSet = append(sortedMergeSet, blueMergeSet[i:]...)
+	sortedMergeSet = append(sortedMergeSet, redMergeSet[j:]...)
+
+	return sortedMergeSet, nil
+}
@ -71,7 +71,7 @@ func (sm *syncManager) antiPastHashesBetween(stagingArea *model.StagingArea, low
|
|||||||
// Since the rest of the merge set is in the anticone of selectedParent, it's position in the list does not
|
// Since the rest of the merge set is in the anticone of selectedParent, it's position in the list does not
|
||||||
// matter, even though it's blue score is the highest, we can arbitrarily decide it comes first.
|
// matter, even though it's blue score is the highest, we can arbitrarily decide it comes first.
|
||||||
// Therefore we first append the selectedParent, then the rest of blocks in ghostdag order.
|
// Therefore we first append the selectedParent, then the rest of blocks in ghostdag order.
|
||||||
sortedMergeSet, err := sm.getSortedMergeSet(stagingArea, current)
|
sortedMergeSet, err := sm.ghostdagManager.GetSortedMergeSet(stagingArea, current)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@ -97,49 +97,9 @@ func (sm *syncManager) antiPastHashesBetween(stagingArea *model.StagingArea, low
 	return blockHashes, highHash, nil
 }
 
-func (sm *syncManager) getSortedMergeSet(stagingArea *model.StagingArea, current *externalapi.DomainHash) (
-	[]*externalapi.DomainHash, error) {
-
-	currentGhostdagData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, current)
-	if err != nil {
-		return nil, err
-	}
-
-	blueMergeSet := currentGhostdagData.MergeSetBlues()
-	redMergeSet := currentGhostdagData.MergeSetReds()
-	sortedMergeSet := make([]*externalapi.DomainHash, 0, len(blueMergeSet)+len(redMergeSet))
-	selectedParent, blueMergeSet := blueMergeSet[0], blueMergeSet[1:]
-	sortedMergeSet = append(sortedMergeSet, selectedParent)
-	i, j := 0, 0
-	for i < len(blueMergeSet) && j < len(redMergeSet) {
-		currentBlue := blueMergeSet[i]
-		currentBlueGhostdagData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, currentBlue)
-		if err != nil {
-			return nil, err
-		}
-		currentRed := redMergeSet[j]
-		currentRedGhostdagData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, currentRed)
-		if err != nil {
-			return nil, err
-		}
-		if sm.ghostdagManager.Less(currentBlue, currentBlueGhostdagData, currentRed, currentRedGhostdagData) {
-			sortedMergeSet = append(sortedMergeSet, currentBlue)
-			i++
-		} else {
-			sortedMergeSet = append(sortedMergeSet, currentRed)
-			j++
-		}
-	}
-	sortedMergeSet = append(sortedMergeSet, blueMergeSet[i:]...)
-	sortedMergeSet = append(sortedMergeSet, redMergeSet[j:]...)
-
-	return sortedMergeSet, nil
-}
-
-func (sm *syncManager) findHighHashAccordingToMaxBlueScoreDifference(stagingArea *model.StagingArea,
-	lowHash *externalapi.DomainHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64,
-	highBlockGHOSTDAGData *model.BlockGHOSTDAGData, lowBlockGHOSTDAGData *model.BlockGHOSTDAGData) (
-	*externalapi.DomainHash, error) {
+func (sm *syncManager) findHighHashAccordingToMaxBlueScoreDifference(stagingArea *model.StagingArea, lowHash *externalapi.DomainHash,
+	highHash *externalapi.DomainHash, maxBlueScoreDifference uint64, highBlockGHOSTDAGData *model.BlockGHOSTDAGData,
+	lowBlockGHOSTDAGData *model.BlockGHOSTDAGData) (*externalapi.DomainHash, error) {
 
 	if highBlockGHOSTDAGData.BlueScore()-lowBlockGHOSTDAGData.BlueScore() <= maxBlueScoreDifference {
 		return highHash, nil

@ -17,11 +17,12 @@
 package protowire
 
 import (
+	reflect "reflect"
+	sync "sync"
+
 	proto "github.com/golang/protobuf/proto"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	reflect "reflect"
-	sync "sync"
 )
 
 const (

@ -5008,6 +5009,7 @@ type EstimateNetworkHashesPerSecondRequestMessage struct {
 	unknownFields protoimpl.UnknownFields
 
 	WindowSize uint32 `protobuf:"varint,1,opt,name=windowSize,proto3" json:"windowSize,omitempty"`
+	StartHash  string `protobuf:"bytes,2,opt,name=blockHash,proto3" json:"blockHash,omitempty"`
 }
 
 func (x *EstimateNetworkHashesPerSecondRequestMessage) Reset() {

@ -5049,6 +5051,13 @@ func (x *EstimateNetworkHashesPerSecondRequestMessage) GetWindowSize() uint32 {
 	return 0
 }
 
+func (x *EstimateNetworkHashesPerSecondRequestMessage) GetBlockHash() string {
+	if x != nil {
+		return x.StartHash
+	}
+	return ""
+}
+
 type EstimateNetworkHashesPerSecondResponseMessage struct {
 	state protoimpl.MessageState
 	sizeCache protoimpl.SizeCache

@ -5727,25 +5736,26 @@ var file_rpc_proto_rawDesc = []byte{
 	0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65,
 	0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b,
 	0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43,
-	0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4e, 0x0a, 0x2c,
+	0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6c, 0x0a, 0x2c,
 	0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48,
 	0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65,
 	0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a,
 	0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
-	0x52, 0x0a, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x93, 0x01, 0x0a,
-	0x2d, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
-	0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36,
-	0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50,
-	0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16,
-	0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72,
-	0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
-	0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69,
-	0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72,
-	0x6f, 0x72, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
-	0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x33,
+	0x52, 0x0a, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09,
+	0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0x93, 0x01, 0x0a, 0x2d, 0x45,
+	0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61,
+	0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x16,
+	0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72,
+	0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x65,
+	0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65,
+	0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65,
+	0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+	0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b,
+	0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (

@ -592,6 +592,7 @@ message GetInfoResponseMessage{
 
 message EstimateNetworkHashesPerSecondRequestMessage{
     uint32 windowSize = 1;
+    string blockHash = 2;
 }
 
 message EstimateNetworkHashesPerSecondResponseMessage{

@ -15,6 +15,7 @@ func (x *KaspadMessage_EstimateNetworkHashesPerSecondRequest) toAppMessage() (ap
 func (x *KaspadMessage_EstimateNetworkHashesPerSecondRequest) fromAppMessage(message *appmessage.EstimateNetworkHashesPerSecondRequestMessage) error {
 	x.EstimateNetworkHashesPerSecondRequest = &EstimateNetworkHashesPerSecondRequestMessage{
 		WindowSize: message.WindowSize,
+		StartHash:  message.StartHash,
 	}
 	return nil
 }

@ -25,6 +26,7 @@ func (x *EstimateNetworkHashesPerSecondRequestMessage) toAppMessage() (appmessag
 	}
 	return &appmessage.EstimateNetworkHashesPerSecondRequestMessage{
 		WindowSize: x.WindowSize,
+		StartHash:  x.StartHash,
 	}, nil
 }

@ -3,8 +3,8 @@ package rpcclient
 import "github.com/kaspanet/kaspad/app/appmessage"
 
 // EstimateNetworkHashesPerSecond sends an RPC request respective to the function's name and returns the RPC server's response
-func (c *RPCClient) EstimateNetworkHashesPerSecond(windowSize uint32) (*appmessage.EstimateNetworkHashesPerSecondResponseMessage, error) {
-	err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewEstimateNetworkHashesPerSecondRequestMessage(windowSize))
+func (c *RPCClient) EstimateNetworkHashesPerSecond(startHash string, windowSize uint32) (*appmessage.EstimateNetworkHashesPerSecondResponseMessage, error) {
+	err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewEstimateNetworkHashesPerSecondRequestMessage(startHash, windowSize))
 	if err != nil {
 		return nil, err
 	}

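After this change a caller supplies both a start hash and a window size. A hypothetical client program sketching the updated call; the address, window size, and empty start hash are example values, and the fallback behaviour for an empty hash is an assumption rather than something stated in this diff:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
)

func main() {
	// Example address only; 16110 is assumed to be the node's RPC port.
	client, err := rpcclient.NewRPCClient("localhost:16110")
	if err != nil {
		panic(err) // error handling kept minimal for brevity
	}

	// An empty startHash is assumed to fall back to the node's default
	// starting point; 1000 is an illustrative window size.
	response, err := client.EstimateNetworkHashesPerSecond("", 1000)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Estimated network hash rate: %d hashes/s\n", response.NetworkHashesPerSecond)
}
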
@ -38,6 +38,10 @@ echo "Running reorg"
 cd "${PROJECT_ROOT}/reorg/run" && ./run.sh || failedTests+=("reorg")
 echo "Done running reorg"
 
+echo "Running many-tips"
+cd "${PROJECT_ROOT}/many-tips/run" && ./run.sh || failedTests+=("many-tips")
+echo "Done running many-tips"
+
 echo "Running netsync - fast"
 cd "${PROJECT_ROOT}/netsync/run" && ./run-fast.sh || failedTests+=("netsync")
 echo "Done running netsync - fast"

@ -34,6 +34,10 @@ echo "Running orphans"
 cd "${PROJECT_ROOT}/orphans/run" && ./run.sh || failedTests+=("orphans")
 echo "Done running orphans"
 
+echo "Running many-tips"
+cd "${PROJECT_ROOT}/many-tips/run" && ./run.sh || failedTests+=("many-tips")
+echo "Done running many-tips"
+
 echo "Running reorg"
 cd "${PROJECT_ROOT}/reorg/run" && ./run-full-finality-window-reorg.sh || failedTests+=("reorg")
 echo "Done running reorg"