[NOD-650] Remove CPU miner from the node and add kaspaminer in ./cmd (#587)

* [NOD-650] Add kaspaminer

* [NOD-650] Remove CPU miner

* [NOD-650] Fix comments and error messages

* [NOD-650] Remove redundant check for closing foundBlock

* [NOD-650] Submit block synchronously

* [NOD-650] Use ParseUint instead of ParseInt

* [NOD-650] Rearrange functions order in mineloop.go

* [NOD-650] Add block delay CLI argument to kaspaminer

* [NOD-650] Remove redundant spawn

* [NOD-650] Add Dockerfile for kaspaminer

* [NOD-650] Remove redundant comments

* [NOD-650] Remove tests from kaspaminer Dockerfile

* [NOD-650] Remove redundant argument on OnFilteredBlockAdded
This commit is contained in:
Ori Newman 2020-01-19 15:18:26 +02:00 committed by Svarog
parent b5f365d282
commit 38883d1a98
39 changed files with 563 additions and 4871 deletions

87
cmd/kaspaminer/client.go Normal file
View File

@ -0,0 +1,87 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io/ioutil"
"net"
"time"
)
// minerClient is an RPC client extended with a channel that signals
// whenever the server reports a newly added block.
type minerClient struct {
	*rpcclient.Client
	onBlockAdded chan struct{} // buffered (size 1) block-added signal
}
// newMinerClient connects to the RPC server described by connCfg and
// registers for block-added notifications, which are delivered as signals
// on the returned client's onBlockAdded channel.
func newMinerClient(connCfg *rpcclient.ConnConfig) (*minerClient, error) {
	client := &minerClient{
		onBlockAdded: make(chan struct{}, 1),
	}
	notificationHandlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(_ uint64, header *wire.BlockHeader,
			txs []*util.Tx) {
			// Non-blocking send: the channel is a pure signal, so if
			// a notification is already pending we coalesce instead of
			// blocking the rpcclient notification goroutine.
			select {
			case client.onBlockAdded <- struct{}{}:
			default:
			}
		},
	}
	var err error
	client.Client, err = rpcclient.New(connCfg, notificationHandlers)
	if err != nil {
		return nil, errors.Wrapf(err, "Error connecting to address %s", connCfg.Host)
	}

	if err = client.NotifyBlocks(); err != nil {
		return nil, errors.Wrapf(err, "Error while registering client %s for block notifications", client.Host())
	}
	return client, nil
}
// connectToServer reads the TLS certificate (when TLS is enabled), builds
// the RPC connection configuration from cfg, and establishes a connection
// to the configured RPC server.
func connectToServer(cfg *configFlags) (*minerClient, error) {
	certificates, err := readCert(cfg)
	if err != nil {
		return nil, err
	}

	rpcAddress := normalizeRPCServerAddress(cfg.RPCServer, cfg)
	client, err := newMinerClient(&rpcclient.ConnConfig{
		Host:           rpcAddress,
		Endpoint:       "ws",
		User:           cfg.RPCUser,
		Pass:           cfg.RPCPassword,
		DisableTLS:     cfg.DisableTLS,
		RequestTimeout: time.Second * 10,
		Certificates:   certificates,
	})
	if err != nil {
		return nil, err
	}

	log.Infof("Connected to server %s", client.Host())

	return client, nil
}
// normalizeRPCServerAddress returns addr with the current network default
// port appended if there is not already a port specified.
func normalizeRPCServerAddress(addr string, cfg *configFlags) string {
	if _, _, err := net.SplitHostPort(addr); err == nil {
		// addr already carries an explicit port.
		return addr
	}
	return net.JoinHostPort(addr, cfg.NetParams().RPCPort)
}
// readCert reads the RPC certificate file configured in cfg. When TLS is
// disabled it returns nil without an error, since no certificate is needed.
func readCert(cfg *configFlags) ([]byte, error) {
	if cfg.DisableTLS {
		return nil, nil
	}

	cert, err := ioutil.ReadFile(cfg.RPCCert)
	if err != nil {
		// Wrap (rather than reformat) so callers keep access to the
		// underlying error, and include the offending path.
		return nil, errors.Wrapf(err, "Error reading certificates file %s", cfg.RPCCert)
	}

	return cert, nil
}

84
cmd/kaspaminer/config.go Normal file
View File

@ -0,0 +1,84 @@
package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"os"
"path/filepath"
"strings"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/cmd/kaspaminer/version"
)
const (
	// defaultLogFilename is the name of the main (trace-level) log file.
	defaultLogFilename = "kaspaminer.log"
	// defaultErrLogFilename is the name of the warning/error-only log file.
	defaultErrLogFilename = "kaspaminer_err.log"
)

var (
	// Default configuration options
	defaultHomeDir = util.AppDataDir("kaspaminer", false)
	defaultLogFile = filepath.Join(defaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
	defaultRPCServer = "localhost"
)
// configFlags holds the kaspaminer command-line configuration. The flags
// are parsed by go-flags via the struct tags; network selection flags come
// from the embedded config.NetworkFlags.
type configFlags struct {
	ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
	RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
	RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
	RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
	DisableTLS bool `long:"notls" description:"Disable TLS"`
	Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
	NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
	BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
	config.NetworkFlags
}
// parseConfig parses the command-line flags into a configFlags value,
// applies defaults, validates flag combinations and initializes logging.
// It prints the version and exits the process when -V/--version is given.
func parseConfig() (*configFlags, error) {
	cfg := &configFlags{
		RPCServer: defaultRPCServer,
	}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()

	// Show the version and exit if the version flag was specified. This is
	// checked before the parse error so --version works even alongside
	// otherwise-invalid flags.
	if cfg.ShowVersion {
		appName := filepath.Base(os.Args[0])
		appName = strings.TrimSuffix(appName, filepath.Ext(appName))
		fmt.Println(appName, "version", version.Version())
		os.Exit(0)
	}

	if err != nil {
		return nil, err
	}

	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return nil, err
	}

	if cfg.RPCUser == "" {
		return nil, errors.New("--rpcuser is required")
	}
	if cfg.RPCPassword == "" {
		return nil, errors.New("--rpcpass is required")
	}

	// TLS requires a certificate, and a certificate is meaningless without
	// TLS, so exactly one of --rpccert / --notls must be supplied. The
	// original message here ("--notls has to be disabled if --cert is
	// used") described the opposite condition.
	if cfg.RPCCert == "" && !cfg.DisableTLS {
		return nil, errors.New("either --rpccert or --notls must be specified")
	}
	if cfg.RPCCert != "" && cfg.DisableTLS {
		return nil, errors.New("--rpccert should be omitted if --notls is used")
	}

	initLog(defaultLogFile, defaultErrLogFile)

	return cfg, nil
}

View File

@ -0,0 +1,34 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

# Create and enter the module directory inside GOPATH.
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad

# Tools needed for dependency download and the static checks below.
RUN apk add --no-cache curl git openssh binutils gcc musl-dev
RUN go get -u golang.org/x/lint/golint

# Copy go.mod/go.sum first so the dependency download layer is cached
# across builds that only change source files.
COPY go.mod .
COPY go.sum .
RUN go mod download

COPY . .

WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer

# Fail the build if gofmt would change anything, then vet and lint.
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...

# Build a static binary so it can run on the bare alpine runtime image.
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kaspaminer .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app

# ca-certificates for TLS connections; tini as a minimal init (PID 1).
RUN apk add --no-cache ca-certificates tini

COPY --from=build /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer/kaspaminer /app/
USER nobody
ENTRYPOINT [ "/sbin/tini", "--" ]

34
cmd/kaspaminer/log.go Normal file
View File

@ -0,0 +1,34 @@
package main
import (
"fmt"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util/panics"
"os"
)
var (
	// backendLog is the shared logging backend for all kaspaminer loggers.
	backendLog = logs.NewBackend()
	// log is the kaspaminer subsystem logger ("KSMN").
	log = backendLog.Logger("KSMN")
	// spawn starts goroutines through the util/panics wrapper so panics
	// are reported via log.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog attaches two rotating log files to the logging backend: logFile
// receives everything from trace level up, while errLogFile receives only
// warnings and above. Any failure to set up a log file is fatal.
func initLog(logFile, errLogFile string) {
	if err := backendLog.AddLogFile(logFile, logs.LevelTrace); err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
		os.Exit(1)
	}
	if err := backendLog.AddLogFile(errLogFile, logs.LevelWarn); err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
		os.Exit(1)
	}
}
// enableRPCLogging routes the rpcclient library's log output through a
// dedicated "RPCC" logger at trace level.
func enableRPCLogging() {
	rpcLogger := backendLog.Logger("RPCC")
	rpcLogger.SetLevel(logs.LevelTrace)
	rpcclient.UseLogger(rpcLogger)
}

50
cmd/kaspaminer/main.go Normal file
View File

@ -0,0 +1,50 @@
package main
import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/cmd/kaspaminer/version"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
)
// main connects to the kaspad RPC server and runs the mine loop until the
// requested number of blocks has been mined or an interrupt is received.
func main() {
	defer panics.HandlePanic(log, nil, nil)
	interrupt := signal.InterruptListener()

	cfg, err := parseConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
		os.Exit(1)
	}

	// Show version at startup.
	log.Infof("Version %s", version.Version())

	// Enable http profiling server if requested.
	if cfg.Verbose {
		enableRPCLogging()
	}

	client, err := connectToServer(cfg)
	if err != nil {
		panic(errors.Wrap(err, "Error connecting to the RPC server"))
	}
	defer client.Disconnect()

	doneChan := make(chan struct{})
	spawn(func() {
		// Shadow err: the original assigned to main's err from this
		// goroutine, which is a data race on the shared variable.
		err := mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay)
		if err != nil {
			// Wrap (not Errorf) so the cause chain is preserved in
			// the panic report.
			panic(errors.Wrap(err, "Error in mine loop"))
		}
		doneChan <- struct{}{}
	})

	select {
	case <-doneChan:
	case <-interrupt:
	}
}

201
cmd/kaspaminer/mineloop.go Normal file
View File

@ -0,0 +1,201 @@
package main
import (
"encoding/hex"
nativeerrors "errors"
"math/rand"
"strconv"
"strings"
"sync"
"time"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// random is a time-seeded PRNG used to pick the initial nonce when solving
// blocks.
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
// mineLoop mines blocks until numberOfBlocks blocks have been found (or
// forever when numberOfBlocks is 0). Each found block is submitted
// asynchronously, optionally after blockDelay milliseconds. The first
// error from any worker goroutine is returned.
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error {
	errChan := make(chan error)
	templateStopChan := make(chan struct{})
	doneChan := make(chan struct{})
	spawn(func() {
		wg := sync.WaitGroup{}
		for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
			foundBlock := make(chan *util.Block)
			mineNextBlock(client, foundBlock, templateStopChan, errChan)
			block := <-foundBlock
			// Stop the template fetcher for the block that was just solved.
			templateStopChan <- struct{}{}
			wg.Add(1)
			// Submit asynchronously so the next block can be mined while
			// the (optionally delayed) submission is in flight.
			spawn(func() {
				if blockDelay != 0 {
					time.Sleep(time.Duration(blockDelay) * time.Millisecond)
				}
				err := handleFoundBlock(client, block)
				if err != nil {
					// NOTE(review): errChan is unbuffered; if mineLoop
					// has already returned, this send blocks and leaks
					// the goroutine — confirm whether the process is
					// always torn down at that point.
					errChan <- err
				}
				wg.Done()
			})
		}
		wg.Wait()
		doneChan <- struct{}{}
	})
	select {
	case err := <-errChan:
		return err
	case <-doneChan:
		return nil
	}
}
// mineNextBlock starts the template-fetching and block-solving goroutines
// for the next block. Solved blocks arrive on foundBlock, failures on
// errChan; templateStopChan stops the template fetcher.
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
	templateChan := make(chan *rpcmodel.GetBlockTemplateResult)
	go templatesLoop(client, templateChan, errChan, templateStopChan)
	go solveLoop(templateChan, foundBlock, errChan)
}
// handleFoundBlock submits a solved block to the RPC server.
func handleFoundBlock(client *minerClient, block *util.Block) error {
	log.Infof("Found block %s with parents %s. Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host())

	err := client.SubmitBlock(block, &rpcmodel.SubmitBlockOptions{})
	if err != nil {
		// Wrap (rather than Errorf) so the RPC error remains inspectable
		// by callers via errors.Cause/Is/As.
		return errors.Wrapf(err, "Error submitting block %s to %s", block.Hash(), client.Host())
	}
	return nil
}
// parseBlock converts a getBlockTemplate RPC result into a util.Block that
// is ready for nonce solving: parent hashes, difficulty bits and merkle
// commitments are decoded, all transactions (coinbase first) are
// deserialized, and the hash merkle root is recomputed over them.
func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) {
	// parse parent hashes
	parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
	for i, parentHash := range template.ParentHashes {
		hash, err := daghash.NewHashFromStr(parentHash)
		if err != nil {
			return nil, errors.Errorf("Error decoding hash %s: %s", parentHash, err)
		}
		parentHashes[i] = hash
	}

	// parse Bits (hex-encoded compact difficulty; must fit in 32 bits)
	bitsUint64, err := strconv.ParseUint(template.Bits, 16, 32)
	if err != nil {
		return nil, errors.Errorf("Error decoding bits %s: %s", template.Bits, err)
	}
	bits := uint32(bitsUint64)

	// parseAcceptedIDMerkleRoot
	acceptedIDMerkleRoot, err := daghash.NewHashFromStr(template.AcceptedIDMerkleRoot)
	if err != nil {
		return nil, errors.Errorf("Error parsing acceptedIDMerkleRoot: %s", err)
	}
	utxoCommitment, err := daghash.NewHashFromStr(template.UTXOCommitment)
	if err != nil {
		return nil, errors.Errorf("Error parsing utxoCommitment: %s", err)
	}

	// parse rest of block. The hash merkle root is left zeroed here and
	// filled in below, once all transactions have been added.
	msgBlock := wire.NewMsgBlock(
		wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{},
			acceptedIDMerkleRoot, utxoCommitment, bits, 0))

	// The coinbase transaction comes first, followed by the template's
	// regular transactions; each is hex-encoded wire format.
	for i, txResult := range append([]rpcmodel.GetBlockTemplateResultTx{*template.CoinbaseTxn}, template.Transactions...) {
		reader := hex.NewDecoder(strings.NewReader(txResult.Data))
		tx := &wire.MsgTx{}
		if err := tx.KaspaDecode(reader, 0); err != nil {
			return nil, errors.Errorf("Error decoding tx #%d: %s", i, err)
		}
		msgBlock.AddTransaction(tx)
	}

	block := util.NewBlock(msgBlock)
	// Recompute the hash merkle root now that all transactions are known.
	msgBlock.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
	return block, nil
}
// solveBlock searches for a nonce that makes the block hash meet the
// target difficulty, starting at a random nonce and wrapping around the
// entire uint64 space. The solved block is sent on foundBlock; closing
// stopChan aborts the search.
func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util.Block) {
	msgBlock := block.MsgBlock()
	targetDifficulty := util.CompactToBig(msgBlock.Header.Bits)
	initialNonce := random.Uint64()
	// Iterate from initialNonce with wraparound until the whole nonce
	// space has been tried once. The original code started the loop at a
	// second, unrelated random.Uint64() value, so the number of nonces
	// attempted before hitting initialNonce-1 was arbitrary instead of
	// covering the full space exactly once.
	for i := initialNonce; i != initialNonce-1; i++ {
		select {
		case <-stopChan:
			return
		default:
			msgBlock.Header.Nonce = i
			hash := msgBlock.BlockHash()
			if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
				foundBlock <- block
				return
			}
		}
	}
}
// templatesLoop repeatedly requests block templates from the server using
// long polling, pushing every new template onto newTemplateChan. It
// re-requests on block-added notifications and on a periodic tick, and
// closes newTemplateChan and returns when stopChan is signalled.
func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
	longPollID := ""
	getBlockTemplateLongPoll := func() {
		if longPollID != "" {
			log.Infof("Requesting template with longPollID '%s' from %s", longPollID, client.Host())
		} else {
			log.Infof("Requesting template without longPollID from %s", client.Host())
		}
		template, err := getBlockTemplate(client, longPollID)
		if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) {
			// Long-poll timeouts are expected; just poll again later.
			log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host())
			return
		} else if err != nil {
			errChan <- errors.Errorf("Error getting block template from %s: %s", client.Host(), err)
			return
		}
		if template.LongPollID != longPollID {
			log.Infof("Got new long poll template: %s", template.LongPollID)
			longPollID = template.LongPollID
			newTemplateChan <- template
		}
	}
	getBlockTemplateLongPoll()
	// Use one reusable ticker. The original called time.Tick inside the
	// select on every iteration, allocating a fresh, unstoppable ticker
	// per loop pass whose channel was then discarded.
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stopChan:
			close(newTemplateChan)
			return
		case <-client.onBlockAdded:
			getBlockTemplateLongPoll()
		case <-ticker.C:
			getBlockTemplateLongPoll()
		}
	}
}
// getBlockTemplate requests a block template (including the coinbase
// transaction) from the server, using longPollID for long polling.
func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
	capabilities := []string{"coinbasetxn"}
	return client.GetBlockTemplate(capabilities, longPollID)
}
// solveLoop spawns a solver goroutine for every template received on
// newTemplateChan, cancelling the previous solver (by closing its stop
// channel) whenever a newer template arrives. It exits when
// newTemplateChan is closed, stopping the last solver on the way out.
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
	var stopOldTemplateSolving chan struct{}
	for template := range newTemplateChan {
		// A newer template supersedes the previous one; stop the solver
		// still working on the old template.
		if stopOldTemplateSolving != nil {
			close(stopOldTemplateSolving)
		}
		stopOldTemplateSolving = make(chan struct{})
		block, err := parseBlock(template)
		if err != nil {
			errChan <- errors.Errorf("Error parsing block: %s", err)
			// NOTE(review): returning here may leave templatesLoop
			// blocked sending on newTemplateChan — confirm whether
			// that goroutine leak is acceptable (the process appears
			// to terminate on mineLoop errors).
			return
		}
		go solveBlock(block, stopOldTemplateSolving, foundBlock)
	}
	if stopOldTemplateSolving != nil {
		close(stopOldTemplateSolving)
	}
}

View File

@ -0,0 +1,50 @@
package version
import (
"fmt"
"strings"
)
// validCharacters is a list of characters valid in the appBuild string
const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"

// Semantic-version components of the application.
const (
	appMajor uint = 0
	appMinor uint = 1
	appPatch uint = 0
)

// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X github.com/kaspanet/kaspad/cmd/kaspaminer/version.appBuild=foo"' if needed.
// It MUST only contain characters from validCharacters.
var appBuild string

// version memoizes the formatted version string built by Version.
var version = ""

// Version returns the application version as a properly formed string
func Version() string {
	if version != "" {
		return version
	}
	// Major.minor.patch forms the base of the version string.
	version = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
	// Append build metadata if there is any; panic on invalid characters.
	if appBuild != "" {
		checkAppBuild(appBuild)
		version = version + "-" + appBuild
	}
	return version
}

// checkAppBuild verifies that appBuild does not contain any characters outside
// of validCharacters. In case of any invalid characters checkAppBuild panics.
func checkAppBuild(appBuild string) {
	for _, char := range appBuild {
		if !strings.ContainsRune(validCharacters, char) {
			panic(fmt.Errorf("appBuild string (%s) contains forbidden characters. Only alphanumeric characters and dashes are allowed", appBuild))
		}
	}
}

View File

@ -51,7 +51,6 @@ const (
blockMaxMassMin = 1000
blockMaxMassMax = 10000000
defaultMinRelayTxFee = 1e-5 // 1 sompi per byte
defaultGenerate = false
defaultMaxOrphanTransactions = 100
//DefaultMaxOrphanTxSize is the default maximum size for an orphan transaction
DefaultMaxOrphanTxSize = 100000
@ -132,7 +131,6 @@ type Flags struct {
Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"`
MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in KAS/kB to be considered a non-zero fee."`
MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"`
Generate bool `long:"generate" description:"Generate (mine) kaspa using the CPU"`
MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"`
BlockMaxMass uint64 `long:"blockmaxmass" description:"Maximum transaction mass to be used when creating a block"`
UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
@ -253,7 +251,6 @@ func loadConfig() (*Config, []string, error) {
MaxOrphanTxs: defaultMaxOrphanTransactions,
SigCacheMaxSize: defaultSigCacheMaxSize,
MinRelayTxFee: defaultMinRelayTxFee,
Generate: defaultGenerate,
TxIndex: defaultTxIndex,
AddrIndex: defaultAddrIndex,
AcceptanceIndex: defaultAcceptanceIndex,
@ -706,26 +703,6 @@ func loadConfig() (*Config, []string, error) {
activeConfig.SubnetworkID = nil
}
// Check that 'generate' and 'subnetwork' flags do not conflict
if activeConfig.Generate && activeConfig.SubnetworkID != nil {
str := "%s: both generate flag and subnetwork filtering are set "
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Ensure there is at least one mining address when the generate flag is
// set.
if activeConfig.Generate && len(activeConfig.MiningAddrs) == 0 {
str := "%s: the generate flag is set, but there are no mining " +
"addresses specified "
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Add default port to all listener addresses if needed and remove
// duplicate addresses.
activeConfig.Listeners = network.NormalizeAddresses(activeConfig.Listeners,

View File

@ -137,9 +137,6 @@ type Params struct {
// to calculate the required difficulty of each block.
DifficultyAdjustmentWindowSize uint64
// GenerateSupported specifies whether or not CPU mining is allowed.
GenerateSupported bool
// These fields are related to voting on consensus rule changes as
// defined by BIP0009.
//
@ -197,7 +194,6 @@ var MainnetParams = Params{
FinalityInterval: 1000,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
GenerateSupported: false,
// Consensus rule change deployments.
//
@ -255,7 +251,6 @@ var RegressionNetParams = Params{
FinalityInterval: 1000,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
GenerateSupported: true,
// Consensus rule change deployments.
//
@ -311,7 +306,6 @@ var TestnetParams = Params{
FinalityInterval: 1000,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
GenerateSupported: true,
// Consensus rule change deployments.
//
@ -373,7 +367,6 @@ var SimnetParams = Params{
FinalityInterval: 1000,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
GenerateSupported: true,
// Consensus rule change deployments.
//
@ -427,7 +420,6 @@ var DevnetParams = Params{
FinalityInterval: 1000,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
GenerateSupported: true,
// Consensus rule change deployments.
//

View File

@ -1,10 +0,0 @@
integration
===========
[![Build Status](http://img.shields.io/travis/kaspanet/kaspad.svg)](https://travis-ci.org/kaspanet/kaspad)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
This contains integration tests which make use of the
[rpctest](https://github.com/kaspanet/kaspad/tree/master/integration/rpctest)
package to programmatically drive nodes via RPC.

View File

@ -1,401 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular tests due to the following build tag.
// +build rpctest
package integration
import (
"runtime"
"testing"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/integration/rpctest"
"github.com/kaspanet/kaspad/util/daghash"
)
const (
// vbLegacyBlockVersion is the highest legacy block version before the
// version bits scheme became active.
vbLegacyBlockVersion = 4
// vbTopBits defines the bits to set in the version to signal that the
// version bits scheme is being used.
vbTopBits = 0x20000000
)
// assertVersionBit gets the passed block hash from the given test harness and
// ensures its version either has the provided bit set or unset per the set
// flag.
func assertVersionBit(r *rpctest.Harness, t *testing.T, hash *daghash.Hash, bit uint8, set bool) {
block, err := r.Node.GetBlock(hash)
if err != nil {
t.Fatalf("failed to retrieve block %v: %v", hash, err)
}
switch {
case set && block.Header.Version&(1<<bit) == 0:
_, _, line, _ := runtime.Caller(1)
t.Fatalf("assertion failed at line %d: block %s, version 0x%x "+
"does not have bit %d set", line, hash,
block.Header.Version, bit)
case !set && block.Header.Version&(1<<bit) != 0:
_, _, line, _ := runtime.Caller(1)
t.Fatalf("assertion failed at line %d: block %s, version 0x%x "+
"has bit %d set", line, hash, block.Header.Version, bit)
}
}
// assertChainHeight retrieves the current chain height from the given test
// harness and ensures it matches the provided expected height.
func assertChainHeight(r *rpctest.Harness, t *testing.T, expectedHeight uint64) {
height, err := r.Node.GetBlockCount()
if err != nil {
t.Fatalf("failed to retrieve block height: %v", err)
}
if height != expectedHeight {
_, _, line, _ := runtime.Caller(1)
t.Fatalf("assertion failed at line %d: block height of %d "+
"is not the expected %d", line, height, expectedHeight)
}
}
// thresholdStateToStatus converts the passed threshold state to the equivalent
// status string returned in the getblockchaininfo RPC.
func thresholdStateToStatus(state blockdag.ThresholdState) (string, error) {
switch state {
case blockdag.ThresholdDefined:
return "defined", nil
case blockdag.ThresholdStarted:
return "started", nil
case blockdag.ThresholdLockedIn:
return "lockedin", nil
case blockdag.ThresholdActive:
return "active", nil
case blockdag.ThresholdFailed:
return "failed", nil
}
return "", errors.Errorf("unrecognized threshold state: %v", state)
}
// assertSoftForkStatus retrieves the current blockchain info from the given
// test harness and ensures the provided soft fork key is both available and its
// status is the equivalent of the passed state.
func assertSoftForkStatus(r *rpctest.Harness, t *testing.T, forkKey string, state blockdag.ThresholdState) {
// Convert the expected threshold state into the equivalent
// getblockchaininfo RPC status string.
status, err := thresholdStateToStatus(state)
if err != nil {
_, _, line, _ := runtime.Caller(1)
t.Fatalf("assertion failed at line %d: unable to convert "+
"threshold state %v to string", line, state)
}
info, err := r.Node.GetBlockChainInfo()
if err != nil {
t.Fatalf("failed to retrieve chain info: %v", err)
}
// Ensure the key is available.
desc, ok := info.Bip9SoftForks[forkKey]
if !ok {
_, _, line, _ := runtime.Caller(1)
t.Fatalf("assertion failed at line %d: softfork status for %q "+
"is not in getblockchaininfo results", line, forkKey)
}
// Ensure the status it the expected value.
if desc.Status != status {
_, _, line, _ := runtime.Caller(1)
t.Fatalf("assertion failed at line %d: softfork status for %q "+
"is %v instead of expected %v", line, forkKey,
desc.Status, status)
}
}
// testBIP0009 ensures the BIP0009 soft fork mechanism follows the state
// transition rules set forth by the BIP for the provided soft fork key. It
// uses the regression test network to signal support and advance through the
// various threshold states including failure to achieve locked in status.
//
// See TestBIP0009 for an overview of what is tested.
//
// NOTE: This only differs from the exported version in that it accepts the
// specific soft fork deployment to test.
func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) {
// Initialize the primary mining node with only the genesis block.
r, err := rpctest.New(&dagconfig.RegressionNetParams, nil, nil)
if err != nil {
t.Fatalf("unable to create primary harness: %v", err)
}
if err := r.SetUp(false, 0); err != nil {
t.Fatalf("unable to setup test chain: %v", err)
}
defer r.TearDown()
// *** ThresholdDefined ***
//
// Assert the chain height is the expected value and the soft fork
// status starts out as defined.
assertChainHeight(r, t, 0)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdDefined)
// *** ThresholdDefined part 2 - 1 block prior to ThresholdStarted ***
//
// Generate enough blocks to reach the height just before the first
// state transition without signalling support since the state should
// move to started once the start time has been reached regardless of
// support signalling.
//
// NOTE: This is two blocks before the confirmation window because the
// getblockchaininfo RPC reports the status for the block AFTER the
// current one. All of the heights below are thus offset by one to
// compensate.
//
// Assert the chain height is the expected value and soft fork status is
// still defined and did NOT move to started.
confirmationWindow := r.ActiveNet.MinerConfirmationWindow
for i := uint32(0); i < confirmationWindow-2; i++ {
_, err := r.GenerateAndSubmitBlock(nil, vbLegacyBlockVersion,
time.Time{})
if err != nil {
t.Fatalf("failed to generated block %d: %v", i, err)
}
}
assertChainHeight(r, t, confirmationWindow-2)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdDefined)
// *** ThresholdStarted ***
//
// Generate another block to reach the next window.
//
// Assert the chain height is the expected value and the soft fork
// status is started.
_, err = r.GenerateAndSubmitBlock(nil, vbLegacyBlockVersion, time.Time{})
if err != nil {
t.Fatalf("failed to generated block: %v", err)
}
assertChainHeight(r, t, confirmationWindow-1)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdStarted)
// *** ThresholdStarted part 2 - Fail to achieve ThresholdLockedIn ***
//
// Generate enough blocks to reach the next window in such a way that
// the number blocks with the version bit set to signal support is 1
// less than required to achieve locked in status.
//
// Assert the chain height is the expected value and the soft fork
// status is still started and did NOT move to locked in.
if deploymentID > uint32(len(r.ActiveNet.Deployments)) {
t.Fatalf("deployment ID %d does not exist", deploymentID)
}
deployment := &r.ActiveNet.Deployments[deploymentID]
activationThreshold := r.ActiveNet.RuleChangeActivationThreshold
signalForkVersion := int32(1<<deployment.BitNumber) | vbTopBits
for i := uint32(0); i < activationThreshold-1; i++ {
_, err := r.GenerateAndSubmitBlock(nil, signalForkVersion,
time.Time{})
if err != nil {
t.Fatalf("failed to generated block %d: %v", i, err)
}
}
for i := uint32(0); i < confirmationWindow-(activationThreshold-1); i++ {
_, err := r.GenerateAndSubmitBlock(nil, vbLegacyBlockVersion,
time.Time{})
if err != nil {
t.Fatalf("failed to generated block %d: %v", i, err)
}
}
assertChainHeight(r, t, (confirmationWindow*2)-1)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdStarted)
// *** ThresholdLockedIn ***
//
// Generate enough blocks to reach the next window in such a way that
// the number blocks with the version bit set to signal support is
// exactly the number required to achieve locked in status.
//
// Assert the chain height is the expected value and the soft fork
// status moved to locked in.
for i := uint32(0); i < activationThreshold; i++ {
_, err := r.GenerateAndSubmitBlock(nil, signalForkVersion,
time.Time{})
if err != nil {
t.Fatalf("failed to generated block %d: %v", i, err)
}
}
for i := uint32(0); i < confirmationWindow-activationThreshold; i++ {
_, err := r.GenerateAndSubmitBlock(nil, vbLegacyBlockVersion,
time.Time{})
if err != nil {
t.Fatalf("failed to generated block %d: %v", i, err)
}
}
assertChainHeight(r, t, (confirmationWindow*3)-1)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdLockedIn)
// *** ThresholdLockedIn part 2 -- 1 block prior to ThresholdActive ***
//
// Generate enough blocks to reach the height just before the next
// window without continuing to signal support since it is already
// locked in.
//
// Assert the chain height is the expected value and the soft fork
// status is still locked in and did NOT move to active.
for i := uint32(0); i < confirmationWindow-1; i++ {
_, err := r.GenerateAndSubmitBlock(nil, vbLegacyBlockVersion,
time.Time{})
if err != nil {
t.Fatalf("failed to generated block %d: %v", i, err)
}
}
assertChainHeight(r, t, (confirmationWindow*4)-2)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdLockedIn)
// *** ThresholdActive ***
//
// Generate another block to reach the next window without continuing to
// signal support since it is already locked in.
//
// Assert the chain height is the expected value and the soft fork
// status moved to active.
_, err = r.GenerateAndSubmitBlock(nil, vbLegacyBlockVersion, time.Time{})
if err != nil {
t.Fatalf("failed to generated block: %v", err)
}
assertChainHeight(r, t, (confirmationWindow*4)-1)
assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdActive)
}
// TestBIP0009 ensures the BIP0009 soft fork mechanism follows the state
// transition rules set forth by the BIP for all soft forks. It uses the
// regression test network to signal support and advance through the various
// threshold states including failure to achieve locked in status.
//
// Overview:
// - Assert the chain height is 0 and the state is ThresholdDefined
// - Generate 1 fewer blocks than needed to reach the first state transition
// - Assert chain height is expected and state is still ThresholdDefined
// - Generate 1 more block to reach the first state transition
// - Assert chain height is expected and state moved to ThresholdStarted
// - Generate enough blocks to reach the next state transition window, but only
// signal support in 1 fewer than the required number to achieve
// ThresholdLockedIn
// - Assert chain height is expected and state is still ThresholdStarted
// - Generate enough blocks to reach the next state transition window with only
// the exact number of blocks required to achieve locked in status signalling
// support.
// - Assert chain height is expected and state moved to ThresholdLockedIn
// - Generate 1 fewer blocks than needed to reach the next state transition
// - Assert chain height is expected and state is still ThresholdLockedIn
// - Generate 1 more block to reach the next state transition
// - Assert chain height is expected and state moved to ThresholdActive
func TestBIP0009(t *testing.T) {
t.Parallel()
testBIP0009(t, "dummy", dagconfig.DeploymentTestDummy)
}
// TestBIP0009Mining ensures blocks built via kaspad's CPU miner follow the rules
// set forth by BIP0009 by using the test dummy deployment.
//
// Overview:
// - Generate block 1
// - Assert bit is NOT set (ThresholdDefined)
// - Generate enough blocks to reach first state transition
// - Assert bit is NOT set for block prior to state transition
// - Assert bit is set for block at state transition (ThresholdStarted)
// - Generate enough blocks to reach second state transition
// - Assert bit is set for block at state transition (ThresholdLockedIn)
// - Generate enough blocks to reach third state transition
// - Assert bit is set for block prior to state transition (ThresholdLockedIn)
// - Assert bit is NOT set for block at state transition (ThresholdActive)
func TestBIP0009Mining(t *testing.T) {
	t.Parallel()

	// Initialize the primary mining node with only the genesis block.
	r, err := rpctest.New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		t.Fatalf("unable to create primary harness: %v", err)
	}
	if err := r.SetUp(true, 0); err != nil {
		t.Fatalf("unable to setup test chain: %v", err)
	}
	defer r.TearDown()

	// Assert the chain only consists of the genesis block.
	assertChainHeight(r, t, 0)

	// *** ThresholdDefined ***
	//
	// Generate a block that extends the genesis block. It should not have
	// the test dummy bit set in the version since the first window is
	// in the defined threshold state.
	deployment := &r.ActiveNet.Deployments[dagconfig.DeploymentTestDummy]
	testDummyBitNum := deployment.BitNumber
	hashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}
	assertChainHeight(r, t, 1)
	assertVersionBit(r, t, hashes[0], testDummyBitNum, false)

	// *** ThresholdStarted ***
	//
	// Generate enough blocks to reach the first state transition.
	//
	// The second to last generated block should not have the test bit set
	// in the version.
	//
	// The last generated block should now have the test bit set in the
	// version since the kaspad mining code will have recognized the test
	// dummy deployment as started.
	confirmationWindow := r.ActiveNet.MinerConfirmationWindow
	numNeeded := confirmationWindow - 1
	hashes, err = r.Node.Generate(numNeeded)
	if err != nil {
		t.Fatalf("failed to generated %d blocks: %v", numNeeded, err)
	}
	assertChainHeight(r, t, confirmationWindow)
	assertVersionBit(r, t, hashes[len(hashes)-2], testDummyBitNum, false)
	assertVersionBit(r, t, hashes[len(hashes)-1], testDummyBitNum, true)

	// *** ThresholdLockedIn ***
	//
	// Generate enough blocks to reach the next state transition.
	//
	// The last generated block should still have the test bit set in the
	// version since the kaspad mining code will have recognized the test
	// dummy deployment as locked in.
	hashes, err = r.Node.Generate(confirmationWindow)
	if err != nil {
		t.Fatalf("failed to generated %d blocks: %v", confirmationWindow,
			err)
	}
	assertChainHeight(r, t, confirmationWindow*2)
	assertVersionBit(r, t, hashes[len(hashes)-1], testDummyBitNum, true)

	// *** ThresholdActivated ***
	//
	// Generate enough blocks to reach the next state transition.
	//
	// The second to last generated block should still have the test bit set
	// in the version since it is still locked in.
	//
	// The last generated block should NOT have the test bit set in the
	// version since the kaspad mining code will have recognized the test
	// dummy deployment as activated and thus there is no longer any need
	// to set the bit.
	hashes, err = r.Node.Generate(confirmationWindow)
	if err != nil {
		t.Fatalf("failed to generated %d blocks: %v", confirmationWindow,
			err)
	}
	assertChainHeight(r, t, confirmationWindow*3)
	assertVersionBit(r, t, hashes[len(hashes)-2], testDummyBitNum, true)
	assertVersionBit(r, t, hashes[len(hashes)-1], testDummyBitNum, false)
}

View File

@ -1,574 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular tests due to the following build tag.
// +build rpctest
package integration
import (
"bytes"
"runtime"
"strings"
"testing"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/integration/rpctest"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// makeTestOutput creates an on-chain output paying to a freshly generated
// p2pkh output with the specified amount. It returns the private key able to
// spend the output, the outpoint of the confirmed output, and its
// scriptPubKey.
func makeTestOutput(r *rpctest.Harness, t *testing.T,
	amt util.Amount) (*ecc.PrivateKey, *wire.Outpoint, []byte, error) {

	// Create a fresh key, then send some coins to an address spendable by
	// that key.
	key, err := ecc.NewPrivateKey(ecc.S256())
	if err != nil {
		return nil, nil, nil, err
	}

	// Using the key created above, generate a scriptPubKey which it's able to
	// spend.
	a, err := util.NewAddressPubKey(key.PubKey().SerializeCompressed(), r.ActiveNet)
	if err != nil {
		return nil, nil, nil, err
	}
	selfAddrScript, err := txscript.PayToAddrScript(a.AddressPubKeyHash())
	if err != nil {
		return nil, nil, nil, err
	}
	// Honor the caller-specified amount. The previous code hard-coded 1e8
	// here, contradicting this function's documented contract (existing
	// callers pass util.SompiPerKaspa == 1e8, so their behavior is
	// unchanged).
	output := &wire.TxOut{ScriptPubKey: selfAddrScript, Value: int64(amt)}

	// Next, create and broadcast a transaction paying to the output.
	fundTx, err := r.CreateTransaction([]*wire.TxOut{output}, 10)
	if err != nil {
		return nil, nil, nil, err
	}
	txHash, err := r.Node.SendRawTransaction(fundTx, true)
	if err != nil {
		return nil, nil, nil, err
	}

	// The transaction created above should be included within the next
	// generated block.
	blockHash, err := r.Node.Generate(1)
	if err != nil {
		return nil, nil, nil, err
	}
	assertTxInBlock(r, t, blockHash[0], txHash)

	// Locate the output index of the coins spendable by the key we
	// generated above, this is needed in order to create a proper utxo for
	// this output.
	var outputIndex uint32
	if bytes.Equal(fundTx.TxOut[0].ScriptPubKey, selfAddrScript) {
		outputIndex = 0
	} else {
		outputIndex = 1
	}

	utxo := &wire.Outpoint{
		TxID:  fundTx.TxID(),
		Index: outputIndex,
	}

	return key, utxo, selfAddrScript, nil
}
// TestBIP0113Activation tests for proper adherence of the BIP 113 rule
// constraint which requires all transaction finality tests to use the MTP of
// the last 11 blocks, rather than the timestamp of the block which includes
// them.
//
// Overview:
// - Transactions with non-final lock-times from the PoV of MTP should be
// rejected from the mempool and when found within otherwise valid blocks.
// - Transactions with final lock-times from the PoV of MTP should be
// accepted to the mempool and mined in future block.
func TestBIP0113(t *testing.T) {
	t.Parallel()

	kaspadCfg := []string{"--rejectnonstd"}
	r, err := rpctest.New(&dagconfig.SimnetParams, nil, kaspadCfg)
	if err != nil {
		t.Fatal("unable to create primary harness: ", err)
	}
	if err := r.SetUp(true, 1); err != nil {
		t.Fatalf("unable to setup test chain: %v", err)
	}
	defer r.TearDown()

	// Create a fresh output for usage within the test below.
	const outputValue = util.SompiPerKaspa
	outputKey, testOutput, testScriptPubKey, err := makeTestOutput(r, t,
		outputValue)
	if err != nil {
		t.Fatalf("unable to create test output: %v", err)
	}

	// Fetch a fresh address from the harness, we'll use this address to
	// send funds back into the Harness.
	addr, err := r.NewAddress()
	if err != nil {
		t.Fatalf("unable to generate address: %v", err)
	}
	addrScript, err := txscript.PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to generate addr script: %v", err)
	}

	// Now create a transaction with a lock time which is "final" according
	// to the latest block, but not according to the current median time
	// past.
	tx := wire.NewMsgTx(1)
	tx.AddTxIn(&wire.TxIn{
		PreviousOutpoint: *testOutput,
	})
	tx.AddTxOut(&wire.TxOut{
		ScriptPubKey: addrScript,
		Value:        outputValue - 1000,
	})

	// We set the lock-time of the transaction to just one second after the
	// current MTP of the chain (MedianTime is in Unix seconds).
	chainInfo, err := r.Node.GetBlockChainInfo()
	if err != nil {
		t.Fatalf("unable to query for chain info: %v", err)
	}
	tx.LockTime = chainInfo.MedianTime + 1

	sigScript, err := txscript.SignatureScript(tx, 0, testScriptPubKey,
		txscript.SigHashAll, outputKey, true)
	if err != nil {
		t.Fatalf("unable to generate sig: %v", err)
	}
	tx.TxIn[0].SignatureScript = sigScript

	// This transaction should be rejected from the mempool as using MTP
	// for transactions finality is now a policy rule. Additionally, the
	// exact error should be the rejection of a non-final transaction.
	_, err = r.Node.SendRawTransaction(tx, true)
	if err == nil {
		t.Fatalf("transaction accepted, but should be non-final")
	} else if !strings.Contains(err.Error(), "not finalized") {
		t.Fatalf("transaction should be rejected due to being "+
			"non-final, instead: %v", err)
	}

	// The timeLockDeltas slice represents a series of deviations from the
	// current MTP which will be used to test border conditions w.r.t
	// transaction finality. -1 indicates 1 second prior to the MTP, 0
	// indicates the current MTP, and 1 indicates 1 second after the
	// current MTP.
	//
	// This time, all transactions which are final according to the MTP
	// *should* be accepted to both the mempool and within a valid block.
	// While transactions with lock-times *after* the current MTP should be
	// rejected.
	timeLockDeltas := []int64{-1, 0, 1}
	for _, timeLockDelta := range timeLockDeltas {
		chainInfo, err = r.Node.GetBlockChainInfo()
		if err != nil {
			t.Fatalf("unable to query for chain info: %v", err)
		}
		medianTimePast := chainInfo.MedianTime

		// Create another test output to be spent shortly below.
		outputKey, testOutput, testScriptPubKey, err = makeTestOutput(r, t,
			outputValue)
		if err != nil {
			t.Fatalf("unable to create test output: %v", err)
		}

		// Create a new transaction with a lock-time past the current known
		// MTP.
		tx = wire.NewMsgTx(1)
		tx.AddTxIn(&wire.TxIn{
			PreviousOutpoint: *testOutput,
		})
		tx.AddTxOut(&wire.TxOut{
			ScriptPubKey: addrScript,
			Value:        outputValue - 1000,
		})
		tx.LockTime = medianTimePast + timeLockDelta
		sigScript, err = txscript.SignatureScript(tx, 0, testScriptPubKey,
			txscript.SigHashAll, outputKey, true)
		if err != nil {
			t.Fatalf("unable to generate sig: %v", err)
		}
		tx.TxIn[0].SignatureScript = sigScript

		// If the time-lock delta is greater than -1, then the
		// transaction should be rejected from the mempool and when
		// included within a block. A time-lock delta of -1 should be
		// accepted as it has a lock-time of one
		// second _before_ the current MTP.
		_, err = r.Node.SendRawTransaction(tx, true)
		if err == nil && timeLockDelta >= 0 {
			t.Fatal("transaction was accepted into the mempool " +
				"but should be rejected!")
		} else if err != nil && !strings.Contains(err.Error(), "not finalized") {
			t.Fatalf("transaction should be rejected from mempool "+
				"due to being non-final, instead: %v", err)
		}

		txns := []*util.Tx{util.NewTx(tx)}
		_, err := r.GenerateAndSubmitBlock(txns, -1, time.Time{})
		if err == nil && timeLockDelta >= 0 {
			t.Fatal("block should be rejected due to non-final " +
				"txn, but was accepted")
		} else if err != nil && !strings.Contains(err.Error(), "unfinalized") {
			t.Fatalf("block should be rejected due to non-final "+
				"tx, instead: %v", err)
		}
	}
}
// createCSVOutput creates an output paying to a trivially redeemable CSV
// scriptPubKey with the specified time-lock. It returns the redeem script,
// the outpoint of the created P2SH output, and the funding transaction.
func createCSVOutput(r *rpctest.Harness, t *testing.T,
	numSompis util.Amount, timeLock int64,
	isSeconds bool) ([]byte, *wire.Outpoint, *wire.MsgTx, error) {

	// Convert the time-lock to the proper sequence lock based according to
	// if the lock is seconds or time based. (timeLock is already int64, so
	// the redundant conversion in the original code was dropped.)
	sequenceLock := blockdag.LockTimeToSequence(isSeconds, timeLock)

	// Our CSV script is simply: <sequenceLock> OP_CSV
	b := txscript.NewScriptBuilder().
		AddInt64(int64(sequenceLock)).
		AddOp(txscript.OpCheckSequenceVerify)
	csvScript, err := b.Script()
	if err != nil {
		return nil, nil, nil, err
	}

	// Using the script generated above, create a P2SH output which will be
	// accepted into the mempool.
	p2shAddr, err := util.NewAddressScriptHash(csvScript, r.ActiveNet)
	if err != nil {
		return nil, nil, nil, err
	}
	p2shScript, err := txscript.PayToAddrScript(p2shAddr)
	if err != nil {
		return nil, nil, nil, err
	}
	output := &wire.TxOut{
		ScriptPubKey: p2shScript,
		Value:        int64(numSompis),
	}

	// Finally create a valid transaction which creates the output crafted
	// above.
	tx, err := r.CreateTransaction([]*wire.TxOut{output}, 10)
	if err != nil {
		return nil, nil, nil, err
	}

	// The harness may place the P2SH output at index 0 or 1 (the other is
	// change), so locate it by script comparison.
	var outputIndex uint32
	if !bytes.Equal(tx.TxOut[0].ScriptPubKey, p2shScript) {
		outputIndex = 1
	}

	utxo := &wire.Outpoint{
		TxID:  tx.TxID(),
		Index: outputIndex,
	}

	return csvScript, utxo, tx, nil
}
// spendCSVOutput builds a transaction spending an output previously produced
// by createCSVOutput. Since the redeem script is trivially satisfiable, the
// signature script is just OP_TRUE followed by the redeem script itself so it
// passes P2SH evaluation.
func spendCSVOutput(redeemScript []byte, csvUTXO *wire.Outpoint,
	sequence uint64, targetOutput *wire.TxOut,
	txVersion int32) (*wire.MsgTx, error) {

	// Assemble the spending transaction: one input referencing the CSV
	// outpoint with the requested sequence, one caller-supplied output.
	spendTx := wire.NewMsgTx(txVersion)
	spendTx.AddTxIn(&wire.TxIn{
		PreviousOutpoint: *csvUTXO,
		Sequence:         sequence,
	})
	spendTx.AddTxOut(targetOutput)

	// Signature script: <OP_TRUE> <redeemScript>.
	sigScript, err := txscript.NewScriptBuilder().
		AddOp(txscript.OpTrue).
		AddData(redeemScript).
		Script()
	if err != nil {
		return nil, err
	}
	spendTx.TxIn[0].SignatureScript = sigScript

	return spendTx, nil
}
// assertTxInBlock asserts a transaction with the specified txid is found
// within the block with the passed block hash. It fails the test (reporting
// the caller's line) otherwise.
func assertTxInBlock(r *rpctest.Harness, t *testing.T, blockHash *daghash.Hash,
	txid *daghash.Hash) {

	block, err := r.Node.GetBlock(blockHash)
	if err != nil {
		t.Fatalf("unable to get block: %v", err)
	}
	if len(block.Transactions) < 2 {
		t.Fatal("target transaction was not mined")
	}

	// Compare each transaction's ID against the requested txid. The
	// original code compared a transaction's ID against itself
	// (txHash := txn.TxID(); txn.TxID() == txHash), which made the check
	// vacuously succeed on the first transaction.
	for _, txn := range block.Transactions {
		if txn.TxID() == daghash.TxID(*txid) {
			return
		}
	}

	_, _, line, _ := runtime.Caller(1)
	t.Fatalf("assertion failed at line %v: txid %v was not found in "+
		"block %v", line, txid, blockHash)
}
// TestBIP0068AndCsv tests for the proper adherence to the BIP 68
// rule-set and the behaviour of OP_CHECKSEQUENCEVERIFY
func TestBIP0068AndCsv(t *testing.T) {
	t.Parallel()

	// We'd like the test proper evaluation and validation of the BIP 68
	// (sequence locks) and BIP 112 rule-sets which add input-age based
	// relative lock times.
	kaspadCfg := []string{"--rejectnonstd"}
	r, err := rpctest.New(&dagconfig.SimnetParams, nil, kaspadCfg)
	if err != nil {
		t.Fatal("unable to create primary harness: ", err)
	}
	if err := r.SetUp(true, 1); err != nil {
		t.Fatalf("unable to setup test chain: %v", err)
	}
	defer r.TearDown()

	harnessAddr, err := r.NewAddress()
	if err != nil {
		t.Fatalf("unable to obtain harness address: %v", err)
	}
	harnessScript, err := txscript.PayToAddrScript(harnessAddr)
	if err != nil {
		t.Fatalf("unable to generate scriptPubKey: %v", err)
	}

	const (
		outputAmt         = util.SompiPerKaspa
		relativeBlockLock = 10
	)

	sweepOutput := &wire.TxOut{
		Value:        outputAmt - 5000,
		ScriptPubKey: harnessScript,
	}

	// With the height at 104 we need 200 blocks to be mined after the
	// genesis target period, so we mine 192 blocks. This'll put us at
	// height 296. The getblockchaininfo call checks the state for the
	// block AFTER the current height.
	numBlocks := (r.ActiveNet.MinerConfirmationWindow * 2) - 8
	if _, err := r.Node.Generate(numBlocks); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}
	assertChainHeight(r, t, 293)

	// Knowing the number of outputs needed for the tests below, create a
	// fresh output for use within each of the test-cases below.
	const relativeTimeLock = 512
	const numTests = 7
	type csvOutput struct {
		RedeemScript []byte
		Utxo         *wire.Outpoint
		Timelock     int64
	}

	var spendableInputs [numTests]csvOutput

	// Create six outputs which have block-based sequence locks (i < 6),
	// and one output which uses the above time-based sequence lock.
	for i := 0; i < numTests; i++ {
		timeLock := relativeTimeLock
		isSeconds := true
		if i < 6 {
			timeLock = relativeBlockLock
			isSeconds = false
		}

		redeemScript, utxo, tx, err := createCSVOutput(r, t, outputAmt,
			timeLock, isSeconds)
		if err != nil {
			t.Fatalf("unable to create CSV output: %v", err)
		}
		if _, err := r.Node.SendRawTransaction(tx, true); err != nil {
			t.Fatalf("unable to broadcast transaction: %v", err)
		}

		spendableInputs[i] = csvOutput{
			RedeemScript: redeemScript,
			Utxo:         utxo,
			Timelock:     int64(timeLock),
		}
	}

	// Mine a single block including all the transactions generated above.
	if _, err := r.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	// Now mine 10 additional blocks giving the inputs generated above an
	// age of 11. Space out each block 10 minutes after the previous block.
	parentBlockHash, err := r.Node.GetSelectedTipHash()
	if err != nil {
		t.Fatalf("unable to get prior block hash: %v", err)
	}
	parentBlock, err := r.Node.GetBlock(parentBlockHash)
	if err != nil {
		t.Fatalf("unable to get block: %v", err)
	}
	for i := 0; i < relativeBlockLock; i++ {
		timeStamp := parentBlock.Header.Timestamp.Add(time.Minute * 10)
		b, err := r.GenerateAndSubmitBlock(nil, -1, timeStamp)
		if err != nil {
			t.Fatalf("unable to generate block: %v", err)
		}
		parentBlock = b.MsgBlock()
	}

	// A helper function to create fully signed transactions in-line during
	// the array initialization below.
	var inputIndex uint32
	makeTxCase := func(sequenceNum uint64, txVersion int32) *wire.MsgTx {
		csvInput := spendableInputs[inputIndex]

		tx, err := spendCSVOutput(csvInput.RedeemScript, csvInput.Utxo,
			sequenceNum, sweepOutput, txVersion)
		if err != nil {
			t.Fatalf("unable to spend CSV output: %v", err)
		}

		inputIndex++
		return tx
	}

	tests := [numTests]struct {
		tx     *wire.MsgTx
		accept bool
	}{
		// A transaction spending a single input. The
		// input has a relative time-lock of 1 block, but the disable
		// bit it set. The transaction should be rejected as a result.
		{
			tx: makeTxCase(
				blockdag.LockTimeToSequence(false, 1)|wire.SequenceLockTimeDisabled,
				1,
			),
			accept: false,
		},
		// A transaction with a single input having a 9 block
		// relative time lock. The referenced input is 11 blocks old,
		// but the CSV output requires a 10 block relative lock-time.
		// Therefore, the transaction should be rejected.
		{
			tx:     makeTxCase(blockdag.LockTimeToSequence(false, 9), 1),
			accept: false,
		},
		// A transaction with a single input having a 10 block
		// relative time lock. The referenced input is 11 blocks old so
		// the transaction should be accepted.
		{
			tx:     makeTxCase(blockdag.LockTimeToSequence(false, 10), 1),
			accept: true,
		},
		// A transaction with a single input having a 11 block
		// relative time lock. The input referenced has an input age of
		// 11 and the CSV op-code requires 10 blocks to have passed, so
		// this transaction should be accepted.
		{
			tx:     makeTxCase(blockdag.LockTimeToSequence(false, 11), 1),
			accept: true,
		},
		// A transaction whose input has a 1000 block relative time
		// lock. This should be rejected as the input's age is only 11
		// blocks.
		{
			tx:     makeTxCase(blockdag.LockTimeToSequence(false, 1000), 1),
			accept: false,
		},
		// A transaction with a single input having a 512,000 second
		// relative time-lock. This transaction should be rejected as 6
		// days worth of blocks haven't yet been mined. The referenced
		// input doesn't have sufficient age.
		{
			tx:     makeTxCase(blockdag.LockTimeToSequence(true, 512000), 1),
			accept: false,
		},
		// A transaction whose single input has a 512 second
		// relative time-lock. This transaction should be accepted as
		// finalized.
		{
			tx:     makeTxCase(blockdag.LockTimeToSequence(true, 512), 1),
			accept: true,
		},
	}

	for i, test := range tests {
		txid, err := r.Node.SendRawTransaction(test.tx, true)
		switch {
		// Test case passes, nothing further to report.
		case test.accept && err == nil:

		// Transaction should have been accepted but we have a non-nil
		// error.
		case test.accept && err != nil:
			t.Fatalf("test #%d, transaction should be accepted, "+
				"but was rejected: %v", i, err)

		// Transaction should have been rejected, but it was accepted.
		case !test.accept && err == nil:
			t.Fatalf("test #%d, transaction should be rejected, "+
				"but was accepted", i)

		// Transaction was rejected as wanted, nothing more to do.
		case !test.accept && err != nil:
		}

		// If the transaction should be rejected, manually mine a block
		// with the non-final transaction. It should be rejected.
		if !test.accept {
			txns := []*util.Tx{util.NewTx(test.tx)}
			_, err := r.GenerateAndSubmitBlock(txns, -1, time.Time{})
			if err == nil {
				t.Fatalf("test #%d, invalid block accepted", i)
			}

			continue
		}

		// Generate a block, the transaction should be included within
		// the newly mined block.
		blockHashes, err := r.Node.Generate(1)
		if err != nil {
			t.Fatalf("unable to mine block: %v", err)
		}
		assertTxInBlock(r, t, blockHashes[0], txid)
	}
}

View File

@ -1,8 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package integration
// This file only exists to prevent warnings due to no buildable source files
// when the build tag for enabling the tests is not specified.

View File

@ -1,166 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular tests due to the following build tag.
// +build rpctest
package integration
import (
"bytes"
"fmt"
"os"
"runtime/debug"
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/integration/rpctest"
)
// testGetSelectedTip verifies that, after mining a single block, the node's
// selected tip hash and height both reflect the freshly generated block.
func testGetSelectedTip(r *rpctest.Harness, t *testing.T) {
	_, heightBefore, err := r.Node.GetSelectedTip()
	if err != nil {
		t.Fatalf("Call to `GetSelectedTip` failed: %v", err)
	}

	// Extend the DAG with one new block.
	minedHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	tipHash, tipHeight, err := r.Node.GetSelectedTip()
	if err != nil {
		t.Fatalf("Call to `GetSelectedTip` failed: %v", err)
	}

	// The reported tip must be the block we just mined...
	if !bytes.Equal(tipHash[:], minedHashes[0][:]) {
		t.Fatalf("Block hashes do not match. Returned hash %v, wanted "+
			"hash %v", tipHash, minedHashes[0][:])
	}
	// ...and its height must have advanced by exactly one.
	if tipHeight != heightBefore+1 {
		t.Fatalf("Block heights do not match. Got %v, wanted %v",
			tipHeight, heightBefore+1)
	}
}
// testGetBlockCount verifies the node's reported block count increases by
// exactly one after a single block is mined.
func testGetBlockCount(r *rpctest.Harness, t *testing.T) {
	countBefore, err := r.Node.GetBlockCount()
	if err != nil {
		t.Fatalf("Unable to get block count: %v", err)
	}

	// Mine exactly one block.
	if _, err := r.Node.Generate(1); err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	countAfter, err := r.Node.GetBlockCount()
	if err != nil {
		t.Fatalf("Unable to get block count: %v", err)
	}
	if countAfter != countBefore+1 {
		t.Fatalf("Block count incorrect. Got %v should be %v",
			countAfter, countBefore+1)
	}
}
// testGetBlockHash verifies getblockhash at the current tip height returns
// the hash of the block that was just generated.
func testGetBlockHash(r *rpctest.Harness, t *testing.T) {
	// Create a new block connecting to the current tip.
	generatedBlockHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	info, err := r.Node.GetInfo()
	if err != nil {
		// Message fixed: the original read "call to getinfo cailed"
		// (typo + casing inconsistent with the sibling messages).
		t.Fatalf("Call to `getinfo` failed: %v", err)
	}

	blockHash, err := r.Node.GetBlockHash(int64(info.Blocks))
	if err != nil {
		t.Fatalf("Call to `getblockhash` failed: %v", err)
	}

	// Block hashes should match newly created block.
	if !bytes.Equal(generatedBlockHashes[0][:], blockHash[:]) {
		t.Fatalf("Block hashes do not match. Returned hash %v, wanted "+
			"hash %v", blockHash, generatedBlockHashes[0][:])
	}
}
// rpcTestCases lists the harness test cases that TestRpcServer runs
// sequentially against the shared primaryHarness.
var rpcTestCases = []rpctest.HarnessTestCase{
	testGetSelectedTip,
	testGetBlockCount,
	testGetBlockHash,
}

// primaryHarness is the shared mining-node harness created in TestMain and
// reused by every test case in rpcTestCases.
var primaryHarness *rpctest.Harness
// TestMain creates the shared primary harness, runs the package's tests, and
// tears down all harnesses before propagating the tests' exit code.
func TestMain(m *testing.M) {
	var err error

	// In order to properly test scenarios on as if we were on mainnet,
	// ensure that non-standard transactions aren't accepted into the
	// mempool or relayed.
	kaspadCfg := []string{"--rejectnonstd"}
	primaryHarness, err = rpctest.New(&dagconfig.SimnetParams, nil, kaspadCfg)
	if err != nil {
		fmt.Println("unable to create primary harness: ", err)
		os.Exit(1)
	}

	// Initialize the primary mining node with a chain of length 125,
	// providing 25 mature coinbases to allow spending from for testing
	// purposes.
	if err := primaryHarness.SetUp(true, 25); err != nil {
		fmt.Println("unable to setup test chain: ", err)

		// Even though the harness was not fully setup, it still needs
		// to be torn down to ensure all resources such as temp
		// directories are cleaned up. The error is intentionally
		// ignored since this is already an error path and nothing else
		// could be done about it anyways.
		_ = primaryHarness.TearDown()
		os.Exit(1)
	}

	exitCode := m.Run()

	// Clean up any active harnesses that are still currently running. This
	// includes removing all temporary directories, and shutting down any
	// created processes.
	if err := rpctest.TearDownAll(); err != nil {
		fmt.Println("unable to tear down all harnesses: ", err)
		os.Exit(1)
	}

	os.Exit(exitCode)
}
// TestRpcServer runs every entry of rpcTestCases in order against the shared
// primaryHarness created in TestMain.
func TestRpcServer(t *testing.T) {
	var currentTestNum int
	defer func() {
		// If one of the integration tests caused a panic within the main
		// goroutine, then tear down all the harnesses in order to avoid
		// any leaked kaspad processes.
		if r := recover(); r != nil {
			fmt.Println("recovering from test panic: ", r)
			if err := rpctest.TearDownAll(); err != nil {
				fmt.Println("unable to tear down all harnesses: ", err)
			}
			t.Fatalf("test #%v panicked: %s", currentTestNum, debug.Stack())
		}
	}()

	for _, testCase := range rpcTestCases {
		testCase(primaryHarness, t)
		currentTestNum++
	}
}

View File

@ -1,13 +0,0 @@
rpctest
=======
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/integration/rpctest)
Package rpctest provides a kaspad-specific RPC testing harness for crafting and
executing integration tests by driving a `kaspad` instance via the `RPC`
interface. Each instance of an active harness comes equipped with a simple
in-memory HD wallet capable of properly syncing to the generated DAG,
creating new addresses, and crafting fully signed transactions paying to an
arbitrary set of outputs.

View File

@ -1,207 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpctest
import (
"github.com/pkg/errors"
"math"
"math/big"
"runtime"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// solveBlock attempts to find a nonce which makes the passed block header hash
// to a value less than the target difficulty. When a successful solution is
// found true is returned and the nonce field of the passed header is updated
// with the solution. False is returned if no solution exists.
func solveBlock(header *wire.BlockHeader, targetDifficulty *big.Int) bool {
	// sbResult is used by the solver goroutines to send results.
	type sbResult struct {
		found bool
		nonce uint64
	}

	// solver accepts a block header and a nonce range to test. It is
	// intended to be run as a goroutine.
	quit := make(chan bool)
	results := make(chan sbResult)
	solver := func(hdr wire.BlockHeader, startNonce, stopNonce uint64) {
		// We need to modify the nonce field of the header, so make sure
		// we work with a copy of the original header.
		//
		// NOTE: the `i >= startNonce` clause guards against uint64
		// wraparound when stopNonce is math.MaxUint64 (i++ would wrap
		// to 0 and loop forever otherwise).
		for i := startNonce; i >= startNonce && i <= stopNonce; i++ {
			select {
			case <-quit:
				// Another goroutine already found a solution.
				return
			default:
				hdr.Nonce = i
				hash := hdr.BlockHash()
				if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
					// Report success, unless the search was
					// cancelled while we were blocked sending.
					select {
					case results <- sbResult{true, i}:
						return
					case <-quit:
						return
					}
				}
			}
		}
		// Range exhausted without finding a solution.
		select {
		case results <- sbResult{false, 0}:
		case <-quit:
			return
		}
	}

	// Partition the full 64-bit nonce space evenly across the available
	// cores; the last worker absorbs any remainder up to stopNonce.
	startNonce := uint64(0)
	stopNonce := uint64(math.MaxUint64)
	numCores := uint64(runtime.NumCPU())
	noncesPerCore := (stopNonce - startNonce) / numCores
	for i := uint64(0); i < numCores; i++ {
		rangeStart := startNonce + (noncesPerCore * i)
		rangeStop := startNonce + (noncesPerCore * (i + 1)) - 1
		if i == numCores-1 {
			rangeStop = stopNonce
		}
		go solver(*header, rangeStart, rangeStop)
	}

	// Collect one result per worker; the first success wins, and closing
	// quit tells the remaining workers to stop.
	for i := uint64(0); i < numCores; i++ {
		result := <-results
		if result.found {
			close(quit)
			header.Nonce = result.nonce
			return true
		}
	}

	return false
}
// standardCoinbaseScript returns a standard script suitable for use as the
// signature script of the coinbase transaction of a new block. In particular,
// it starts with the block blue score.
func standardCoinbaseScript(nextBlueScore uint64, extraNonce uint64) ([]byte, error) {
	// Both values are pushed as script integers; the int64 conversions are
	// required by the ScriptBuilder API.
	return txscript.NewScriptBuilder().AddInt64(int64(nextBlueScore)).
		AddInt64(int64(extraNonce)).Script()
}
// createCoinbaseTx returns a coinbase transaction paying an appropriate
// subsidy based on the passed block blue score to the provided address. When
// mineTo is non-empty its outputs are used verbatim instead of the single
// subsidy output.
func createCoinbaseTx(coinbaseScript []byte, nextBlueScore uint64,
	addr util.Address, mineTo []wire.TxOut,
	net *dagconfig.Params) (*util.Tx, error) {

	// Create the script to pay to the provided payment address.
	scriptPubKey, err := txscript.PayToAddrScript(addr)
	if err != nil {
		return nil, err
	}

	// gofmt -s form: the inner &wire.TxIn type is implied by the slice's
	// element type.
	txIns := []*wire.TxIn{{
		// Coinbase transactions have no inputs, so previous outpoint is
		// zero hash and max index.
		PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{},
			wire.MaxPrevOutIndex),
		SignatureScript: coinbaseScript,
		Sequence:        wire.MaxTxInSequenceNum,
	}}

	var txOuts []*wire.TxOut
	if len(mineTo) == 0 {
		txOuts = append(txOuts, &wire.TxOut{
			Value:        blockdag.CalcBlockSubsidy(nextBlueScore, net),
			ScriptPubKey: scriptPubKey,
		})
	} else {
		// Index into the slice rather than taking the address of a
		// range variable, so each output pointer is distinct.
		for i := range mineTo {
			txOuts = append(txOuts, &mineTo[i])
		}
	}

	return util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)), nil
}
// CreateBlock creates a new block building from the previous block with a
// specified block version and timestamp. If the timestamp passed is zero (not
// initialized), then the timestamp of the previous block plus 1 second is
// used. Passing nil for the previous block results in a block that builds off
// of the genesis block for the specified chain.
func CreateBlock(parentBlock *util.Block, parentBlueScore uint64,
	inclusionTxs []*util.Tx, blockVersion int32, blockTime time.Time,
	miningAddr util.Address, mineTo []wire.TxOut, net *dagconfig.Params,
	powMaxBits uint32) (*util.Block, error) {

	var (
		parentHash      *daghash.Hash
		blockBlueScore  uint64
		parentBlockTime time.Time
	)

	// If the parent block isn't specified, then we'll construct a block
	// that builds off of the genesis block for the chain.
	if parentBlock == nil {
		parentHash = net.GenesisHash
		parentBlockTime = net.GenesisBlock.Header.Timestamp.Add(time.Minute)
	} else {
		parentHash = parentBlock.Hash()
		parentBlockTime = parentBlock.MsgBlock().Header.Timestamp
	}
	blockBlueScore = parentBlueScore + 1

	// If a target block time was specified, then use that as the header's
	// timestamp. Otherwise, add one second to the parent block's time.
	// (Simplified from a single-case switch; the original comment claiming
	// "use the current time" for genesis was stale — the code never did.)
	var ts time.Time
	if !blockTime.IsZero() {
		ts = blockTime
	} else {
		ts = parentBlockTime.Add(time.Second)
	}

	extraNonce := uint64(0)
	coinbaseScript, err := standardCoinbaseScript(blockBlueScore, extraNonce)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, blockBlueScore,
		miningAddr, mineTo, net)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	blockTxns := []*util.Tx{coinbaseTx}
	if inclusionTxs != nil {
		blockTxns = append(blockTxns, inclusionTxs...)
	}
	hashMerkleTree := blockdag.BuildHashMerkleTreeStore(blockTxns)
	var block wire.MsgBlock
	block.Header = wire.BlockHeader{
		Version:              blockVersion,
		ParentHashes:         []*daghash.Hash{parentHash},
		HashMerkleRoot:       hashMerkleTree.Root(),
		AcceptedIDMerkleRoot: &daghash.ZeroHash,
		UTXOCommitment:       &daghash.ZeroHash,
		Timestamp:            ts,
		Bits:                 powMaxBits,
	}
	for _, tx := range blockTxns {
		block.AddTransaction(tx.MsgTx())
	}

	// Brute-force a nonce satisfying the (test-friendly) PoW target.
	found := solveBlock(&block.Header, net.PowMax)
	if !found {
		return nil, errors.New("Unable to solve block")
	}

	utilBlock := util.NewBlock(&block)
	return utilBlock, nil
}

View File

@ -1,73 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpctest
import (
"github.com/pkg/errors"
"go/build"
"os/exec"
"path/filepath"
"runtime"
"sync"
)
var (
	// compileMtx guards access to the executable path so that the project is
	// only compiled once.
	compileMtx sync.Mutex

	// executablePath is the path to the compiled executable. This is the empty
	// string until kaspad is compiled. This should not be accessed directly;
	// instead use the function kaspadExecutablePath().
	executablePath string
)
// kaspadExecutablePath returns a path to the kaspad executable to be used by
// rpctests. To ensure the code tests against the most up-to-date version of
// kaspad, this method compiles kaspad the first time it is called. After that, the
// generated binary is used for subsequent test harnesses. The executable file
// is not cleaned up, but since it lives at a static path in a temp directory,
// it is not a big deal.
func kaspadExecutablePath() (string, error) {
	compileMtx.Lock()
	defer compileMtx.Unlock()

	// If kaspad has already been compiled, just use that.
	if len(executablePath) != 0 {
		return executablePath, nil
	}

	testDir, err := baseDir()
	if err != nil {
		return "", err
	}

	// Determine import path of this package. Not necessarily kaspanet/kaspad if
	// this is a forked repo.
	_, rpctestDir, _, ok := runtime.Caller(1)
	if !ok {
		return "", errors.Errorf("Cannot get path to kaspad source code")
	}
	kaspadPkgPath := filepath.Join(rpctestDir, "..", "..", "..")
	kaspadPkg, err := build.ImportDir(kaspadPkgPath, build.FindOnly)
	if err != nil {
		// This is an import/lookup failure, not a build failure; the
		// original message ("Failed to build kaspad") was misleading
		// and indistinguishable from the real build error below.
		return "", errors.Errorf("Failed to import kaspad package: %s", err)
	}

	// Build kaspad and output an executable in a static temp path.
	outputPath := filepath.Join(testDir, "kaspad")
	if runtime.GOOS == "windows" {
		outputPath += ".exe"
	}
	cmd := exec.Command("go", "build", "-o", outputPath, kaspadPkg.ImportPath)
	err = cmd.Run()
	if err != nil {
		return "", errors.Errorf("Failed to build kaspad: %s", err)
	}

	// Save executable path so future calls do not recompile.
	executablePath = outputPath
	return executablePath, nil
}

View File

@ -1,9 +0,0 @@
/*
Package rpctest provides a kaspad-specific RPC testing harness for crafting and
executing integration tests by driving a `kaspad` instance via the `RPC`
interface. Each instance of an active harness comes equipped with a simple
in-memory HD wallet capable of properly syncing to the generated chain,
creating new addresses, and crafting fully signed transactions paying to an
arbitrary set of outputs.
*/
package rpctest

View File

@ -1,526 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpctest
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"sync"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/hdkeychain"
"github.com/kaspanet/kaspad/wire"
)
var (
	// hdSeed is the BIP 32 seed used by the memWallet to initialize its
	// HD root key. This value is hard coded in order to ensure
	// deterministic behavior across test runs.
	hdSeed = [daghash.HashSize]byte{
		0x79, 0xa6, 0x1a, 0xdb, 0xc6, 0xe5, 0xa2, 0xe1,
		0x39, 0xd2, 0x71, 0x3a, 0x54, 0x6e, 0xc7, 0xc8,
		0x75, 0x63, 0x2e, 0x75, 0xf1, 0xdf, 0x9c, 0x3f,
		0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	}
)
// utxo represents an unspent output spendable by the memWallet. The maturity
// height of the transaction is recorded in order to properly observe the
// maturity period of direct coinbase outputs.
type utxo struct {
	// scriptPubKey is the output's locking script.
	scriptPubKey []byte
	// value is the amount carried by the output.
	value util.Amount
	// keyIndex is the HD child index (from hdRoot) of the key that can
	// spend this output.
	keyIndex uint32
	// maturityHeight is the first height at which this output is spendable.
	maturityHeight uint64
	// isLocked marks outputs reserved by an in-flight transaction; see
	// CreateTransaction/UnlockOutputs.
	isLocked bool
}
// isMature returns true if the target utxo is considered "mature" at the
// passed block height. Otherwise, false is returned. An output is mature
// once the given height reaches its recorded maturityHeight.
func (u *utxo) isMature(height uint64) bool {
	return height >= u.maturityHeight
}
// dagUpdate encapsulates an update to the current DAG. This struct is
// used to sync up the memWallet each time a new block is connected to the DAG.
type dagUpdate struct {
	blockHeight uint64
	// filteredTxns holds only the transactions relevant to the wallet's
	// registered filter.
	filteredTxns []*util.Tx
	isConnect    bool // True if connect, false if disconnect
}
// undoEntry is functionally the opposite of a dagUpdate. An undoEntry is
// created for each new block received, then stored in a log in order to
// properly handle block re-orgs.
type undoEntry struct {
	// utxosDestroyed records outputs spent by the block so they can be
	// restored on disconnect.
	utxosDestroyed map[wire.Outpoint]*utxo
	// utxosCreated records outputs the block added so they can be removed
	// on disconnect.
	utxosCreated []wire.Outpoint
}
// memWallet is a simple in-memory wallet whose purpose is to provide basic
// wallet functionality to the harness. The wallet uses a hard-coded HD key
// hierarchy which promotes reproducibility between harness test runs.
type memWallet struct {
	// coinbaseKey/coinbaseAddr are the key and address the harness mines to
	// (the first child of hdRoot).
	coinbaseKey  *ecc.PrivateKey
	coinbaseAddr util.Address
	// hdRoot is the root master private key for the wallet.
	hdRoot *hdkeychain.ExtendedKey
	// hdIndex is the next available key index offset from the hdRoot.
	hdIndex uint32
	// currentHeight is the latest height the wallet is known to be synced
	// to.
	currentHeight uint64
	// addrs tracks all addresses belonging to the wallet. The addresses
	// are indexed by their keypath from the hdRoot.
	addrs map[uint32]util.Address
	// utxos is the set of utxos spendable by the wallet.
	utxos map[wire.Outpoint]*utxo
	// reorgJournal is a map storing an undo entry for each new block
	// received. Once a block is disconnected, the undo entry for the
	// particular height is evaluated, thereby rewinding the effect of the
	// disconnected block on the wallet's set of spendable utxos.
	reorgJournal map[uint64]*undoEntry
	// dagUpdates is a FIFO queue of pending updates consumed by dagSyncer;
	// it (and only it) is guarded by dagMtx, with dagUpdateSignal used to
	// wake the syncer.
	dagUpdates      []*dagUpdate
	dagUpdateSignal chan struct{}
	dagMtx          sync.Mutex
	net             *dagconfig.Params
	rpc             *rpcclient.Client
	// The embedded RWMutex guards the wallet state above (utxos, addrs,
	// currentHeight, ...).
	sync.RWMutex
}
// newMemWallet creates and returns a fully initialized instance of the
// memWallet given a particular blockchain's parameters. The wallet's HD root
// is derived deterministically from the package-level hdSeed and the harness
// ID so that each harness instance is reproducible yet distinct.
func newMemWallet(net *dagconfig.Params, harnessID uint32) (*memWallet, error) {
	// The wallet's final HD seed is: hdSeed || harnessID. This method
	// ensures that each harness instance uses a deterministic root seed
	// based on its harness ID.
	var harnessHDSeed [daghash.HashSize + 4]byte
	copy(harnessHDSeed[:], hdSeed[:])
	// Write the harness ID into the 4 trailing bytes reserved for it.
	// Previously this wrote into harnessHDSeed[:daghash.HashSize], which
	// clobbered the first four seed bytes instead of appending the ID as
	// described above.
	binary.BigEndian.PutUint32(harnessHDSeed[daghash.HashSize:], harnessID)
	hdRoot, err := hdkeychain.NewMaster(harnessHDSeed[:], net.HDKeyIDPair.PrivateKeyID)
	if err != nil {
		// Previously returned (nil, nil) here, silently handing the caller
		// a nil wallet with a nil error.
		return nil, err
	}
	// The first child key from the hd root is reserved as the coinbase
	// generation address.
	coinbaseChild, err := hdRoot.Child(0)
	if err != nil {
		return nil, err
	}
	coinbaseKey, err := coinbaseChild.ECPrivKey()
	if err != nil {
		return nil, err
	}
	coinbaseAddr, err := keyToAddr(coinbaseKey, net)
	if err != nil {
		return nil, err
	}
	// Track the coinbase generation address to ensure we properly track
	// newly generated kaspa we can spend.
	addrs := make(map[uint32]util.Address)
	addrs[0] = coinbaseAddr
	return &memWallet{
		net:             net,
		coinbaseKey:     coinbaseKey,
		coinbaseAddr:    coinbaseAddr,
		hdIndex:         1, // index 0 is the coinbase address above
		hdRoot:          hdRoot,
		addrs:           addrs,
		utxos:           make(map[wire.Outpoint]*utxo),
		dagUpdateSignal: make(chan struct{}),
		reorgJournal:    make(map[uint64]*undoEntry),
	}, nil
}
// Start launches all goroutines required for the wallet to function properly.
// Currently this is the single dagSyncer goroutine, which consumes queued
// dagUpdates; it runs for the lifetime of the process.
func (m *memWallet) Start() {
	go m.dagSyncer()
}
// SyncedHeight returns the height the wallet is known to be synced to.
//
// This function is safe for concurrent access.
func (m *memWallet) SyncedHeight() uint64 {
	m.RLock()
	height := m.currentHeight
	m.RUnlock()
	return height
}
// SetRPCClient saves the passed rpc connection to kaspad as the wallet's
// personal rpc connection. The client is later used by newAddress (to load
// the tx filter) and SendOutputs (to broadcast transactions).
func (m *memWallet) SetRPCClient(rpcClient *rpcclient.Client) {
	m.rpc = rpcClient
}
// IngestBlock is a call-back which is to be triggered each time a new block is
// connected to the blockDAG. It queues the update for the DAG syncer,
// calling the private version in sequential order.
// Note: the header parameter is unused; only the height and the filtered
// transactions are recorded.
func (m *memWallet) IngestBlock(height uint64, header *wire.BlockHeader, filteredTxns []*util.Tx) {
	// Append this new DAG update to the end of the queue of new DAG
	// updates.
	m.dagMtx.Lock()
	m.dagUpdates = append(m.dagUpdates, &dagUpdate{height,
		filteredTxns, true})
	m.dagMtx.Unlock()
	// Launch a goroutine to signal the dagSyncer that a new update is
	// available. We do this in a new goroutine in order to avoid blocking
	// the main loop of the rpc client.
	go func() {
		m.dagUpdateSignal <- struct{}{}
	}()
}
// ingestBlock updates the wallet's internal utxo state based on the outputs
// created and destroyed within each block.
//
// It is called only from dagSyncer, which holds the wallet's write lock for
// the duration of the call.
func (m *memWallet) ingestBlock(update *dagUpdate) {
	// Update the latest synced height, then process each filtered
	// transaction in the block creating and destroying utxos within
	// the wallet as a result.
	m.currentHeight = update.blockHeight
	undo := &undoEntry{
		utxosDestroyed: make(map[wire.Outpoint]*utxo),
	}
	for _, tx := range update.filteredTxns {
		mtx := tx.MsgTx()
		isCoinbase := mtx.IsCoinBase()
		// Outputs must be evaluated before inputs so the undo entry
		// records both creations and destructions for this block.
		m.evalOutputs(mtx.TxOut, mtx.TxID(), isCoinbase, undo)
		m.evalInputs(mtx.TxIn, undo)
	}
	// Finally, record the undo entry for this block so we can
	// properly update our internal state in response to the block
	// being re-org'd from the main chain.
	m.reorgJournal[update.blockHeight] = undo
}
// dagSyncer is a goroutine dedicated to processing new blocks in order to
// keep the wallet's utxo state up to date. Each receive on dagUpdateSignal
// corresponds to exactly one queued update (IngestBlock sends one signal per
// enqueued dagUpdate), so popping without a length check is safe.
//
// NOTE: This MUST be run as a goroutine.
func (m *memWallet) dagSyncer() {
	var update *dagUpdate
	for range m.dagUpdateSignal {
		// A new update is available, so pop the new chain update from
		// the front of the update queue.
		m.dagMtx.Lock()
		update = m.dagUpdates[0]
		m.dagUpdates[0] = nil // Set to nil to prevent GC leak.
		m.dagUpdates = m.dagUpdates[1:]
		m.dagMtx.Unlock()
		// Hold the wallet's write lock while applying the update so
		// readers never observe a half-applied block.
		m.Lock()
		if update.isConnect {
			m.ingestBlock(update)
		}
		m.Unlock()
	}
}
// evalOutputs evaluates each of the passed outputs, creating a new matching
// utxo within the wallet if we're able to spend the output.
//
// Caller must hold the wallet's write lock (see dagSyncer/ingestBlock).
func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txID *daghash.TxID,
	isCoinbase bool, undo *undoEntry) {
	for i, output := range outputs {
		scriptPubKey := output.ScriptPubKey
		// Scan all the addresses we currently control to see if the
		// output is paying to us.
		// NOTE(review): matching via bytes.Contains on the pubkey hash is a
		// heuristic — any script embedding the hash matches, not only a
		// canonical p2pkh script.
		for keyIndex, addr := range m.addrs {
			pkHash := addr.ScriptAddress()
			if !bytes.Contains(scriptPubKey, pkHash) {
				continue
			}
			// If this is a coinbase output, then we mark the
			// maturity height at the proper block height in the
			// future.
			var maturityHeight uint64
			if isCoinbase {
				maturityHeight = m.currentHeight + m.net.BlockCoinbaseMaturity
			}
			op := wire.Outpoint{TxID: *txID, Index: uint32(i)}
			m.utxos[op] = &utxo{
				value:          util.Amount(output.Value),
				keyIndex:       keyIndex,
				maturityHeight: maturityHeight,
				scriptPubKey:   scriptPubKey,
			}
			undo.utxosCreated = append(undo.utxosCreated, op)
		}
	}
}
// evalInputs scans all the passed inputs, destroying any utxos within the
// wallet which are spent by an input.
func (m *memWallet) evalInputs(inputs []*wire.TxIn, undo *undoEntry) {
	for _, input := range inputs {
		outpoint := input.PreviousOutpoint
		if spent, ok := m.utxos[outpoint]; ok {
			// Record the destroyed output so a re-org can restore it,
			// then remove it from the spendable set.
			undo.utxosDestroyed[outpoint] = spent
			delete(m.utxos, outpoint)
		}
	}
}
// newAddress returns a new address from the wallet's hd key chain. It also
// loads the address into the RPC client's transaction filter to ensure any
// transactions that involve it are delivered via the notifications.
//
// Caller must hold the wallet's write lock (see NewAddress and fundTx).
func (m *memWallet) newAddress() (util.Address, error) {
	index := m.hdIndex
	childKey, err := m.hdRoot.Child(index)
	if err != nil {
		return nil, err
	}
	privKey, err := childKey.ECPrivKey()
	if err != nil {
		return nil, err
	}
	addr, err := keyToAddr(privKey, m.net)
	if err != nil {
		return nil, err
	}
	// Register the address with the node's tx filter before recording it
	// locally, so no notification for it can be missed.
	err = m.rpc.LoadTxFilter(false, []util.Address{addr}, nil)
	if err != nil {
		return nil, err
	}
	// Only advance hdIndex once every step succeeded; a failure above
	// leaves the key chain position unchanged.
	m.addrs[index] = addr
	m.hdIndex++
	return addr, nil
}
// NewAddress returns a fresh address spendable by the wallet.
//
// This function is safe for concurrent access.
func (m *memWallet) NewAddress() (util.Address, error) {
	m.Lock()
	addr, err := m.newAddress()
	m.Unlock()
	return addr, err
}
// fundTx attempts to fund a transaction sending amt kaspa. The coins are
// selected such that the final amount spent pays enough fees as dictated by
// the passed fee rate. The passed fee rate should be expressed in
// sompis-per-byte.
//
// NOTE: The memWallet's mutex must be held when this function is called.
//
// Coin selection iterates the utxos map, so the set of inputs chosen is not
// deterministic across runs (Go map iteration order is randomized).
func (m *memWallet) fundTx(tx *wire.MsgTx, amt util.Amount, feeRate util.Amount) error {
	const (
		// spendSize is the largest number of bytes of a sigScript
		// which spends a p2pkh output: OP_DATA_73 <sig> OP_DATA_33 <pubkey>
		spendSize = 1 + 73 + 1 + 33
	)
	var (
		amtSelected util.Amount
		txSize      int
	)
	for outpoint, utxo := range m.utxos {
		// Skip any outputs that are still currently immature or are
		// currently locked.
		if !utxo.isMature(m.currentHeight) || utxo.isLocked {
			continue
		}
		amtSelected += utxo.value
		// Add the selected output to the transaction, updating the
		// current tx size while accounting for the size of the future
		// sigScript.
		tx.AddTxIn(wire.NewTxIn(&outpoint, nil))
		txSize = tx.SerializeSize() + spendSize*len(tx.TxIn)
		// Calculate the fee required for the txn at this point
		// observing the specified fee rate. If we don't have enough
		// coins from he current amount selected to pay the fee, then
		// continue to grab more coins.
		reqFee := util.Amount(txSize * int(feeRate))
		if amtSelected-reqFee < amt {
			continue
		}
		// If we have any change left over, then add an additional
		// output to the transaction reserved for change.
		changeVal := amtSelected - amt - reqFee
		if changeVal > 0 {
			// newAddress is safe here: our caller already holds the
			// wallet's write lock.
			addr, err := m.newAddress()
			if err != nil {
				return err
			}
			scriptPubKey, err := txscript.PayToAddrScript(addr)
			if err != nil {
				return err
			}
			changeOutput := &wire.TxOut{
				Value:        uint64(changeVal),
				ScriptPubKey: scriptPubKey,
			}
			tx.AddTxOut(changeOutput)
		}
		return nil
	}
	// If we've reached this point, then coin selection failed due to an
	// insufficient amount of coins.
	return errors.Errorf("not enough funds for coin selection")
}
// SendOutputs creates, then sends a transaction paying to the specified output
// while observing the passed fee rate. The passed fee rate should be expressed
// in sompis-per-byte.
func (m *memWallet) SendOutputs(outputs []*wire.TxOut,
	feeRate util.Amount) (*daghash.TxID, error) {
	// Build and sign the transaction, then broadcast it through the
	// wallet's rpc connection.
	signedTx, err := m.CreateTransaction(outputs, feeRate)
	if err != nil {
		return nil, err
	}
	return m.rpc.SendRawTransaction(signedTx, true)
}
// CreateTransaction returns a fully signed transaction paying to the specified
// outputs while observing the desired fee rate. The passed fee rate should be
// expressed in sompis-per-byte.
//
// Inputs selected to fund the transaction are marked as locked and stay
// unspendable until released via UnlockOutputs.
//
// This function is safe for concurrent access.
func (m *memWallet) CreateTransaction(outputs []*wire.TxOut, feeRate util.Amount) (*wire.MsgTx, error) {
	m.Lock()
	defer m.Unlock()
	tx := wire.NewNativeMsgTx(wire.TxVersion, nil, nil)
	// Tally up the total amount to be sent in order to perform coin
	// selection shortly below.
	var outputAmt util.Amount
	for _, output := range outputs {
		outputAmt += util.Amount(output.Value)
		tx.AddTxOut(output)
	}
	// Attempt to fund the transaction with spendable utxos.
	if err := m.fundTx(tx, outputAmt, feeRate); err != nil {
		return nil, err
	}
	// Populate all the selected inputs with valid sigScript for spending.
	// Along the way record all outputs being spent in order to avoid a
	// potential double spend.
	spentOutputs := make([]*utxo, 0, len(tx.TxIn))
	for i, txIn := range tx.TxIn {
		outpoint := txIn.PreviousOutpoint
		utxo := m.utxos[outpoint]
		extendedKey, err := m.hdRoot.Child(utxo.keyIndex)
		if err != nil {
			return nil, err
		}
		privKey, err := extendedKey.ECPrivKey()
		if err != nil {
			return nil, err
		}
		sigScript, err := txscript.SignatureScript(tx, i, utxo.scriptPubKey,
			txscript.SigHashAll, privKey, true)
		if err != nil {
			return nil, err
		}
		txIn.SignatureScript = sigScript
		spentOutputs = append(spentOutputs, utxo)
	}
	// As these outputs are now being spent by this newly created
	// transaction, mark the outputs are "locked". This action ensures
	// these outputs won't be double spent by any subsequent transactions.
	// These locked outputs can be freed via a call to UnlockOutputs.
	// Locking happens only after ALL inputs signed successfully; an error
	// above leaves every output unlocked.
	for _, utxo := range spentOutputs {
		utxo.isLocked = true
	}
	return tx, nil
}
// UnlockOutputs unlocks any outputs which were previously locked due to
// being selected to fund a transaction via the CreateTransaction method.
//
// This function is safe for concurrent access.
func (m *memWallet) UnlockOutputs(inputs []*wire.TxIn) {
	m.Lock()
	defer m.Unlock()
	for _, txIn := range inputs {
		// Inputs that don't correspond to a known utxo are silently
		// skipped.
		if lockedUtxo, ok := m.utxos[txIn.PreviousOutpoint]; ok {
			lockedUtxo.isLocked = false
		}
	}
}
// ConfirmedBalance returns the confirmed balance of the wallet.
//
// This function is safe for concurrent access.
func (m *memWallet) ConfirmedBalance() util.Amount {
	m.RLock()
	defer m.RUnlock()
	// Only mature, unlocked outputs count toward the confirmed balance.
	total := util.Amount(0)
	for _, candidate := range m.utxos {
		if candidate.isLocked || !candidate.isMature(m.currentHeight) {
			continue
		}
		total += candidate.value
	}
	return total
}
// keyToAddr maps the passed private key to the corresponding p2pkh address
// for the given network.
func keyToAddr(key *ecc.PrivateKey, net *dagconfig.Params) (util.Address, error) {
	return util.NewAddressPubKeyHashFromPublicKey(
		key.PubKey().SerializeCompressed(), net.Prefix)
}

View File

@ -1,291 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpctest
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
rpc "github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
)
// nodeConfig contains all the args, and data required to launch a kaspad process
// and connect the rpc client to it.
type nodeConfig struct {
	// RPC credentials passed to the node via --rpcuser/--rpcpass.
	rpcUser string
	rpcPass string
	// p2p and rpc listen addresses.
	listen     string
	rpcListen  string
	rpcConnect string
	// dataDir and logDir are temp directories created by setDefaults and
	// removed by cleanup.
	dataDir    string
	logDir     string
	profile    string
	debugLevel string
	// extra holds additional raw CLI arguments appended verbatim.
	extra  []string
	prefix string
	// exe is the kaspad executable path used to launch the process.
	exe      string
	endpoint string
	// TLS cert/key file paths; certificates holds the PEM bytes read from
	// certFile by setDefaults.
	certFile     string
	keyFile      string
	certificates []byte
}
// newConfig returns a newConfig with all default values.
func newConfig(prefix, certFile, keyFile string, extra []string) (*nodeConfig, error) {
	// Best effort: if the kaspad binary cannot be compiled, fall back to
	// whatever "kaspad" resolves to on the PATH.
	kaspadPath, err := kaspadExecutablePath()
	if err != nil {
		kaspadPath = "kaspad"
	}
	cfg := &nodeConfig{
		listen:    "127.0.0.1:18555",
		rpcListen: "127.0.0.1:16510",
		rpcUser:   "user",
		rpcPass:   "pass",
		extra:     extra,
		prefix:    prefix,
		exe:       kaspadPath,
		endpoint:  "ws",
		certFile:  certFile,
		keyFile:   keyFile,
	}
	if err := cfg.setDefaults(); err != nil {
		return nil, err
	}
	return cfg, nil
}
// setDefaults sets the default values of the config. It also creates the
// temporary data, and log directories which must be cleaned up with a call to
// cleanup().
func (n *nodeConfig) setDefaults() error {
	// Temp directory for the node's data.
	dataDir, err := ioutil.TempDir("", n.prefix+"-data")
	if err != nil {
		return err
	}
	n.dataDir = dataDir
	// Temp directory for the node's logs.
	logDir, err := ioutil.TempDir("", n.prefix+"-logs")
	if err != nil {
		return err
	}
	n.logDir = logDir
	// Cache the TLS certificate bytes for the rpc connection config.
	certBytes, err := ioutil.ReadFile(n.certFile)
	if err != nil {
		return err
	}
	n.certificates = certBytes
	return nil
}
// arguments returns an array of arguments that be used to launch the kaspad
// process. Optional flags are emitted only when the corresponding field is
// non-empty; the TLS cert/key and index flags are always present.
func (n *nodeConfig) arguments() []string {
	var args []string
	// appendIfSet emits --<flag>=<value> only for non-empty values.
	appendIfSet := func(flag, value string) {
		if value != "" {
			args = append(args, fmt.Sprintf("--%s=%s", flag, value))
		}
	}
	appendIfSet("rpcuser", n.rpcUser)
	appendIfSet("rpcpass", n.rpcPass)
	appendIfSet("listen", n.listen)
	appendIfSet("rpclisten", n.rpcListen)
	appendIfSet("rpcconnect", n.rpcConnect)
	// Flags that are passed unconditionally.
	args = append(args,
		fmt.Sprintf("--rpccert=%s", n.certFile),
		fmt.Sprintf("--rpckey=%s", n.keyFile),
		"--txindex",
		"--addrindex",
	)
	appendIfSet("datadir", n.dataDir)
	appendIfSet("logdir", n.logDir)
	appendIfSet("profile", n.profile)
	appendIfSet("debuglevel", n.debugLevel)
	// Any extra user-supplied arguments come last, verbatim.
	args = append(args, n.extra...)
	return args
}
// command returns the exec.Cmd which will be used to start the kaspad process.
// A fresh Cmd is built from the configured executable and argument list.
func (n *nodeConfig) command() *exec.Cmd {
	return exec.Command(n.exe, n.arguments()...)
}
// rpcConnConfig returns the rpc connection config that can be used to connect
// to the kaspad process that is launched via Start(). The certificate bytes
// cached by setDefaults are embedded so the client can verify the node's TLS
// cert.
func (n *nodeConfig) rpcConnConfig() rpc.ConnConfig {
	return rpc.ConnConfig{
		Host:                 n.rpcListen,
		Endpoint:             n.endpoint,
		User:                 n.rpcUser,
		Pass:                 n.rpcPass,
		Certificates:         n.certificates,
		DisableAutoReconnect: true,
	}
}
// String returns the string representation of this nodeConfig. The prefix is
// used as the identifier (e.g. for the pid file name in node.start).
func (n *nodeConfig) String() string {
	return n.prefix
}
// cleanup removes the tmp data and log directories.
//
// Every directory is attempted even if an earlier removal fails, and the
// FIRST error encountered is returned. (Previously only the error of the
// last removal was returned, so an early failure followed by a success was
// silently dropped.)
func (n *nodeConfig) cleanup() error {
	dirs := []string{
		n.logDir,
		n.dataDir,
	}
	var firstErr error
	for _, dir := range dirs {
		if err := os.RemoveAll(dir); err != nil {
			log.Printf("Cannot remove dir %s: %s", dir, err)
			if firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}
// node houses the necessary state required to configure, launch, and manage a
// kaspad process.
type node struct {
	config *nodeConfig
	// cmd is the running (or about-to-run) kaspad process.
	cmd *exec.Cmd
	// pidFile is the path of the file recording the process pid; written by
	// start, removed by cleanup.
	pidFile string
	dataDir string
}
// newNode creates a new node instance according to the passed config. dataDir
// will be used to hold a file recording the pid of the launched process, and
// as the base for the log and data directories for kaspad.
//
// The returned error is currently always nil; the signature is kept for
// call-site symmetry.
func newNode(config *nodeConfig, dataDir string) (*node, error) {
	return &node{
		config:  config,
		dataDir: dataDir,
		cmd:     config.command(),
	}, nil
}
// start creates a new kaspad process, and writes its pid in a file reserved for
// recording the pid of the launched process. This file can be used to
// terminate the process in case of a hang, or panic. In the case of a failing
// test case, or panic, it is important that the process be stopped via stop(),
// otherwise, it will persist unless explicitly killed.
func (n *node) start() error {
	if err := n.cmd.Start(); err != nil {
		return err
	}
	// Record the pid in <dataDir>/<prefix>.pid (nodeConfig.String returns
	// the prefix).
	pid, err := os.Create(filepath.Join(n.dataDir,
		fmt.Sprintf("%s.pid", n.config)))
	if err != nil {
		return err
	}
	n.pidFile = pid.Name()
	if _, err = fmt.Fprintf(pid, "%d\n", n.cmd.Process.Pid); err != nil {
		return err
	}
	if err := pid.Close(); err != nil {
		return err
	}
	return nil
}
// stop interrupts the running kaspad process process, and waits until it exits
// properly. On windows, interrupt is not supported, so a kill signal is used
// instead
func (n *node) stop() error {
	if n.cmd == nil || n.cmd.Process == nil {
		// return if not properly initialized
		// or error starting the process
		return nil
	}
	// Wait reaps the process after the signal below is delivered; its error
	// is intentionally ignored.
	defer n.cmd.Wait()
	if runtime.GOOS == "windows" {
		return n.cmd.Process.Signal(os.Kill)
	}
	return n.cmd.Process.Signal(os.Interrupt)
}
// cleanup cleanups process and args files. The file housing the pid of the
// created process will be deleted, as well as any directories created by the
// process.
func (n *node) cleanup() error {
	if n.pidFile != "" {
		// A failed pid-file removal is logged but does not abort cleanup.
		if removeErr := os.Remove(n.pidFile); removeErr != nil {
			log.Printf("unable to remove file %s: %s", n.pidFile,
				removeErr)
		}
	}
	return n.config.cleanup()
}
// shutdown terminates the running kaspad process, and cleans up all
// file/directories created by node.
func (n *node) shutdown() error {
	// Stop the process first, then remove everything it left behind.
	if err := n.stop(); err != nil {
		return err
	}
	return n.cleanup()
}
// genCertPair generates a key/cert pair to the paths provided.
func genCertPair(certFile, keyFile string) error {
	const org = "rpctest autogenerated cert"
	validUntil := time.Now().Add(10 * 365 * 24 * time.Hour)
	certBytes, keyBytes, err := util.NewTLSCertPair(org, validUntil, nil)
	if err != nil {
		return err
	}
	// Write cert and key files; the key is written with restrictive
	// permissions.
	if err := ioutil.WriteFile(certFile, certBytes, 0666); err != nil {
		return err
	}
	if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil {
		// Don't leave an orphaned cert behind; the removal itself is best
		// effort.
		os.Remove(certFile)
		return err
	}
	return nil
}

View File

@ -1,488 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpctest
import (
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"sync"
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
	// These constants define the minimum and maximum p2p and rpc port
	// numbers used by a test harness. The min port is inclusive while the
	// max port is exclusive.
	minPeerPort = 10000
	maxPeerPort = 35000
	minRPCPort  = maxPeerPort
	maxRPCPort  = 60000
	// BlockVersion is the default block version used when generating
	// blocks.
	BlockVersion = 1
)
var (
	// current number of active test nodes.
	numTestInstances = 0
	// processID is the process ID of the current running process. It is
	// used to calculate ports based upon it when launching an rpc
	// harnesses. The intent is to allow multiple process to run in
	// parallel without port collisions.
	//
	// It should be noted however that there is still some small probability
	// that there will be port collisions either due to other processes
	// running or simply due to the stars aligning on the process IDs.
	processID = os.Getpid()
	// testInstances is a private package-level slice used to keep track of
	// all active test harnesses. This global can be used to perform
	// various "joins", shutdown several active harnesses after a test,
	// etc.
	testInstances = make(map[string]*Harness)
	// Used to protect concurrent access to above declared variables.
	harnessStateMtx sync.RWMutex
)
// HarnessTestCase represents a test-case which utilizes an instance of the
// Harness to exercise functionality. Implementations receive the running
// harness and the active *testing.T.
type HarnessTestCase func(r *Harness, t *testing.T)
// Harness fully encapsulates an active kaspad process to provide a unified
// platform for creating rpc driven integration tests involving kaspad. The
// active kaspad node will typically be run in simnet mode in order to allow for
// easy generation of test blockchains. The active kaspad process is fully
// managed by Harness, which handles the necessary initialization, and teardown
// of the process along with any temporary directories created as a result.
// Multiple Harness instances may be run concurrently, in order to allow for
// testing complex scenarios involving multiple nodes. The harness also
// includes an in-memory wallet to streamline various classes of tests.
type Harness struct {
	// ActiveNet is the parameters of the blockchain the Harness belongs
	// to.
	ActiveNet *dagconfig.Params
	// powMaxBits is the compact form of ActiveNet.PowMax.
	powMaxBits uint32
	// Node is the rpc client connected to the managed kaspad process;
	// the unexported node manages the process itself.
	Node     *rpcclient.Client
	node     *node
	handlers *rpcclient.NotificationHandlers
	// wallet is the in-memory HD wallet tied to this harness.
	wallet      *memWallet
	testNodeDir string
	// maxConnRetries bounds connectRPCClient's connection attempts.
	maxConnRetries int
	nodeNum        int
	sync.Mutex
}
// New creates and initializes new instance of the rpc test harness.
// Optionally, websocket handlers and a specified configuration may be passed.
// In the case that a nil config is passed, a default configuration will be
// used.
//
// NOTE: This function is safe for concurrent access.
func New(activeNet *dagconfig.Params, handlers *rpcclient.NotificationHandlers,
	extraArgs []string) (*Harness, error) {
	// The package-level mutex also serializes access to numTestInstances
	// and testInstances below.
	harnessStateMtx.Lock()
	defer harnessStateMtx.Unlock()
	// Add a flag for the appropriate network type based on the provided
	// chain params.
	switch activeNet.Net {
	case wire.Mainnet:
		// No extra flags since mainnet is the default
	case wire.Testnet:
		extraArgs = append(extraArgs, "--testnet")
	case wire.Regtest:
		extraArgs = append(extraArgs, "--regtest")
	case wire.Simnet:
		extraArgs = append(extraArgs, "--simnet")
	default:
		return nil, errors.Errorf("rpctest.New must be called with one " +
			"of the supported chain networks")
	}
	testDir, err := baseDir()
	if err != nil {
		return nil, err
	}
	harnessID := strconv.Itoa(numTestInstances)
	nodeTestData, err := ioutil.TempDir(testDir, "harness-"+harnessID)
	if err != nil {
		return nil, err
	}
	// Generate a fresh TLS pair for the node's RPC interface.
	certFile := filepath.Join(nodeTestData, "rpc.cert")
	keyFile := filepath.Join(nodeTestData, "rpc.key")
	if err := genCertPair(certFile, keyFile); err != nil {
		return nil, err
	}
	wallet, err := newMemWallet(activeNet, uint32(numTestInstances))
	if err != nil {
		return nil, err
	}
	// Mine to the wallet's coinbase address so generated coins are
	// spendable by the harness.
	miningAddr := fmt.Sprintf("--miningaddr=%s", wallet.coinbaseAddr)
	extraArgs = append(extraArgs, miningAddr)
	config, err := newConfig("rpctest", certFile, keyFile, extraArgs)
	if err != nil {
		return nil, err
	}
	// Generate p2p+rpc listening addresses.
	config.listen, config.rpcListen = generateListeningAddresses()
	// Create the testing node bounded to the simnet.
	node, err := newNode(config, nodeTestData)
	if err != nil {
		return nil, err
	}
	nodeNum := numTestInstances
	numTestInstances++
	if handlers == nil {
		handlers = &rpcclient.NotificationHandlers{}
	}
	// If a handler for the OnFilteredBlock{Connected,Disconnected} callback
	// callback has already been set, then create a wrapper callback which
	// executes both the currently registered callback and the mem wallet's
	// callback.
	if handlers.OnFilteredBlockAdded != nil {
		obc := handlers.OnFilteredBlockAdded
		handlers.OnFilteredBlockAdded = func(height uint64, header *wire.BlockHeader, filteredTxns []*util.Tx) {
			// The wallet is always notified first so its state stays in
			// step with any user callback.
			wallet.IngestBlock(height, header, filteredTxns)
			obc(height, header, filteredTxns)
		}
	} else {
		// Otherwise, we can claim the callback ourselves.
		handlers.OnFilteredBlockAdded = wallet.IngestBlock
	}
	h := &Harness{
		handlers:       handlers,
		node:           node,
		maxConnRetries: 20,
		testNodeDir:    nodeTestData,
		ActiveNet:      activeNet,
		powMaxBits:     util.BigToCompact(activeNet.PowMax),
		nodeNum:        nodeNum,
		wallet:         wallet,
	}
	// Track this newly created test instance within the package level
	// global map of all active test instances.
	testInstances[h.testNodeDir] = h
	return h, nil
}
// SetUp initializes the rpc test state. Initialization includes: starting up a
// simnet node, creating a websockets client and connecting to the started
// node, and finally: optionally generating and submitting a testchain with a
// configurable number of mature coinbase outputs coinbase outputs.
//
// NOTE: This method and TearDown should always be called from the same
// goroutine as they are not concurrent safe.
func (h *Harness) SetUp(createTestChain bool, numMatureOutputs uint32) error {
	// Start the kaspad node itself. This spawns a new process which will be
	// managed
	if err := h.node.start(); err != nil {
		return err
	}
	if err := h.connectRPCClient(); err != nil {
		return err
	}
	h.wallet.Start()
	// Filter transactions that pay to the coinbase associated with the
	// wallet.
	filterAddrs := []util.Address{h.wallet.coinbaseAddr}
	if err := h.Node.LoadTxFilter(true, filterAddrs, nil); err != nil {
		return err
	}
	// Ensure kaspad properly dispatches our registered call-back for each new
	// block. Otherwise, the memWallet won't function properly.
	if err := h.Node.NotifyBlocks(); err != nil {
		return err
	}
	// Create a test chain with the desired number of mature coinbase
	// outputs.
	if createTestChain && numMatureOutputs != 0 {
		numToGenerate := (uint32(h.ActiveNet.BlockCoinbaseMaturity) +
			numMatureOutputs)
		_, err := h.Node.Generate(numToGenerate)
		if err != nil {
			return err
		}
	}
	// Block until the wallet has fully synced up to the tip of the main
	// chain.
	selectedTip, err := h.Node.GetSelectedTip()
	if err != nil {
		return err
	}
	blueScore := selectedTip.BlueScore
	ticker := time.NewTicker(time.Millisecond * 100)
	// Previously the ticker was only stopped after the loop; a defer also
	// covers any future early return.
	defer ticker.Stop()
	for range ticker.C {
		walletHeight := h.wallet.SyncedHeight()
		// Use >= rather than ==: if the wallet syncs past blueScore
		// between ticks, an exact-equality check would never match and
		// this loop would spin forever.
		if walletHeight >= blueScore {
			break
		}
	}
	return nil
}
// tearDown stops the running rpc test instance. All created processes are
// killed, and temporary directories removed.
//
// The order matters: the RPC client is shut down before the node process,
// then the node's directories and the harness registration are removed.
//
// This function MUST be called with the harness state mutex held (for writes).
func (h *Harness) tearDown() error {
	if h.Node != nil {
		h.Node.Shutdown()
	}
	if err := h.node.shutdown(); err != nil {
		return err
	}
	if err := os.RemoveAll(h.testNodeDir); err != nil {
		return err
	}
	delete(testInstances, h.testNodeDir)
	return nil
}
// TearDown stops the running rpc test instance. All created processes are
// killed, and temporary directories removed.
//
// NOTE: This method and SetUp should always be called from the same goroutine
// as they are not concurrent safe.
func (h *Harness) TearDown() error {
	harnessStateMtx.Lock()
	err := h.tearDown()
	harnessStateMtx.Unlock()
	return err
}
// connectRPCClient attempts to establish an RPC connection to the created kaspad
// process belonging to this Harness instance. If the initial connection
// attempt fails, this function will retry h.maxConnRetries times, backing off
// the time between subsequent attempts. If after h.maxConnRetries attempts,
// we're not able to establish a connection, this function returns with an
// error.
func (h *Harness) connectRPCClient() error {
	var client *rpcclient.Client
	var err error
	rpcConf := h.node.config.rpcConnConfig()
	for i := 0; i < h.maxConnRetries; i++ {
		// Linear backoff: attempt i sleeps i*50ms, so the very first
		// retry happens immediately.
		if client, err = rpcclient.New(&rpcConf, h.handlers); err != nil {
			time.Sleep(time.Duration(i) * 50 * time.Millisecond)
			continue
		}
		break
	}
	if client == nil {
		// NOTE(review): the last rpcclient.New error is discarded here;
		// surfacing it would make failures easier to diagnose.
		return errors.Errorf("connection timeout")
	}
	h.Node = client
	h.wallet.SetRPCClient(client)
	return nil
}
// NewAddress returns a fresh address spendable by the Harness' internal
// wallet.
//
// This function is safe for concurrent access.
func (h *Harness) NewAddress() (util.Address, error) {
	// Delegate directly to the in-memory wallet.
	addr, err := h.wallet.NewAddress()
	return addr, err
}
// ConfirmedBalance returns the confirmed balance of the Harness' internal
// wallet.
//
// This function is safe for concurrent access.
func (h *Harness) ConfirmedBalance() util.Amount {
	balance := h.wallet.ConfirmedBalance()
	return balance
}
// SendOutputs creates, signs, and finally broadcasts a transaction spending
// the harness' available mature coinbase outputs creating new outputs
// according to targetOutputs.
//
// This function is safe for concurrent access.
func (h *Harness) SendOutputs(targetOutputs []*wire.TxOut,
	feeRate util.Amount) (*daghash.TxID, error) {

	txID, err := h.wallet.SendOutputs(targetOutputs, feeRate)
	return txID, err
}
// CreateTransaction returns a fully signed transaction paying to the specified
// outputs while observing the desired fee rate. The passed fee rate should be
// expressed in sompis-per-byte. Any unspent outputs selected as inputs for
// the crafted transaction are marked as unspendable in order to avoid
// potential double-spends by future calls to this method. If the created
// transaction is cancelled for any reason then the selected inputs MUST be
// freed via a call to UnlockOutputs. Otherwise, the locked inputs won't be
// returned to the pool of spendable outputs.
//
// This function is safe for concurrent access.
func (h *Harness) CreateTransaction(targetOutputs []*wire.TxOut,
	feeRate util.Amount) (*wire.MsgTx, error) {

	tx, err := h.wallet.CreateTransaction(targetOutputs, feeRate)
	return tx, err
}
// UnlockOutputs unlocks any outputs which were previously marked as
// unspendabe due to being selected to fund a transaction via the
// CreateTransaction method.
//
// This function is safe for concurrent access.
func (h *Harness) UnlockOutputs(inputs []*wire.TxIn) {
	// Return the previously locked inputs to the spendable pool.
	h.wallet.UnlockOutputs(inputs)
}
// RPCConfig returns the harnesses current rpc configuration. This allows other
// potential RPC clients created within tests to connect to a given test
// harness instance.
func (h *Harness) RPCConfig() rpcclient.ConnConfig {
	cfg := h.node.config.rpcConnConfig()
	return cfg
}
// P2PAddress returns the harness' P2P listening address. This allows potential
// peers (such as SPV peers) created within tests to connect to a given test
// harness instance.
func (h *Harness) P2PAddress() string {
	listenAddr := h.node.config.listen
	return listenAddr
}
// GenerateAndSubmitBlock creates a block whose contents include the passed
// transactions and submits it to the running simnet node. For generating
// blocks with only a coinbase tx, callers can simply pass nil instead of
// transactions to be mined. Additionally, a custom block version can be set by
// the caller. A blockVersion of -1 indicates that the current default block
// version should be used. An uninitialized time.Time should be used for the
// blockTime parameter if one doesn't wish to set a custom time.
//
// This function is safe for concurrent access.
func (h *Harness) GenerateAndSubmitBlock(txns []*util.Tx, blockVersion int32,
	blockTime time.Time) (*util.Block, error) {

	// No extra coinbase outputs: the reward goes to the harness wallet.
	noExtraCoinbaseOutputs := []wire.TxOut{}
	return h.GenerateAndSubmitBlockWithCustomCoinbaseOutputs(txns,
		blockVersion, blockTime, noExtraCoinbaseOutputs)
}
// GenerateAndSubmitBlockWithCustomCoinbaseOutputs creates a block whose
// contents include the passed coinbase outputs and transactions and submits
// it to the running simnet node. For generating blocks with only a coinbase tx,
// callers can simply pass nil instead of transactions to be mined.
// Additionally, a custom block version can be set by the caller. A blockVersion
// of -1 indicates that the current default block version should be used. An
// uninitialized time.Time should be used for the blockTime parameter if one
// doesn't wish to set a custom time. The mineTo list of outputs will be added
// to the coinbase; this is not checked for correctness until the block is
// submitted; thus, it is the caller's responsibility to ensure that the outputs
// are correct. If the list is empty, the coinbase reward goes to the wallet
// managed by the Harness.
//
// This function is safe for concurrent access.
func (h *Harness) GenerateAndSubmitBlockWithCustomCoinbaseOutputs(
	txns []*util.Tx, blockVersion int32, blockTime time.Time,
	mineTo []wire.TxOut) (*util.Block, error) {

	h.Lock()
	defer h.Unlock()

	if blockVersion == -1 {
		blockVersion = BlockVersion
	}

	// Fetch the node's current selected tip; the new block is built on top
	// of it.
	selectedTip, err := h.Node.GetSelectedTip()
	if err != nil {
		return nil, err
	}
	selectedTipHash, err := daghash.NewHashFromStr(selectedTip.Hash)
	if err != nil {
		return nil, err
	}
	mBlock, err := h.Node.GetBlock(selectedTipHash, nil)
	if err != nil {
		return nil, err
	}
	parentBlock := util.NewBlock(mBlock)

	// Create a new block including the specified transactions
	newBlock, err := CreateBlock(parentBlock, selectedTip.BlueScore, txns, blockVersion,
		blockTime, h.wallet.coinbaseAddr, mineTo, h.ActiveNet, h.powMaxBits)
	if err != nil {
		return nil, err
	}

	// Submit the block to the simnet node.
	if err := h.Node.SubmitBlock(newBlock, nil); err != nil {
		return nil, err
	}

	return newBlock, nil
}
// generateListeningAddresses returns two strings representing listening
// addresses designated for the current rpc test. If there haven't been any
// test instances created, the default ports are used. Otherwise, in order to
// support multiple test nodes running at once, the p2p and rpc port are
// incremented after each initialization.
func generateListeningAddresses() (string, string) {
localhost := "127.0.0.1"
portString := func(minPort, maxPort int) string {
port := minPort + numTestInstances + ((20 * processID) %
(maxPort - minPort))
return strconv.Itoa(port)
}
p2p := net.JoinHostPort(localhost, portString(minPeerPort, maxPeerPort))
rpc := net.JoinHostPort(localhost, portString(minRPCPort, maxRPCPort))
return p2p, rpc
}
// baseDir is the directory path of the temp directory for all rpctest files.
func baseDir() (string, error) {
dirPath := filepath.Join(os.TempDir(), "kaspad", "rpctest")
err := os.MkdirAll(dirPath, 0755)
return dirPath, err
}

View File

@ -1,630 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular tests due to the following build tag.
// +build rpctest
package rpctest
import (
"fmt"
"os"
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// testSendOutputs exercises the harness wallet's SendOutputs method: it
// spends mature coinbase outputs to fresh wallet addresses and verifies that
// each resulting transaction is mined into the next generated block.
func testSendOutputs(r *Harness, t *testing.T) {
	// genSpend sends amt KAS from the harness wallet to a fresh address and
	// returns the ID of the broadcast transaction.
	genSpend := func(amt util.Amount) *daghash.Hash {
		// Grab a fresh address from the wallet.
		addr, err := r.NewAddress()
		if err != nil {
			t.Fatalf("unable to get new address: %v", err)
		}

		// Next, send amt KAS to this address, spending from one of our mature
		// coinbase outputs.
		addrScript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			t.Fatalf("unable to generate scriptPubKey to addr: %v", err)
		}
		output := wire.NewTxOut(int64(amt), addrScript)
		txid, err := r.SendOutputs([]*wire.TxOut{output}, 10)
		if err != nil {
			t.Fatalf("coinbase spend failed: %v", err)
		}
		return txid
	}

	// assertTxMined fails the test unless the transaction with the given ID
	// appears in the block with the given hash.
	assertTxMined := func(txid *daghash.Hash, blockHash *daghash.Hash) {
		block, err := r.Node.GetBlock(blockHash)
		if err != nil {
			t.Fatalf("unable to get block: %v", err)
		}

		// Index 0 is the coinbase, so a mined spend implies at least two
		// transactions in the block.
		numBlockTxns := len(block.Transactions)
		if numBlockTxns < 2 {
			t.Fatalf("crafted transaction wasn't mined, block should have "+
				"at least %v transactions instead has %v", 2, numBlockTxns)
		}

		minedTx := block.Transactions[1]
		minedTxID := minedTx.TxID()
		if minedTxID != *txid {
			t.Fatalf("txid's don't match, %v vs %v", minedTxID, txid)
		}
	}

	// First, generate a small spend which will require only a single
	// input.
	txid := genSpend(util.Amount(5 * util.SompiPerKaspa))

	// Generate a single block, the transaction the wallet created should
	// be found in this block.
	blockHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}
	assertTxMined(txid, blockHashes[0])

	// Next, generate a spend much greater than the block reward. This
	// transaction should also have been mined properly.
	txid = genSpend(util.Amount(500 * util.SompiPerKaspa))
	blockHashes, err = r.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}
	assertTxMined(txid, blockHashes[0])
}
// assertConnectedTo fails the test unless nodeB's listen address appears in
// nodeA's current peer list.
func assertConnectedTo(t *testing.T, nodeA *Harness, nodeB *Harness) {
	nodeAPeers, err := nodeA.Node.GetPeerInfo()
	if err != nil {
		t.Fatalf("unable to get nodeA's peer info")
	}

	wantAddr := nodeB.node.config.listen
	found := false
	for _, peerInfo := range nodeAPeers {
		if peerInfo.Addr == wantAddr {
			found = true
			break
		}
	}

	if !found {
		t.Fatal("nodeA not connected to nodeB")
	}
}
// testConnectNode verifies that ConnectNode establishes a p2p connection from
// a freshly created local harness to the main test harness.
func testConnectNode(r *Harness, t *testing.T) {
	// Create a fresh test harness.
	harness, err := New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := harness.SetUp(false, 0); err != nil {
		t.Fatalf("unable to complete rpctest setup: %v", err)
	}
	defer harness.TearDown()

	// Establish a p2p connection from our new local harness to the main
	// harness.
	if err := ConnectNode(harness, r); err != nil {
		t.Fatalf("unable to connect local to main harness: %v", err)
	}

	// The main harness should show up in our local harness' peer's list,
	// and vice verse.
	assertConnectedTo(t, harness, r)
}
// testTearDownAll verifies that TearDownAll stops every active harness and
// removes its temporary data directory.
func testTearDownAll(t *testing.T) {
	// Grab a local copy of the currently active harnesses before
	// attempting to tear them all down.
	initialActiveHarnesses := ActiveHarnesses()

	// Tear down all currently active harnesses.
	if err := TearDownAll(); err != nil {
		t.Fatalf("unable to teardown all harnesses: %v", err)
	}

	// The global testInstances map should now be fully purged with no
	// active test harnesses remaining.
	if len(ActiveHarnesses()) != 0 {
		t.Fatalf("test harnesses still active after TearDownAll")
	}

	for _, harness := range initialActiveHarnesses {
		// Ensure all test directories have been deleted.
		if _, err := os.Stat(harness.testNodeDir); err == nil {
			t.Errorf("created test datadir was not deleted.")
		}
	}
}
// testActiveHarnesses verifies that creating a new harness increases the
// count reported by ActiveHarnesses.
func testActiveHarnesses(r *Harness, t *testing.T) {
	numInitialHarnesses := len(ActiveHarnesses())

	// Create a single test harness.
	// Note: the harness is intentionally never SetUp; creation alone should
	// register it as active.
	harness1, err := New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer harness1.TearDown()

	// With the harness created above, a single harness should be detected
	// as active.
	numActiveHarnesses := len(ActiveHarnesses())
	if !(numActiveHarnesses > numInitialHarnesses) {
		t.Fatalf("ActiveHarnesses not updated, should have an " +
			"additional test harness listed.")
	}
}
// testJoinMempools verifies that JoinNodes with the Mempools join type blocks
// until all nodes' mempools are identical. A transaction is broadcast to only
// one of two harnesses to force them out of sync, then the nodes are
// connected and the join is expected to complete.
func testJoinMempools(r *Harness, t *testing.T) {
	// Assert main test harness has no transactions in its mempool.
	pooledHashes, err := r.Node.GetRawMempool()
	if err != nil {
		t.Fatalf("unable to get mempool for main test harness: %v", err)
	}
	if len(pooledHashes) != 0 {
		t.Fatal("main test harness mempool not empty")
	}

	// Create a local test harness with only the genesis block. The nodes
	// will be synced below so the same transaction can be sent to both
	// nodes without it being an orphan.
	harness, err := New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := harness.SetUp(false, 0); err != nil {
		t.Fatalf("unable to complete rpctest setup: %v", err)
	}
	defer harness.TearDown()

	nodeSlice := []*Harness{r, harness}

	// Both mempools should be considered synced as they are empty.
	// Therefore, this should return instantly.
	if err := JoinNodes(nodeSlice, Mempools); err != nil {
		t.Fatalf("unable to join node on mempools: %v", err)
	}

	// Generate a coinbase spend to a new address within the main harness'
	// mempool.
	addr, err := r.NewAddress()
	if err != nil {
		// Previously this error was silently ignored.
		t.Fatalf("unable to generate new address: %v", err)
	}
	addrScript, err := txscript.PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to generate scriptPubKey to addr: %v", err)
	}
	output := wire.NewTxOut(5e8, addrScript)
	testTx, err := r.CreateTransaction([]*wire.TxOut{output}, 10)
	if err != nil {
		t.Fatalf("coinbase spend failed: %v", err)
	}
	if _, err := r.Node.SendRawTransaction(testTx, true); err != nil {
		t.Fatalf("send transaction failed: %v", err)
	}

	// Wait until the transaction shows up to ensure the two mempools are
	// not the same.
	harnessSynced := make(chan struct{})
	go func() {
		for {
			poolHashes, err := r.Node.GetRawMempool()
			if err != nil {
				// t.Fatal must only be called from the test goroutine;
				// report the failure and bail out of this helper goroutine.
				t.Errorf("failed to retrieve harness mempool: %v", err)
				return
			}
			if len(poolHashes) > 0 {
				break
			}
			time.Sleep(time.Millisecond * 100)
		}
		harnessSynced <- struct{}{}
	}()
	select {
	case <-harnessSynced:
	case <-time.After(time.Minute):
		t.Fatalf("harness node never received transaction")
	}

	// This select case should fall through to the default as the goroutine
	// should be blocked on the JoinNodes call.
	poolsSynced := make(chan struct{})
	go func() {
		if err := JoinNodes(nodeSlice, Mempools); err != nil {
			// As above, never t.Fatal from a non-test goroutine.
			t.Errorf("unable to join node on mempools: %v", err)
			return
		}
		poolsSynced <- struct{}{}
	}()
	select {
	case <-poolsSynced:
		t.Fatalf("mempools detected as synced yet harness has a new tx")
	default:
	}

	// Establish an outbound connection from the local harness to the main
	// harness and wait for the chains to be synced.
	if err := ConnectNode(harness, r); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	if err := JoinNodes(nodeSlice, Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// Send the transaction to the local harness which will result in synced
	// mempools.
	if _, err := harness.Node.SendRawTransaction(testTx, true); err != nil {
		t.Fatalf("send transaction failed: %v", err)
	}

	// Select once again with a special timeout case after 1 minute. The
	// goroutine above should now be blocked on sending into the unbuffered
	// channel. The send should immediately succeed. In order to avoid the
	// test hanging indefinitely, a 1 minute timeout is in place.
	select {
	case <-poolsSynced:
		// fall through
	case <-time.After(time.Minute):
		t.Fatalf("mempools never detected as synced")
	}
}
// testJoinBlocks verifies that JoinNodes with the Blocks join type blocks
// until two harnesses report the same selected tip, using a second harness
// that starts behind the main one.
func testJoinBlocks(r *Harness, t *testing.T) {
	// Create a second harness with only the genesis block so it is behind
	// the main harness.
	harness, err := New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := harness.SetUp(false, 0); err != nil {
		t.Fatalf("unable to complete rpctest setup: %v", err)
	}
	defer harness.TearDown()

	nodeSlice := []*Harness{r, harness}
	blocksSynced := make(chan struct{})
	go func() {
		if err := JoinNodes(nodeSlice, Blocks); err != nil {
			// t.Fatal must only be called from the test goroutine; report
			// the failure and bail out of this helper goroutine instead.
			t.Errorf("unable to join node on blocks: %v", err)
			return
		}
		blocksSynced <- struct{}{}
	}()

	// This select case should fall through to the default as the goroutine
	// should be blocked on the JoinNodes calls.
	select {
	case <-blocksSynced:
		t.Fatalf("blocks detected as synced yet local harness is behind")
	default:
	}

	// Connect the local harness to the main harness which will sync the
	// chains.
	if err := ConnectNode(harness, r); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}

	// Select once again with a special timeout case after 1 minute. The
	// goroutine above should now be blocked on sending into the unbuffered
	// channel. The send should immediately succeed. In order to avoid the
	// test hanging indefinitely, a 1 minute timeout is in place.
	select {
	case <-blocksSynced:
		// fall through
	case <-time.After(time.Minute):
		t.Fatalf("blocks never detected as synced")
	}
}
// testGenerateAndSubmitBlock mines a block containing wallet-crafted
// transactions via GenerateAndSubmitBlock, then checks transaction inclusion,
// the default block version, and custom version/timestamp handling.
func testGenerateAndSubmitBlock(r *Harness, t *testing.T) {
	// Generate a few test spend transactions.
	addr, err := r.NewAddress()
	if err != nil {
		t.Fatalf("unable to generate new address: %v", err)
	}
	scriptPubKey, err := txscript.PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to create script: %v", err)
	}
	output := wire.NewTxOut(util.SompiPerKaspa, scriptPubKey)

	const numTxns = 5
	txns := make([]*util.Tx, 0, numTxns)
	for i := 0; i < numTxns; i++ {
		tx, err := r.CreateTransaction([]*wire.TxOut{output}, 10)
		if err != nil {
			t.Fatalf("unable to create tx: %v", err)
		}
		txns = append(txns, util.NewTx(tx))
	}

	// Now generate a block with the default block version, and a zero'd
	// out time.
	block, err := r.GenerateAndSubmitBlock(txns, -1, time.Time{})
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	// Ensure that all created transactions were included, and that the
	// block version was properly set to the default.
	// numTxns+1 accounts for the coinbase transaction.
	numBlocksTxns := len(block.Transactions())
	if numBlocksTxns != numTxns+1 {
		t.Fatalf("block did not include all transactions: "+
			"expected %v, got %v", numTxns+1, numBlocksTxns)
	}
	blockVersion := block.MsgBlock().Header.Version
	if blockVersion != BlockVersion {
		t.Fatalf("block version is not default: expected %v, got %v",
			BlockVersion, blockVersion)
	}

	// Next generate a block with a "non-standard" block version along with
	// time stamp a minute after the previous block's timestamp.
	timestamp := block.MsgBlock().Header.Timestamp.Add(time.Minute)
	targetBlockVersion := int32(1337)
	block, err = r.GenerateAndSubmitBlock(nil, targetBlockVersion, timestamp)
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	// Finally ensure that the desired block version and timestamp were set
	// properly.
	header := block.MsgBlock().Header
	blockVersion = header.Version
	if blockVersion != targetBlockVersion {
		t.Fatalf("block version mismatch: expected %v, got %v",
			targetBlockVersion, blockVersion)
	}
	if !timestamp.Equal(header.Timestamp) {
		t.Fatalf("header time stamp mismatch: expected %v, got %v",
			timestamp, header.Timestamp)
	}
}
// testGenerateAndSubmitBlockWithCustomCoinbaseOutputs mirrors
// testGenerateAndSubmitBlock but routes block creation through
// GenerateAndSubmitBlockWithCustomCoinbaseOutputs with an extra zero-value
// (burn) coinbase output.
func testGenerateAndSubmitBlockWithCustomCoinbaseOutputs(r *Harness,
	t *testing.T) {
	// Generate a few test spend transactions.
	addr, err := r.NewAddress()
	if err != nil {
		t.Fatalf("unable to generate new address: %v", err)
	}
	scriptPubKey, err := txscript.PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to create script: %v", err)
	}
	output := wire.NewTxOut(util.SompiPerKaspa, scriptPubKey)

	const numTxns = 5
	txns := make([]*util.Tx, 0, numTxns)
	for i := 0; i < numTxns; i++ {
		tx, err := r.CreateTransaction([]*wire.TxOut{output}, 10)
		if err != nil {
			t.Fatalf("unable to create tx: %v", err)
		}
		txns = append(txns, util.NewTx(tx))
	}

	// Now generate a block with the default block version, a zero'd out
	// time, and a burn output.
	block, err := r.GenerateAndSubmitBlockWithCustomCoinbaseOutputs(txns,
		-1, time.Time{}, []wire.TxOut{{
			Value:        0,
			ScriptPubKey: []byte{},
		}})
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	// Ensure that all created transactions were included, and that the
	// block version was properly set to the default.
	// numTxns+1 accounts for the coinbase transaction.
	numBlocksTxns := len(block.Transactions())
	if numBlocksTxns != numTxns+1 {
		t.Fatalf("block did not include all transactions: "+
			"expected %v, got %v", numTxns+1, numBlocksTxns)
	}
	blockVersion := block.MsgBlock().Header.Version
	if blockVersion != BlockVersion {
		t.Fatalf("block version is not default: expected %v, got %v",
			BlockVersion, blockVersion)
	}

	// Next generate a block with a "non-standard" block version along with
	// time stamp a minute after the previous block's timestamp.
	timestamp := block.MsgBlock().Header.Timestamp.Add(time.Minute)
	targetBlockVersion := int32(1337)
	block, err = r.GenerateAndSubmitBlockWithCustomCoinbaseOutputs(nil,
		targetBlockVersion, timestamp, []wire.TxOut{{
			Value:        0,
			ScriptPubKey: []byte{},
		}})
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	// Finally ensure that the desired block version and timestamp were set
	// properly.
	header := block.MsgBlock().Header
	blockVersion = header.Version
	if blockVersion != targetBlockVersion {
		t.Fatalf("block version mismatch: expected %v, got %v",
			targetBlockVersion, blockVersion)
	}
	if !timestamp.Equal(header.Timestamp) {
		t.Fatalf("header time stamp mismatch: expected %v, got %v",
			timestamp, header.Timestamp)
	}
}
// testMemWalletReorg verifies that the in-memory wallet tracks a chain
// reorganization: a local harness mines its own chain, then syncs to the main
// harness, after which its original coinbase rewards should be invalidated.
func testMemWalletReorg(r *Harness, t *testing.T) {
	// Create a fresh harness, we'll be using the main harness to force a
	// re-org on this local harness.
	harness, err := New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := harness.SetUp(true, 5); err != nil {
		t.Fatalf("unable to complete rpctest setup: %v", err)
	}
	defer harness.TearDown()

	// The internal wallet of this harness should now have 250 KAS.
	// (5 mature outputs of 50 KAS each.)
	expectedBalance := util.Amount(250 * util.SompiPerKaspa)
	walletBalance := harness.ConfirmedBalance()
	if expectedBalance != walletBalance {
		t.Fatalf("wallet balance incorrect: expected %v, got %v",
			expectedBalance, walletBalance)
	}

	// Now connect this local harness to the main harness then wait for
	// their chains to synchronize.
	if err := ConnectNode(harness, r); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	nodeSlice := []*Harness{r, harness}
	if err := JoinNodes(nodeSlice, Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// The original wallet should now have a balance of 0 KAS as its entire
	// chain should have been decimated in favor of the main harness'
	// chain.
	expectedBalance = util.Amount(0)
	walletBalance = harness.ConfirmedBalance()
	if expectedBalance != walletBalance {
		t.Fatalf("wallet balance incorrect: expected %v, got %v",
			expectedBalance, walletBalance)
	}
}
// testMemWalletLockedOutputs verifies that outputs selected by
// CreateTransaction are locked (excluded from the confirmed balance) until
// explicitly released via UnlockOutputs.
func testMemWalletLockedOutputs(r *Harness, t *testing.T) {
	// Obtain the initial balance of the wallet at this point.
	startingBalance := r.ConfirmedBalance()

	// First, create a signed transaction spending some outputs.
	addr, err := r.NewAddress()
	if err != nil {
		t.Fatalf("unable to generate new address: %v", err)
	}
	scriptPubKey, err := txscript.PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to create script: %v", err)
	}
	outputAmt := util.Amount(50 * util.SompiPerKaspa)
	output := wire.NewTxOut(int64(outputAmt), scriptPubKey)
	tx, err := r.CreateTransaction([]*wire.TxOut{output}, 10)
	if err != nil {
		t.Fatalf("unable to create transaction: %v", err)
	}

	// The current wallet balance should now be at least 50 KAS less
	// (accounting for fees) than the period balance
	currentBalance := r.ConfirmedBalance()
	if !(currentBalance <= startingBalance-outputAmt) {
		t.Fatalf("spent outputs not locked: previous balance %v, "+
			"current balance %v", startingBalance, currentBalance)
	}

	// Now unlocked all the spent inputs within the unbroadcast signed
	// transaction. The current balance should now be exactly that of the
	// starting balance.
	r.UnlockOutputs(tx.TxIn)
	currentBalance = r.ConfirmedBalance()
	if currentBalance != startingBalance {
		t.Fatalf("current and starting balance should now match: "+
			"expected %v, got %v", startingBalance, currentBalance)
	}
}
// harnessTestCases lists the sub-tests run against the shared main harness,
// in order. Ordering matters where noted.
var harnessTestCases = []HarnessTestCase{
	testSendOutputs,
	testConnectNode,
	testActiveHarnesses,
	testJoinBlocks,
	testJoinMempools, // Depends on results of testJoinBlocks
	testGenerateAndSubmitBlock,
	testGenerateAndSubmitBlockWithCustomCoinbaseOutputs,
	testMemWalletReorg,
	testMemWalletLockedOutputs,
}

// mainHarness is the shared harness created in TestMain and used by every
// test case above.
var mainHarness *Harness

const (
	// numMatureOutputs is the number of mature coinbase outputs the main
	// harness is set up with, so tests have funds to spend.
	numMatureOutputs = 25
)
// TestMain creates and sets up the shared main harness, runs all tests, and
// tears down any harnesses still active afterwards.
func TestMain(m *testing.M) {
	var err error
	mainHarness, err = New(&dagconfig.SimnetParams, nil, nil)
	if err != nil {
		fmt.Println("unable to create main harness: ", err)
		os.Exit(1)
	}

	// Initialize the main mining node with a chain of length 125,
	// providing 25 mature coinbases to allow spending from for testing
	// purposes.
	if err = mainHarness.SetUp(true, numMatureOutputs); err != nil {
		fmt.Println("unable to setup test chain: ", err)

		// Even though the harness was not fully setup, it still needs
		// to be torn down to ensure all resources such as temp
		// directories are cleaned up. The error is intentionally
		// ignored since this is already an error path and nothing else
		// could be done about it anyways.
		_ = mainHarness.TearDown()
		os.Exit(1)
	}

	exitCode := m.Run()

	// Clean up any active harnesses that are still currently running.
	if len(ActiveHarnesses()) > 0 {
		if err := TearDownAll(); err != nil {
			fmt.Println("unable to tear down chain: ", err)
			os.Exit(1)
		}
	}

	os.Exit(exitCode)
}
// TestHarness sanity-checks the main harness (wallet balance and chain
// height) and then runs every registered harness test case in order,
// finishing with the global teardown test.
func TestHarness(t *testing.T) {
	// We should have (numMatureOutputs * 50 KAS) of mature unspendable
	// outputs.
	expectedBalance := util.Amount(numMatureOutputs * 50 * util.SompiPerKaspa)
	harnessBalance := mainHarness.ConfirmedBalance()
	if harnessBalance != expectedBalance {
		t.Fatalf("expected wallet balance of %v instead have %v",
			expectedBalance, harnessBalance)
	}

	// Current tip should be at a height of numMatureOutputs plus the
	// required number of blocks for coinbase maturity.
	nodeInfo, err := mainHarness.Node.GetInfo()
	if err != nil {
		t.Fatalf("unable to execute getinfo on node: %v", err)
	}
	expectedChainHeight := numMatureOutputs + uint32(mainHarness.ActiveNet.CoinbaseMaturity)
	if uint32(nodeInfo.Blocks) != expectedChainHeight {
		t.Errorf("Chain height is %v, should be %v",
			nodeInfo.Blocks, expectedChainHeight)
	}

	for _, testCase := range harnessTestCases {
		testCase(mainHarness, t)
	}

	// Runs last: tears down every remaining harness.
	testTearDownAll(t)
}

View File

@ -1,170 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpctest
import (
"reflect"
"time"
"github.com/kaspanet/kaspad/util/daghash"
)
// JoinType is an enum representing a particular type of "node join". A node
// join is a synchronization tool used to wait until a subset of nodes have a
// consistent state with respect to an attribute.
type JoinType uint8

const (
	// Blocks is a JoinType which waits until all nodes share the same
	// block height.
	Blocks JoinType = iota

	// Mempools is a JoinType which blocks until all nodes have identical
	// mempool.
	Mempools
)
// JoinNodes is a synchronization tool used to block until all passed nodes are
// fully synced with respect to an attribute. This function will block for a
// period of time, finally returning once all nodes are synced according to the
// passed JoinType. This function can be used to ensure all active test
// harnesses are at a consistent state before proceeding to an assertion or
// check within rpc tests.
func JoinNodes(nodes []*Harness, joinType JoinType) error {
	// Dispatch to the appropriate sync routine; unknown join types are a
	// no-op.
	if joinType == Blocks {
		return syncBlocks(nodes)
	}
	if joinType == Mempools {
		return syncMempools(nodes)
	}
	return nil
}
// syncMempools blocks until all nodes have identical mempools.
// It polls every 100ms, comparing each node's mempool against the first
// node's via reflect.DeepEqual.
func syncMempools(nodes []*Harness) error {
	poolsMatch := false

retry:
	for !poolsMatch {
		firstPool, err := nodes[0].Node.GetRawMempool()
		if err != nil {
			return err
		}

		// If all nodes have an identical mempool with respect to the
		// first node, then we're done. Otherwise, drop back to the top
		// of the loop and retry after a short wait period.
		for _, node := range nodes[1:] {
			nodePool, err := node.Node.GetRawMempool()
			if err != nil {
				return err
			}

			if !reflect.DeepEqual(firstPool, nodePool) {
				time.Sleep(time.Millisecond * 100)
				continue retry
			}
		}

		poolsMatch = true
	}

	return nil
}
// syncBlocks blocks until all nodes report the same best chain.
// It polls every 100ms until every node reports the same selected-tip hash
// and blue score.
func syncBlocks(nodes []*Harness) error {
	blocksMatch := false

retry:
	for !blocksMatch {
		var parentHash *daghash.Hash
		var prevBlueScore uint64
		for _, node := range nodes {
			selectedTip, err := node.Node.GetSelectedTip()
			if err != nil {
				return err
			}
			blockHash, err := daghash.NewHashFromStr(selectedTip.Hash)
			if err != nil {
				return err
			}
			blueScore := selectedTip.BlueScore
			// Any mismatch against the previously seen node restarts the
			// whole comparison after a short wait.
			if parentHash != nil && (*blockHash != *parentHash ||
				blueScore != prevBlueScore) {

				time.Sleep(time.Millisecond * 100)
				continue retry
			}
			parentHash, prevBlueScore = blockHash, blueScore
		}
		blocksMatch = true
	}

	return nil
}
// ConnectNode establishes a new peer-to-peer connection between the "from"
// harness and the "to" harness. The connection made is flagged as persistent,
// therefore in the case of disconnects, "from" will attempt to reestablish a
// connection to the "to" harness.
func ConnectNode(from *Harness, to *Harness) error {
	peerInfo, err := from.Node.GetPeerInfo()
	if err != nil {
		return err
	}
	numPeers := len(peerInfo)

	targetAddr := to.node.config.listen
	if err := from.Node.AddManualNode(targetAddr); err != nil {
		return err
	}

	// Block until a new connection has been established. Poll with a short
	// pause between RPC calls (consistent with syncBlocks/syncMempools)
	// instead of hammering the node in a tight loop.
	peerInfo, err = from.Node.GetPeerInfo()
	if err != nil {
		return err
	}
	for len(peerInfo) <= numPeers {
		time.Sleep(time.Millisecond * 100)
		peerInfo, err = from.Node.GetPeerInfo()
		if err != nil {
			return err
		}
	}

	return nil
}
// TearDownAll tears down all active test harnesses.
func TearDownAll() error {
	harnessStateMtx.Lock()
	defer harnessStateMtx.Unlock()

	// Tear each registered harness down; stop at the first failure.
	for _, harness := range testInstances {
		err := harness.tearDown()
		if err != nil {
			return err
		}
	}

	return nil
}
// ActiveHarnesses returns a slice of all currently active test harnesses. A
// test harness is considered "active" if it has been created, but not yet torn
// down.
func ActiveHarnesses() []*Harness {
	harnessStateMtx.RLock()
	defer harnessStateMtx.RUnlock()

	// Snapshot the registry under the read lock.
	active := make([]*Harness, 0, len(testInstances))
	for _, harness := range testInstances {
		active = append(active, harness)
	}

	return active
}

View File

@ -1,639 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package cpuminer
import (
"fmt"
"github.com/pkg/errors"
"math/rand"
"runtime"
"sync"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/random"
"github.com/kaspanet/kaspad/wire"
)
const (
	// maxNonce is the maximum value a nonce can be in a block header.
	maxNonce = ^uint64(0) // 2^64 - 1

	// hpsUpdateSecs is the number of seconds to wait in between each
	// update to the hashes per second monitor.
	hpsUpdateSecs = 10

	// hashUpdateSecs is the number of seconds each worker waits in between
	// notifying the speed monitor with how many hashes have been completed
	// while they are actively searching for a solution. This is done to
	// reduce the amount of syncs between the workers that must be done to
	// keep track of the hashes per second.
	hashUpdateSecs = 15
)

var (
	// defaultNumWorkers is the default number of workers to use for mining
	// and is based on the number of processor cores. This helps ensure the
	// system stays reasonably responsive under heavy load.
	defaultNumWorkers = uint32(runtime.NumCPU())
)
// Config is a descriptor containing the cpu miner configuration.
type Config struct {
	// DAGParams identifies which DAG parameters the cpu miner is
	// associated with.
	DAGParams *dagconfig.Params

	// BlockTemplateGenerator identifies the instance to use in order to
	// generate block templates that the miner will attempt to solve.
	BlockTemplateGenerator *mining.BlkTmplGenerator

	// MiningAddrs is a list of payment addresses to use for the generated
	// blocks. Each generated block will randomly choose one of them.
	MiningAddrs []util.Address

	// ProcessBlock defines the function to call with any solved blocks.
	// It typically must run the provided block through the same set of
	// rules and handling as any other block coming from the network.
	ProcessBlock func(*util.Block, blockdag.BehaviorFlags) (bool, error)

	// ConnectedCount defines the function to use to obtain how many other
	// peers the server is connected to. This is used by the automatic
	// persistent mining routine to determine whether or not it should
	// attempt mining. This is useful because there is no point in mining
	// when not connected to any peers since there would not be anyone to
	// send any found blocks to.
	ConnectedCount func() int32

	// ShouldMineOnGenesis checks if the node is connected to at least one
	// peer, and at least one of its peers knows of any blocks that were mined
	// on top of the genesis block.
	ShouldMineOnGenesis func() bool

	// IsCurrent defines the function to use to obtain whether or not the
	// block DAG is current. This is used by the automatic persistent
	// mining routine to determine whether or not it should attempt mining.
	// This is useful because there is no point in mining if the DAG is
	// not current since any solved blocks would end up red anyways.
	IsCurrent func() bool
}
// CPUMiner provides facilities for solving blocks (mining) using the CPU in
// a concurrency-safe manner. It consists of two main goroutines -- a speed
// monitor and a controller for worker goroutines which generate and solve
// blocks. The number of goroutines can be set via the SetNumWorkers
// function, but the default is based on the number of processor cores in the
// system which is typically sufficient.
type CPUMiner struct {
	sync.Mutex
	// g generates the block templates the workers attempt to solve.
	g *mining.BlkTmplGenerator
	// cfg is a copy of the configuration passed to New.
	cfg Config
	// numWorkers is the desired number of solver goroutines.
	numWorkers uint32
	// started reports whether continuous mining is active.
	started bool
	// discreteMining is set while GenerateNBlocks is running.
	discreteMining bool
	// submitBlockLock serializes template creation and block submission.
	submitBlockLock sync.Mutex
	// wg tracks the speed monitor and worker controller goroutines.
	wg sync.WaitGroup
	// workerWg tracks the individual solver goroutines.
	workerWg sync.WaitGroup
	// updateNumWorkers signals the controller that numWorkers changed.
	updateNumWorkers chan struct{}
	// queryHashesPerSec serves speed queries from the speed monitor.
	queryHashesPerSec chan float64
	// updateHashes carries per-worker completed-hash counts to the monitor.
	updateHashes chan uint64
	// speedMonitorQuit stops the speed monitor goroutine.
	speedMonitorQuit chan struct{}
	// quit stops the worker controller (and through it, the workers).
	quit chan struct{}
}
// speedMonitor handles tracking the number of hashes per second the mining
// process is performing. It must be run as a goroutine.
func (m *CPUMiner) speedMonitor() {
	log.Tracef("CPU miner speed monitor started")

	var avgHashesPerSec float64
	var pendingHashes uint64

	// onTick folds the hashes accumulated since the last tick into a
	// running average and logs the current speed.
	onTick := func() {
		curHashesPerSec := float64(pendingHashes) / hpsUpdateSecs
		if avgHashesPerSec == 0 {
			avgHashesPerSec = curHashesPerSec
		}
		avgHashesPerSec = (avgHashesPerSec + curHashesPerSec) / 2
		pendingHashes = 0
		if avgHashesPerSec != 0 {
			log.Debugf("Hash speed: %6.0f kilohashes/s",
				avgHashesPerSec/1000)
		}
	}

	ticker := time.NewTicker(time.Second * hpsUpdateSecs)
	defer ticker.Stop()

	for {
		select {
		case numHashes := <-m.updateHashes:
			// Periodic update from a worker with how many hashes it
			// has performed.
			pendingHashes += numHashes

		case <-ticker.C:
			// Time to update the hashes per second.
			onTick()

		case m.queryHashesPerSec <- avgHashesPerSec:
			// Served a request for the current rate; nothing to do.

		case <-m.speedMonitorQuit:
			m.wg.Done()
			log.Tracef("CPU miner speed monitor done")
			return
		}
	}
}
// submitBlock submits the passed block to network after ensuring it passes all
// of the consensus validation rules. It returns true when the block was
// accepted and false when it was stale, rejected, or an orphan.
func (m *CPUMiner) submitBlock(block *util.Block) bool {
	m.submitBlockLock.Lock()
	defer m.submitBlockLock.Unlock()

	// Ensure the block is not stale since a new block could have shown up
	// while the solution was being found. Typically that condition is
	// detected and all work on the stale block is halted to start work on
	// a new block, but the check only happens periodically, so it is
	// possible a block was found and submitted in between.
	msgBlock := block.MsgBlock()
	if !daghash.AreEqual(msgBlock.Header.ParentHashes, m.g.TipHashes()) {
		log.Debugf("Block submitted via CPU miner with previous "+
			"blocks %s is stale", msgBlock.Header.ParentHashes)
		return false
	}

	// Process this block using the same rules as blocks coming from other
	// nodes. This will in turn relay it to the network like normal.
	isOrphan, err := m.cfg.ProcessBlock(block, blockdag.BFNone)
	if err != nil {
		// Anything other than a rule violation is an unexpected error,
		// so log that error as an internal error.
		if _, ok := err.(blockdag.RuleError); !ok {
			log.Errorf("Unexpected error while processing "+
				"block submitted via CPU miner: %s", err)
			return false
		}

		// Rule violations are expected occasionally; log at debug only.
		log.Debugf("Block submitted via CPU miner rejected: %s", err)
		return false
	}
	if isOrphan {
		log.Debugf("Block submitted via CPU miner is an orphan")
		return false
	}

	// The block was accepted.
	log.Infof("Block submitted via CPU miner accepted (hash %s)", block.Hash())
	return true
}
// solveBlock attempts to find some combination of a nonce, extra nonce, and
// current timestamp which makes the passed block hash to a value less than the
// target difficulty. The timestamp is updated periodically and the passed
// block is modified with all tweaks during this process. This means that
// when the function returns true, the block is ready for submission.
//
// This function will return early with false when conditions that trigger a
// stale block such as a new block showing up or periodically when there are
// new transactions and enough time has elapsed without finding a solution.
func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, quit chan struct{}) bool {
	// Create some convenience variables.
	header := &msgBlock.Header
	targetDifficulty := util.CompactToBig(header.Bits)

	// Initial state.
	lastGenerated := time.Now()
	lastTxUpdate := m.g.TxSource().LastUpdated()
	hashesCompleted := uint64(0)

	// Choose a random extra nonce for this block template and worker.
	extraNonce, err := random.Uint64()
	if err != nil {
		// Non-fatal: mining proceeds with the zero-value extra nonce.
		log.Errorf("Unexpected error while generating random "+
			"extra nonce offset: %s", err)
	}

	// Update the extra nonce in the block template with the
	// new value by regenerating the coinbase script and
	// setting the merkle root to the new value.
	m.g.UpdateExtraNonce(msgBlock, extraNonce)

	// Search through the entire nonce range for a solution while
	// periodically checking for early quit and stale block
	// conditions along with updates to the speed monitor.
	for i := uint64(0); i <= maxNonce; i++ {
		select {
		case <-quit:
			return false

		case <-ticker.C:
			// Report progress to the speed monitor and reset the
			// local counter.
			m.updateHashes <- hashesCompleted
			hashesCompleted = 0

			// The current block is stale if the DAG has changed.
			if !daghash.AreEqual(header.ParentHashes, m.g.TipHashes()) {
				return false
			}

			// The current block is stale if the memory pool
			// has been updated since the block template was
			// generated and it has been at least one
			// minute.
			if lastTxUpdate != m.g.TxSource().LastUpdated() &&
				time.Now().After(lastGenerated.Add(time.Minute)) {
				return false
			}

			m.g.UpdateBlockTime(msgBlock)

		default:
			// Non-blocking select to fall through
		}

		// Update the nonce and hash the block header. Each
		// hash is actually a double sha256 (two hashes), so
		// increment the number of hashes completed for each
		// attempt accordingly.
		header.Nonce = i
		hash := header.BlockHash()
		hashesCompleted += 2

		// The block is solved when the new block hash is less
		// than the target difficulty. Yay!
		if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
			m.updateHashes <- hashesCompleted
			return true
		}
	}

	return false
}
// generateBlocks is a worker that is controlled by the miningWorkerController.
// It is self contained in that it creates block templates and attempts to solve
// them while detecting when it is performing stale work and reacting
// accordingly by generating a new block template. When a block is solved, it
// is submitted.
//
// It must be run as a goroutine.
func (m *CPUMiner) generateBlocks(quit chan struct{}) {
	log.Tracef("Starting generate blocks worker")

	// Start a ticker which is used to signal checks for stale work and
	// updates to the speed monitor.
	ticker := time.NewTicker(time.Second * hashUpdateSecs)
	defer ticker.Stop()
out:
	for {
		// Quit when the miner is stopped.
		select {
		case <-quit:
			break out
		default:
			// Non-blocking select to fall through
		}

		// Wait until there is a connection to at least one other peer
		// since there is no way to relay a found block or receive
		// transactions to work on when there are no connected peers.
		if m.cfg.ConnectedCount() == 0 {
			time.Sleep(time.Second)
			continue
		}

		// No point in searching for a solution before the DAG is
		// synced. Also, grab the same lock as used for block
		// submission, since the current block will be changing and
		// this would otherwise end up building a new block template on
		// a block that is in the process of becoming stale.
		m.submitBlockLock.Lock()
		currentBlueScore := m.g.VirtualBlueScore()
		if (currentBlueScore != 0 && !m.cfg.IsCurrent()) || (currentBlueScore == 0 && !m.cfg.ShouldMineOnGenesis()) {
			m.submitBlockLock.Unlock()
			time.Sleep(time.Second)
			continue
		}

		// Choose a payment address at random.
		rand.Seed(time.Now().UnixNano())
		payToAddr := m.cfg.MiningAddrs[rand.Intn(len(m.cfg.MiningAddrs))]

		// Create a new block template using the available transactions
		// in the memory pool as a source of transactions to potentially
		// include in the block.
		template, err := m.g.NewBlockTemplate(payToAddr)
		m.submitBlockLock.Unlock()
		if err != nil {
			errStr := fmt.Sprintf("Failed to create new block "+
				"template: %s", err)
			log.Errorf(errStr)
			continue
		}

		// Attempt to solve the block. The function will exit early
		// with false when conditions that trigger a stale block, so
		// a new block template can be generated. When the return is
		// true a solution was found, so submit the solved block.
		if m.solveBlock(template.Block, ticker, quit) {
			block := util.NewBlock(template.Block)
			m.submitBlock(block)
		}
	}

	m.workerWg.Done()
	log.Tracef("Generate blocks worker done")
}
// miningWorkerController launches the worker goroutines that are used to
// generate block templates and solve them. It also provides the ability to
// dynamically adjust the number of running worker goroutines.
//
// It must be run as a goroutine.
func (m *CPUMiner) miningWorkerController() {
	// launchWorkers groups common code to launch a specified number of
	// workers for generating blocks.
	var runningWorkers []chan struct{}
	launchWorkers := func(numWorkers uint32) {
		for i := uint32(0); i < numWorkers; i++ {
			quit := make(chan struct{})
			runningWorkers = append(runningWorkers, quit)

			m.workerWg.Add(1)
			spawn(func() {
				m.generateBlocks(quit)
			})
		}
	}

	// Launch the current number of workers by default.
	runningWorkers = make([]chan struct{}, 0, m.numWorkers)
	launchWorkers(m.numWorkers)

out:
	for {
		select {
		// Update the number of running workers.
		case <-m.updateNumWorkers:
			// No change.
			numRunning := uint32(len(runningWorkers))
			if m.numWorkers == numRunning {
				continue
			}

			// Add new workers.
			if m.numWorkers > numRunning {
				launchWorkers(m.numWorkers - numRunning)
				continue
			}

			// Signal the most recently created goroutines to exit.
			// Iterate with signed indexes: the previous uint32
			// counter wrapped around below zero when m.numWorkers
			// was 0, which would panic with an out-of-range index
			// after the last worker was closed.
			for i := int(numRunning) - 1; i >= int(m.numWorkers); i-- {
				close(runningWorkers[i])
				runningWorkers[i] = nil
				runningWorkers = runningWorkers[:i]
			}

		case <-m.quit:
			for _, quit := range runningWorkers {
				close(quit)
			}
			break out
		}
	}

	// Wait until all workers shut down to stop the speed monitor since
	// they rely on being able to send updates to it.
	m.workerWg.Wait()
	close(m.speedMonitorQuit)
	m.wg.Done()
}
// Start begins the CPU mining process as well as the speed monitor used to
// track hashing metrics. Calling this function when the CPU miner has
// already been started will have no effect.
//
// This function is safe for concurrent access.
func (m *CPUMiner) Start() {
	m.Lock()
	defer m.Unlock()

	// Nothing to do while continuous mining is active or while a discrete
	// GenerateNBlocks run is in progress.
	if m.started || m.discreteMining {
		return
	}

	m.quit = make(chan struct{})
	m.speedMonitorQuit = make(chan struct{})
	m.wg.Add(2)
	spawn(m.speedMonitor)
	spawn(m.miningWorkerController)
	m.started = true

	log.Infof("CPU miner started, number of workers %d", m.numWorkers)
}
// Stop gracefully stops the mining process by signalling all workers, and the
// speed monitor to quit. Calling this function when the CPU miner has not
// already been started will have no effect.
//
// This function is safe for concurrent access.
func (m *CPUMiner) Stop() {
	m.Lock()
	defer m.Unlock()

	// Nothing to do unless continuous mining is active; a discrete
	// GenerateNBlocks run is not affected by Stop.
	if !m.started || m.discreteMining {
		return
	}

	close(m.quit)
	m.wg.Wait()
	m.started = false

	log.Infof("CPU miner stopped")
}
// IsMining returns whether or not the CPU miner has been started and is
// therefore currently mining.
//
// This function is safe for concurrent access.
func (m *CPUMiner) IsMining() bool {
	m.Lock()
	mining := m.started
	m.Unlock()
	return mining
}
// HashesPerSecond returns the number of hashes per second the mining process
// is performing. 0 is returned if the miner is not currently running.
//
// Note: when the miner is running this blocks until the speed monitor
// services the query on the queryHashesPerSec channel.
//
// This function is safe for concurrent access.
func (m *CPUMiner) HashesPerSecond() float64 {
	m.Lock()
	defer m.Unlock()

	// Nothing to do if the miner is not currently running.
	if !m.started {
		return 0
	}

	return <-m.queryHashesPerSec
}
// SetNumWorkers sets the number of workers to create which solve blocks. Any
// negative values will cause a default number of workers to be used which is
// based on the number of processor cores in the system. A value of 0 will
// cause all CPU mining to be stopped.
//
// This function is safe for concurrent access.
func (m *CPUMiner) SetNumWorkers(numWorkers int32) {
	if numWorkers == 0 {
		m.Stop()
	}

	// Don't lock until after the first check since Stop does its own
	// locking.
	m.Lock()
	defer m.Unlock()

	// Use default if provided value is negative.
	if numWorkers < 0 {
		m.numWorkers = defaultNumWorkers
	} else {
		m.numWorkers = uint32(numWorkers)
	}

	// When the miner is already running, notify the controller about the
	// change.
	if m.started {
		m.updateNumWorkers <- struct{}{}
	}
}
// NumWorkers returns the number of workers which are running to solve blocks.
//
// This function is safe for concurrent access.
func (m *CPUMiner) NumWorkers() int32 {
	m.Lock()
	workers := int32(m.numWorkers)
	m.Unlock()
	return workers
}
// GenerateNBlocks generates the requested number of blocks. It is self
// contained in that it creates block templates and attempts to solve them while
// detecting when it is performing stale work and reacting accordingly by
// generating a new block template. When a block is solved, it is submitted.
// The function returns a list of the hashes of generated blocks.
func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*daghash.Hash, error) {
	m.Lock()

	// Respond with an error if server is already mining.
	if m.started || m.discreteMining {
		m.Unlock()
		return nil, errors.New("Server is already CPU mining. Please call " +
			"`setgenerate 0` before calling discrete `generate` commands.")
	}

	// Mark discrete mining active and start the speed monitor, which the
	// solve loop reports progress to.
	m.started = true
	m.discreteMining = true
	m.speedMonitorQuit = make(chan struct{})
	m.wg.Add(1)
	spawn(m.speedMonitor)
	m.Unlock()

	log.Tracef("Generating %d blocks", n)

	i := uint32(0)
	blockHashes := make([]*daghash.Hash, n)

	// Start a ticker which is used to signal checks for stale work and
	// updates to the speed monitor.
	ticker := time.NewTicker(time.Second * hashUpdateSecs)
	defer ticker.Stop()

	for {
		// Read updateNumWorkers in case someone tries a `setgenerate` while
		// we're generating. We can ignore it as the `generate` RPC call only
		// uses 1 worker.
		select {
		case <-m.updateNumWorkers:
		default:
		}

		// Grab the lock used for block submission, since the current block will
		// be changing and this would otherwise end up building a new block
		// template on a block that is in the process of becoming stale.
		m.submitBlockLock.Lock()

		// Choose a payment address at random.
		rand.Seed(time.Now().UnixNano())
		payToAddr := m.cfg.MiningAddrs[rand.Intn(len(m.cfg.MiningAddrs))]

		// Create a new block template using the available transactions
		// in the memory pool as a source of transactions to potentially
		// include in the block.
		template, err := m.g.NewBlockTemplate(payToAddr)
		m.submitBlockLock.Unlock()
		if err != nil {
			errStr := fmt.Sprintf("Failed to create new block "+
				"template: %s", err)
			log.Errorf(errStr)
			continue
		}

		// Attempt to solve the block. The function will exit early
		// with false when conditions that trigger a stale block, so
		// a new block template can be generated. When the return is
		// true a solution was found, so submit the solved block.
		if m.solveBlock(template.Block, ticker, nil) {
			block := util.NewBlock(template.Block)
			m.submitBlock(block)
			blockHashes[i] = block.Hash()
			i++
			if i == n {
				log.Tracef("Generated %d blocks", i)
				// Tear down the speed monitor and clear the
				// discrete-mining state before returning.
				m.Lock()
				close(m.speedMonitorQuit)
				m.wg.Wait()
				m.started = false
				m.discreteMining = false
				m.Unlock()
				return blockHashes, nil
			}
		}
	}
}
// ShouldMineOnGenesis checks if the node is connected to at least one
// peer, and at least one of its peers knows of any blocks that were mined
// on top of the genesis block. It delegates to the callback supplied via
// the miner's Config.
func (m *CPUMiner) ShouldMineOnGenesis() bool {
	return m.cfg.ShouldMineOnGenesis()
}
// New returns a new instance of a CPU miner for the provided configuration.
// Use Start to begin the mining process. See the documentation for the
// CPUMiner type for more details.
func New(cfg *Config) *CPUMiner {
	miner := &CPUMiner{
		g:                 cfg.BlockTemplateGenerator,
		cfg:               *cfg,
		numWorkers:        defaultNumWorkers,
		updateNumWorkers:  make(chan struct{}),
		queryHashesPerSec: make(chan float64),
		updateHashes:      make(chan uint64),
	}
	return miner
}

View File

@ -1,13 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package cpuminer
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
// log is the logger for the cpuminer subsystem (MINR tag).
var log, _ = logger.Get(logger.SubsystemTags.MINR)

// spawn launches goroutines wrapped with panic handling that reports panics
// through the subsystem logger.
var spawn = panics.GoroutineWrapperFunc(log)

View File

@ -9,197 +9,9 @@ import (
"encoding/json"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// FutureGenerateResult is a future promise to deliver the result of a
// GenerateAsync RPC invocation (or an applicable error).
type FutureGenerateResult chan *response

// Receive waits for the response promised by the future and returns a list of
// block hashes generated by the call.
func (r FutureGenerateResult) Receive() ([]*daghash.Hash, error) {
	res, err := receiveFuture(r)
	if err != nil {
		return nil, err
	}

	// Unmarshal result as a list of strings.
	var hashStrings []string
	if err := json.Unmarshal(res, &hashStrings); err != nil {
		return nil, err
	}

	// Convert each block hash string to a *daghash.Hash.
	hashes := make([]*daghash.Hash, len(hashStrings))
	for i, hashString := range hashStrings {
		hash, err := daghash.NewHashFromStr(hashString)
		if err != nil {
			return nil, err
		}
		hashes[i] = hash
	}

	return hashes, nil
}

// GenerateAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See Generate for the blocking version and more details.
func (c *Client) GenerateAsync(numBlocks uint32) FutureGenerateResult {
	return c.sendCmd(rpcmodel.NewGenerateCmd(numBlocks))
}

// Generate generates numBlocks blocks and returns their hashes.
func (c *Client) Generate(numBlocks uint32) ([]*daghash.Hash, error) {
	future := c.GenerateAsync(numBlocks)
	return future.Receive()
}
// FutureGetGenerateResult is a future promise to deliver the result of a
// GetGenerateAsync RPC invocation (or an applicable error).
type FutureGetGenerateResult chan *response

// Receive waits for the response promised by the future and returns true if the
// server is set to mine, otherwise false.
func (r FutureGetGenerateResult) Receive() (bool, error) {
	res, err := receiveFuture(r)
	if err != nil {
		return false, err
	}

	// Unmarshal result as a boolean.
	var isGenerating bool
	if err := json.Unmarshal(res, &isGenerating); err != nil {
		return false, err
	}

	return isGenerating, nil
}

// GetGenerateAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetGenerate for the blocking version and more details.
func (c *Client) GetGenerateAsync() FutureGetGenerateResult {
	return c.sendCmd(rpcmodel.NewGetGenerateCmd())
}

// GetGenerate returns true if the server is set to mine, otherwise false.
func (c *Client) GetGenerate() (bool, error) {
	future := c.GetGenerateAsync()
	return future.Receive()
}
// FutureSetGenerateResult is a future promise to deliver the result of a
// SetGenerateAsync RPC invocation (or an applicable error).
type FutureSetGenerateResult chan *response

// Receive waits for the response promised by the future and returns an error if
// any occurred when setting the server to generate coins (mine) or not.
func (r FutureSetGenerateResult) Receive() error {
	// Only the error is of interest; the payload is discarded.
	if _, err := receiveFuture(r); err != nil {
		return err
	}
	return nil
}

// SetGenerateAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See SetGenerate for the blocking version and more details.
func (c *Client) SetGenerateAsync(enable bool, numCPUs int) FutureSetGenerateResult {
	return c.sendCmd(rpcmodel.NewSetGenerateCmd(enable, &numCPUs))
}

// SetGenerate sets the server to generate coins (mine) or not.
func (c *Client) SetGenerate(enable bool, numCPUs int) error {
	future := c.SetGenerateAsync(enable, numCPUs)
	return future.Receive()
}
// FutureGetHashesPerSecResult is a future promise to deliver the result of a
// GetHashesPerSecAsync RPC invocation (or an applicable error).
type FutureGetHashesPerSecResult chan *response

// Receive waits for the response promised by the future and returns a recent
// hashes per second performance measurement while generating coins (mining).
// Zero is returned if the server is not mining. On a non-nil error the int64
// result is always the zero value.
func (r FutureGetHashesPerSecResult) Receive() (int64, error) {
	res, err := receiveFuture(r)
	if err != nil {
		// Return the zero value here for consistency with the
		// unmarshalling failure path below (previously -1 was
		// returned, making the error-case result inconsistent).
		return 0, err
	}

	// Unmarshal result as an int64.
	var result int64
	err = json.Unmarshal(res, &result)
	if err != nil {
		return 0, err
	}

	return result, nil
}

// GetHashesPerSecAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetHashesPerSec for the blocking version and more details.
func (c *Client) GetHashesPerSecAsync() FutureGetHashesPerSecResult {
	cmd := rpcmodel.NewGetHashesPerSecCmd()
	return c.sendCmd(cmd)
}

// GetHashesPerSec returns a recent hashes per second performance measurement
// while generating coins (mining). Zero is returned if the server is not
// mining.
func (c *Client) GetHashesPerSec() (int64, error) {
	return c.GetHashesPerSecAsync().Receive()
}
// FutureGetMiningInfoResult is a future promise to deliver the result of a
// GetMiningInfoAsync RPC invocation (or an applicable error).
type FutureGetMiningInfoResult chan *response

// Receive waits for the response promised by the future and returns the mining
// information.
func (r FutureGetMiningInfoResult) Receive() (*rpcmodel.GetMiningInfoResult, error) {
	res, err := receiveFuture(r)
	if err != nil {
		return nil, err
	}

	// Unmarshal result as a getmininginfo result object.
	infoResult := &rpcmodel.GetMiningInfoResult{}
	if err := json.Unmarshal(res, infoResult); err != nil {
		return nil, err
	}

	return infoResult, nil
}

// GetMiningInfoAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetMiningInfo for the blocking version and more details.
func (c *Client) GetMiningInfoAsync() FutureGetMiningInfoResult {
	return c.sendCmd(rpcmodel.NewGetMiningInfoCmd())
}

// GetMiningInfo returns mining information.
func (c *Client) GetMiningInfo() (*rpcmodel.GetMiningInfoResult, error) {
	future := c.GetMiningInfoAsync()
	return future.Receive()
}
// FutureSubmitBlockResult is a future promise to deliver the result of a
// SubmitBlockAsync RPC invocation (or an applicable error).
type FutureSubmitBlockResult chan *response

View File

@ -332,24 +332,6 @@ func NewGetDifficultyCmd() *GetDifficultyCmd {
return &GetDifficultyCmd{}
}
// GetGenerateCmd defines the getGenerate JSON-RPC command. It takes no
// parameters.
type GetGenerateCmd struct{}

// NewGetGenerateCmd returns a new instance which can be used to issue a
// getGenerate JSON-RPC command.
func NewGetGenerateCmd() *GetGenerateCmd {
	return &GetGenerateCmd{}
}

// GetHashesPerSecCmd defines the getHashesPerSec JSON-RPC command. It takes
// no parameters.
type GetHashesPerSecCmd struct{}

// NewGetHashesPerSecCmd returns a new instance which can be used to issue a
// getHashesPerSec JSON-RPC command.
func NewGetHashesPerSecCmd() *GetHashesPerSecCmd {
	return &GetHashesPerSecCmd{}
}
// GetInfoCmd defines the getInfo JSON-RPC command. It takes no parameters.
type GetInfoCmd struct{}
@ -381,15 +363,6 @@ func NewGetMempoolInfoCmd() *GetMempoolInfoCmd {
return &GetMempoolInfoCmd{}
}
// GetMiningInfoCmd defines the getMiningInfo JSON-RPC command. It takes no
// parameters.
type GetMiningInfoCmd struct{}

// NewGetMiningInfoCmd returns a new instance which can be used to issue a
// getMiningInfo JSON-RPC command.
func NewGetMiningInfoCmd() *GetMiningInfoCmd {
	return &GetMiningInfoCmd{}
}
// GetNetworkInfoCmd defines the getNetworkInfo JSON-RPC command. It takes no
// parameters.
type GetNetworkInfoCmd struct{}
@ -564,24 +537,6 @@ func NewSendRawTransactionCmd(hexTx string, allowHighFees *bool) *SendRawTransac
}
}
// SetGenerateCmd defines the setGenerate JSON-RPC command.
type SetGenerateCmd struct {
	// Generate enables or disables mining.
	Generate bool
	// GenProcLimit limits the number of mining processes; -1 means the
	// server default.
	GenProcLimit *int `jsonrpcdefault:"-1"`
}

// NewSetGenerateCmd returns a new instance which can be used to issue a
// setGenerate JSON-RPC command.
//
// The parameters which are pointers indicate they are optional. Passing nil
// for optional parameters will use the default value.
func NewSetGenerateCmd(generate bool, genProcLimit *int) *SetGenerateCmd {
	return &SetGenerateCmd{
		Generate:     generate,
		GenProcLimit: genProcLimit,
	}
}
// StopCmd defines the stop JSON-RPC command. It takes no parameters.
type StopCmd struct{}
@ -686,19 +641,6 @@ func NewDebugLevelCmd(levelSpec string) *DebugLevelCmd {
}
}
// GenerateCmd defines the generate JSON-RPC command.
type GenerateCmd struct {
	// NumBlocks is the number of blocks to generate.
	NumBlocks uint32
}

// NewGenerateCmd returns a new instance which can be used to issue a generate
// JSON-RPC command.
func NewGenerateCmd(numBlocks uint32) *GenerateCmd {
	return &GenerateCmd{
		NumBlocks: numBlocks,
	}
}
// GetSelectedTipCmd defines the getSelectedTip JSON-RPC command.
type GetSelectedTipCmd struct {
Verbose *bool `jsonrpcdefault:"true"`
@ -778,13 +720,10 @@ func init() {
MustRegisterCommand("getDagTips", (*GetDAGTipsCmd)(nil), flags)
MustRegisterCommand("getConnectionCount", (*GetConnectionCountCmd)(nil), flags)
MustRegisterCommand("getDifficulty", (*GetDifficultyCmd)(nil), flags)
MustRegisterCommand("getGenerate", (*GetGenerateCmd)(nil), flags)
MustRegisterCommand("getHashesPerSec", (*GetHashesPerSecCmd)(nil), flags)
MustRegisterCommand("getInfo", (*GetInfoCmd)(nil), flags)
MustRegisterCommand("getManualNodeInfo", (*GetManualNodeInfoCmd)(nil), flags)
MustRegisterCommand("getMempoolEntry", (*GetMempoolEntryCmd)(nil), flags)
MustRegisterCommand("getMempoolInfo", (*GetMempoolInfoCmd)(nil), flags)
MustRegisterCommand("getMiningInfo", (*GetMiningInfoCmd)(nil), flags)
MustRegisterCommand("getNetworkInfo", (*GetNetworkInfoCmd)(nil), flags)
MustRegisterCommand("getNetTotals", (*GetNetTotalsCmd)(nil), flags)
MustRegisterCommand("getPeerInfo", (*GetPeerInfoCmd)(nil), flags)
@ -798,14 +737,12 @@ func init() {
MustRegisterCommand("removeManualNode", (*RemoveManualNodeCmd)(nil), flags)
MustRegisterCommand("searchRawTransactions", (*SearchRawTransactionsCmd)(nil), flags)
MustRegisterCommand("sendRawTransaction", (*SendRawTransactionCmd)(nil), flags)
MustRegisterCommand("setGenerate", (*SetGenerateCmd)(nil), flags)
MustRegisterCommand("stop", (*StopCmd)(nil), flags)
MustRegisterCommand("submitBlock", (*SubmitBlockCmd)(nil), flags)
MustRegisterCommand("uptime", (*UptimeCmd)(nil), flags)
MustRegisterCommand("validateAddress", (*ValidateAddressCmd)(nil), flags)
MustRegisterCommand("debugLevel", (*DebugLevelCmd)(nil), flags)
MustRegisterCommand("node", (*NodeCmd)(nil), flags)
MustRegisterCommand("generate", (*GenerateCmd)(nil), flags)
MustRegisterCommand("getSelectedTip", (*GetSelectedTipCmd)(nil), flags)
MustRegisterCommand("getCurrentNet", (*GetCurrentNetCmd)(nil), flags)
MustRegisterCommand("getHeaders", (*GetHeadersCmd)(nil), flags)

View File

@ -370,28 +370,6 @@ func TestRPCServerCommands(t *testing.T) {
marshalled: `{"jsonrpc":"1.0","method":"getDifficulty","params":[],"id":1}`,
unmarshalled: &rpcmodel.GetDifficultyCmd{},
},
{
name: "getGenerate",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("getGenerate")
},
staticCmd: func() interface{} {
return rpcmodel.NewGetGenerateCmd()
},
marshalled: `{"jsonrpc":"1.0","method":"getGenerate","params":[],"id":1}`,
unmarshalled: &rpcmodel.GetGenerateCmd{},
},
{
name: "getHashesPerSec",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("getHashesPerSec")
},
staticCmd: func() interface{} {
return rpcmodel.NewGetHashesPerSecCmd()
},
marshalled: `{"jsonrpc":"1.0","method":"getHashesPerSec","params":[],"id":1}`,
unmarshalled: &rpcmodel.GetHashesPerSecCmd{},
},
{
name: "getInfo",
newCmd: func() (interface{}, error) {
@ -441,17 +419,6 @@ func TestRPCServerCommands(t *testing.T) {
marshalled: `{"jsonrpc":"1.0","method":"getMempoolInfo","params":[],"id":1}`,
unmarshalled: &rpcmodel.GetMempoolInfoCmd{},
},
{
name: "getMiningInfo",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("getMiningInfo")
},
staticCmd: func() interface{} {
return rpcmodel.NewGetMiningInfoCmd()
},
marshalled: `{"jsonrpc":"1.0","method":"getMiningInfo","params":[],"id":1}`,
unmarshalled: &rpcmodel.GetMiningInfoCmd{},
},
{
name: "getNetworkInfo",
newCmd: func() (interface{}, error) {
@ -808,34 +775,6 @@ func TestRPCServerCommands(t *testing.T) {
AllowHighFees: rpcmodel.Bool(false),
},
},
{
name: "setGenerate",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("setGenerate", true)
},
staticCmd: func() interface{} {
return rpcmodel.NewSetGenerateCmd(true, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"setGenerate","params":[true],"id":1}`,
unmarshalled: &rpcmodel.SetGenerateCmd{
Generate: true,
GenProcLimit: rpcmodel.Int(-1),
},
},
{
name: "setGenerate optional",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("setGenerate", true, 6)
},
staticCmd: func() interface{} {
return rpcmodel.NewSetGenerateCmd(true, rpcmodel.Int(6))
},
marshalled: `{"jsonrpc":"1.0","method":"setGenerate","params":[true,6],"id":1}`,
unmarshalled: &rpcmodel.SetGenerateCmd{
Generate: true,
GenProcLimit: rpcmodel.Int(6),
},
},
{
name: "stop",
newCmd: func() (interface{}, error) {
@ -975,19 +914,6 @@ func TestRPCServerCommands(t *testing.T) {
ConnectSubCmd: rpcmodel.String("temp"),
},
},
{
name: "generate",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("generate", 1)
},
staticCmd: func() interface{} {
return rpcmodel.NewGenerateCmd(1)
},
marshalled: `{"jsonrpc":"1.0","method":"generate","params":[1],"id":1}`,
unmarshalled: &rpcmodel.GenerateCmd{
NumBlocks: 1,
},
},
{
name: "getSelectedTip",
newCmd: func() (interface{}, error) {

View File

@ -389,21 +389,6 @@ type Vout struct {
ScriptPubKey ScriptPubKeyResult `json:"scriptPubKey"`
}
// GetMiningInfoResult models the data from the getmininginfo command.
type GetMiningInfoResult struct {
	Blocks           int64   `json:"blocks"`
	CurrentBlockSize uint64  `json:"currentBlockSize"`
	CurrentBlockTx   uint64  `json:"currentBlockTx"`
	Difficulty       float64 `json:"difficulty"`
	Errors           string  `json:"errors"`
	Generate         bool    `json:"generate"`
	GenProcLimit     int32   `json:"genProcLimit"`
	HashesPerSec     int64   `json:"hashesPerSec"`
	PooledTx         uint64  `json:"pooledTx"`
	Testnet          bool    `json:"testnet"`
	Devnet           bool    `json:"devnet"`
}
// GetWorkResult models the data from the getwork command.
type GetWorkResult struct {
Data string `json:"data"`

View File

@ -267,18 +267,11 @@
; ------------------------------------------------------------------------------
; Coin Generation (Mining) Settings - The following options control the
; generation of block templates used by external mining applications through RPC
; calls as well as the built-in CPU miner (if enabled).
; calls.
; ------------------------------------------------------------------------------
; Enable built-in CPU mining.
;
; NOTE: This is typically only useful for testing purposes such as testnet or
; simnet since the difficulty on mainnet is far too high for CPU mining to be
; worth your while.
; generate=false
; Add addresses to pay mined blocks to for CPU mining and potentially in the
; block templates generated for the getblocktemplate RPC. One address per line.
; Add addresses to pay mined blocks to in the block templates generated
; for the getblocktemplate RPC. One address per line.
; miningaddr=kaspa:yourkaspaaddress
; miningaddr=kaspa:yourkaspaaddress2
; miningaddr=kaspa:yourkaspaaddress3
@ -325,5 +318,5 @@
; If subnetwork > 0, than node will request and process only payloads from
; specified subnetwork. And if subnetwork is 0, than payloads of all subnetworks
; are processed. It also requires that generate flag will be false.
; are processed.
; subnetwork=0

View File

@ -1,68 +0,0 @@
package rpc
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/rpcmodel"
)
// handleGenerate handles generate commands.
func handleGenerate(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
// Respond with an error if there are no addresses to pay the
// created blocks to.
if len(config.ActiveConfig().MiningAddrs) == 0 {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInternal.Code,
Message: "No payment addresses specified " +
"via --miningaddr",
}
}
if config.ActiveConfig().SubnetworkID != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInvalidRequest.Code,
Message: "`generate` is not supported on partial nodes.",
}
}
// Respond with an error if there's virtually 0 chance of mining a block
// with the CPU.
if !s.cfg.DAGParams.GenerateSupported {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCDifficulty,
Message: fmt.Sprintf("No support for `generate` on "+
"the current network, %s, as it's unlikely to "+
"be possible to mine a block with the CPU.",
s.cfg.DAGParams.Net),
}
}
c := cmd.(*rpcmodel.GenerateCmd)
// Respond with an error if the client is requesting 0 blocks to be generated.
if c.NumBlocks == 0 {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInternal.Code,
Message: "Please request a nonzero number of blocks to generate.",
}
}
// Create a reply
reply := make([]string, c.NumBlocks)
blockHashes, err := s.cfg.CPUMiner.GenerateNBlocks(c.NumBlocks)
if err != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInternal.Code,
Message: err.Error(),
}
}
// Mine the correct number of blocks, assigning the hex representation of the
// hash of each one to its place in the reply.
for i, hash := range blockHashes {
reply[i] = hash.String()
}
return reply, nil
}

View File

@ -110,7 +110,7 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{
// because in that state IsCurrent may still return true.
currentBlueScore := s.cfg.DAG.SelectedTipBlueScore()
if (currentBlueScore != 0 && !s.cfg.SyncMgr.IsCurrent()) ||
(currentBlueScore == 0 && !s.cfg.CPUMiner.ShouldMineOnGenesis()) {
(currentBlueScore == 0 && !s.cfg.shouldMineOnGenesis()) {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCClientInInitialDownload,
Message: "Kaspa is downloading blocks...",

View File

@ -1,6 +0,0 @@
package rpc
// handleGetGenerate implements the getGenerate command.
func handleGetGenerate(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
return s.cfg.CPUMiner.IsMining(), nil
}

View File

@ -1,18 +0,0 @@
package rpc
import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/rpcmodel"
)
// handleGetHashesPerSec implements the getHashesPerSec command.
func handleGetHashesPerSec(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
if config.ActiveConfig().SubnetworkID != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInvalidRequest.Code,
Message: "`getHashesPerSec` is not supported on partial nodes.",
}
}
return int64(s.cfg.CPUMiner.HashesPerSecond()), nil
}

View File

@ -1,40 +0,0 @@
package rpc
import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/rpcmodel"
)
// handleGetMiningInfo implements the getMiningInfo command. We only return the
// fields that are not related to wallet functionality.
func handleGetMiningInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
if config.ActiveConfig().SubnetworkID != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInvalidRequest.Code,
Message: "`getMiningInfo` is not supported on partial nodes.",
}
}
selectedTipHash := s.cfg.DAG.SelectedTipHash()
selectedBlock, err := s.cfg.DAG.BlockByHash(selectedTipHash)
if err != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInternal.Code,
Message: "could not find block for selected tip",
}
}
result := rpcmodel.GetMiningInfoResult{
Blocks: int64(s.cfg.DAG.BlockCount()),
CurrentBlockSize: uint64(selectedBlock.MsgBlock().SerializeSize()),
CurrentBlockTx: uint64(len(selectedBlock.MsgBlock().Transactions)),
Difficulty: getDifficultyRatio(s.cfg.DAG.CurrentBits(), s.cfg.DAGParams),
Generate: s.cfg.CPUMiner.IsMining(),
GenProcLimit: s.cfg.CPUMiner.NumWorkers(),
HashesPerSec: int64(s.cfg.CPUMiner.HashesPerSecond()),
PooledTx: uint64(s.cfg.TxMemPool.Count()),
Testnet: config.ActiveConfig().Testnet,
Devnet: config.ActiveConfig().Devnet,
}
return &result, nil
}

View File

@ -1,49 +0,0 @@
package rpc
import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/rpcmodel"
)
// handleSetGenerate implements the setGenerate command.
func handleSetGenerate(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
if config.ActiveConfig().SubnetworkID != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInvalidRequest.Code,
Message: "`setGenerate` is not supported on partial nodes.",
}
}
c := cmd.(*rpcmodel.SetGenerateCmd)
// Disable generation regardless of the provided generate flag if the
// maximum number of threads (goroutines for our purposes) is 0.
// Otherwise enable or disable it depending on the provided flag.
generate := c.Generate
genProcLimit := -1
if c.GenProcLimit != nil {
genProcLimit = *c.GenProcLimit
}
if genProcLimit == 0 {
generate = false
}
if !generate {
s.cfg.CPUMiner.Stop()
} else {
// Respond with an error if there are no addresses to pay the
// created blocks to.
if len(config.ActiveConfig().MiningAddrs) == 0 {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInternal.Code,
Message: "No payment addresses specified " +
"via --miningaddr",
}
}
// It's safe to call start even if it's already started.
s.cfg.CPUMiner.SetNumWorkers(int32(genProcLimit))
s.cfg.CPUMiner.Start()
}
return nil, nil
}

View File

@ -32,7 +32,6 @@ import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/mempool"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/mining/cpuminer"
"github.com/kaspanet/kaspad/peer"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/server/p2p"
@ -65,7 +64,6 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"debugLevel": handleDebugLevel,
"decodeRawTransaction": handleDecodeRawTransaction,
"decodeScript": handleDecodeScript,
"generate": handleGenerate,
"getAllManualNodesInfo": handleGetAllManualNodesInfo,
"getSelectedTip": handleGetSelectedTip,
"getSelectedTipHash": handleGetSelectedTipHash,
@ -79,14 +77,11 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"getConnectionCount": handleGetConnectionCount,
"getCurrentNet": handleGetCurrentNet,
"getDifficulty": handleGetDifficulty,
"getGenerate": handleGetGenerate,
"getHashesPerSec": handleGetHashesPerSec,
"getHeaders": handleGetHeaders,
"getTopHeaders": handleGetTopHeaders,
"getInfo": handleGetInfo,
"getManualNodeInfo": handleGetManualNodeInfo,
"getMempoolInfo": handleGetMempoolInfo,
"getMiningInfo": handleGetMiningInfo,
"getNetTotals": handleGetNetTotals,
"getPeerInfo": handleGetPeerInfo,
"getRawMempool": handleGetRawMempool,
@ -99,7 +94,6 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"removeManualNode": handleRemoveManualNode,
"searchRawTransactions": handleSearchRawTransactions,
"sendRawTransaction": handleSendRawTransaction,
"setGenerate": handleSetGenerate,
"stop": handleStop,
"submitBlock": handleSubmitBlock,
"uptime": handleUptime,
@ -781,17 +775,17 @@ type rpcserverConfig struct {
// These fields allow the RPC server to interface with mining.
//
// Generator produces block templates and the CPUMiner solves them using
// the CPU. CPU mining is typically only useful for test purposes when
// doing regression or simulation testing.
// Generator produces block templates that can be retrieved
// by the getBlockTemplate command.
Generator *mining.BlkTmplGenerator
CPUMiner *cpuminer.CPUMiner
// These fields define any optional indexes the RPC server can make use
// of to provide additional data when queried.
TxIndex *indexers.TxIndex
AddrIndex *indexers.AddrIndex
AcceptanceIndex *indexers.AcceptanceIndex
shouldMineOnGenesis func() bool
}
// setupRPCListeners returns a slice of listeners that are configured for use
@ -849,7 +843,6 @@ func NewRPCServer(
p2pServer *p2p.Server,
db database.DB,
blockTemplateGenerator *mining.BlkTmplGenerator,
cpuminer *cpuminer.CPUMiner,
) (*Server, error) {
// Setup listeners for the configured RPC listen addresses and
@ -862,20 +855,20 @@ func NewRPCServer(
return nil, errors.New("RPCS: No valid listen address")
}
cfg := &rpcserverConfig{
Listeners: rpcListeners,
StartupTime: startupTime,
ConnMgr: &rpcConnManager{p2pServer},
SyncMgr: &rpcSyncMgr{p2pServer, p2pServer.SyncManager},
TimeSource: p2pServer.TimeSource,
DAGParams: p2pServer.DAGParams,
DB: db,
TxMemPool: p2pServer.TxMemPool,
Generator: blockTemplateGenerator,
CPUMiner: cpuminer,
TxIndex: p2pServer.TxIndex,
AddrIndex: p2pServer.AddrIndex,
AcceptanceIndex: p2pServer.AcceptanceIndex,
DAG: p2pServer.DAG,
Listeners: rpcListeners,
StartupTime: startupTime,
ConnMgr: &rpcConnManager{p2pServer},
SyncMgr: &rpcSyncMgr{p2pServer, p2pServer.SyncManager},
TimeSource: p2pServer.TimeSource,
DAGParams: p2pServer.DAGParams,
DB: db,
TxMemPool: p2pServer.TxMemPool,
Generator: blockTemplateGenerator,
TxIndex: p2pServer.TxIndex,
AddrIndex: p2pServer.AddrIndex,
AcceptanceIndex: p2pServer.AcceptanceIndex,
DAG: p2pServer.DAG,
shouldMineOnGenesis: p2pServer.ShouldMineOnGenesis,
}
rpc := Server{
cfg: *cfg,

View File

@ -119,12 +119,6 @@ var helpDescsEnUS = map[string]string{
"decodeScript--synopsis": "Returns a JSON object with information about the provided hex-encoded script.",
"decodeScript-hexScript": "Hex-encoded script",
// GenerateCmd help
"generate--synopsis": "Generates a set number of blocks (simnet or regtest only) and returns a JSON\n" +
" array of their hashes.",
"generate-numBlocks": "Number of blocks to generate",
"generate--result0": "The hashes, in order, of blocks generated by the call",
// GetAllManualNodesInfoCmd help.
"getAllManualNodesInfo--synopsis": "Returns information about manually added (persistent) peers.",
"getAllManualNodesInfo-details": "Specifies whether the returned data is a JSON object including DNS and connection information, or just a list of added peers",
@ -385,14 +379,6 @@ var helpDescsEnUS = map[string]string{
"getDifficulty--synopsis": "Returns the proof-of-work difficulty as a multiple of the minimum difficulty.",
"getDifficulty--result0": "The difficulty",
// GetGenerateCmd help.
"getGenerate--synopsis": "Returns if the server is set to generate coins (mine) or not.",
"getGenerate--result0": "True if mining, false if not",
// GetHashesPerSecCmd help.
"getHashesPerSec--synopsis": "Returns a recent hashes per second performance measurement while generating coins (mining).",
"getHashesPerSec--result0": "The number of hashes per second",
// InfoDAGResult help.
"infoDagResult-version": "The version of the server",
"infoDagResult-protocolVersion": "The latest supported protocol version",
@ -427,22 +413,6 @@ var helpDescsEnUS = map[string]string{
"getMempoolInfoResult-bytes": "Size in bytes of the mempool",
"getMempoolInfoResult-size": "Number of transactions in the mempool",
// GetMiningInfoResult help.
"getMiningInfoResult-blocks": "Height of the latest best block",
"getMiningInfoResult-currentBlockSize": "Size of the latest best block",
"getMiningInfoResult-currentBlockTx": "Number of transactions in the latest best block",
"getMiningInfoResult-difficulty": "Current target difficulty",
"getMiningInfoResult-errors": "Any current errors",
"getMiningInfoResult-generate": "Whether or not server is set to generate coins",
"getMiningInfoResult-genProcLimit": "Number of processors to use for coin generation (-1 when disabled)",
"getMiningInfoResult-hashesPerSec": "Recent hashes per second performance measurement while generating coins",
"getMiningInfoResult-pooledTx": "Number of transactions in the memory pool",
"getMiningInfoResult-testnet": "Whether or not server is using testnet",
"getMiningInfoResult-devnet": "Whether or not server is using devnet",
// GetMiningInfoCmd help.
"getMiningInfo--synopsis": "Returns a JSON object containing mining-related information.",
// GetNetTotalsCmd help.
"getNetTotals--synopsis": "Returns a JSON object containing network traffic statistics.",
@ -560,11 +530,6 @@ var helpDescsEnUS = map[string]string{
"sendRawTransaction-allowHighFees": "Whether or not to allow insanely high fees (kaspad does not yet implement this parameter, so it has no effect)",
"sendRawTransaction--result0": "The hash of the transaction",
// SetGenerateCmd help.
"setGenerate--synopsis": "Set the server to generate coins (mine) or not.",
"setGenerate-generate": "Use true to enable generation, false to disable it",
"setGenerate-genProcLimit": "The number of processors (cores) to limit generation to or -1 for default",
// StopCmd help.
"stop--synopsis": "Shutdown kaspad.",
"stop--result0": "The string 'kaspad stopping.'",
@ -661,7 +626,6 @@ var rpcResultTypes = map[string][]interface{}{
"debugLevel": {(*string)(nil), (*string)(nil)},
"decodeRawTransaction": {(*rpcmodel.TxRawDecodeResult)(nil)},
"decodeScript": {(*rpcmodel.DecodeScriptResult)(nil)},
"generate": {(*[]string)(nil)},
"getAllManualNodesInfo": {(*[]string)(nil), (*[]rpcmodel.GetManualNodeInfoResult)(nil)},
"getSelectedTip": {(*rpcmodel.GetBlockVerboseResult)(nil)},
"getSelectedTipHash": {(*string)(nil)},
@ -675,14 +639,11 @@ var rpcResultTypes = map[string][]interface{}{
"getConnectionCount": {(*int32)(nil)},
"getCurrentNet": {(*uint32)(nil)},
"getDifficulty": {(*float64)(nil)},
"getGenerate": {(*bool)(nil)},
"getHashesPerSec": {(*float64)(nil)},
"getTopHeaders": {(*[]string)(nil)},
"getHeaders": {(*[]string)(nil)},
"getInfo": {(*rpcmodel.InfoDAGResult)(nil)},
"getManualNodeInfo": {(*string)(nil), (*rpcmodel.GetManualNodeInfoResult)(nil)},
"getMempoolInfo": {(*rpcmodel.GetMempoolInfoResult)(nil)},
"getMiningInfo": {(*rpcmodel.GetMiningInfoResult)(nil)},
"getNetTotals": {(*rpcmodel.GetNetTotalsResult)(nil)},
"getPeerInfo": {(*[]rpcmodel.GetPeerInfoResult)(nil)},
"getRawMempool": {(*[]string)(nil), (*rpcmodel.GetRawMempoolVerboseResult)(nil)},
@ -695,7 +656,6 @@ var rpcResultTypes = map[string][]interface{}{
"removeManualNode": nil,
"searchRawTransactions": {(*string)(nil), (*[]rpcmodel.SearchRawTransactionsResult)(nil)},
"sendRawTransaction": {(*string)(nil)},
"setGenerate": nil,
"stop": {(*string)(nil)},
"submitBlock": {nil, (*string)(nil)},
"uptime": {(*int64)(nil)},

View File

@ -9,7 +9,6 @@ import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/mempool"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/mining/cpuminer"
"github.com/kaspanet/kaspad/server/p2p"
"github.com/kaspanet/kaspad/server/rpc"
"github.com/kaspanet/kaspad/signal"
@ -19,7 +18,6 @@ import (
type Server struct {
rpcServer *rpc.Server
p2pServer *p2p.Server
cpuminer *cpuminer.CPUMiner
startupTime int64
started, shutdown int32
@ -39,14 +37,9 @@ func (s *Server) Start() {
s.p2pServer.Start()
// Start the CPU miner if generation is enabled.
cfg := config.ActiveConfig()
if cfg.Generate {
s.cpuminer.Start()
}
if !cfg.DisableRPC {
s.rpcServer.Start()
}
}
@ -62,9 +55,6 @@ func (s *Server) Stop() error {
log.Warnf("Server shutting down")
// Stop the CPU miner if needed
s.cpuminer.Stop()
s.p2pServer.Stop()
// Shutdown the RPC server if it's not disabled.
@ -97,32 +87,18 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
// Create the mining policy and block template generator based on the
// configuration options.
//
// NOTE: The CPU miner relies on the mempool, so the mempool has to be
// created before calling the function to create the CPU miner.
policy := mining.Policy{
BlockMaxMass: cfg.BlockMaxMass,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy,
s.p2pServer.DAGParams, s.p2pServer.TxMemPool, s.p2pServer.DAG, s.p2pServer.TimeSource, s.p2pServer.SigCache)
s.cpuminer = cpuminer.New(&cpuminer.Config{
DAGParams: dagParams,
BlockTemplateGenerator: blockTemplateGenerator,
MiningAddrs: cfg.MiningAddrs,
ProcessBlock: s.p2pServer.SyncManager.ProcessBlock,
ConnectedCount: s.p2pServer.ConnectedCount,
ShouldMineOnGenesis: s.p2pServer.ShouldMineOnGenesis,
IsCurrent: s.p2pServer.SyncManager.IsCurrent,
})
if !cfg.DisableRPC {
s.rpcServer, err = rpc.NewRPCServer(
s.startupTime,
s.p2pServer,
db,
blockTemplateGenerator,
s.cpuminer,
)
if err != nil {
return nil, err