Add stability tests (#1587)

* Add stability-tests

* Fix requires

* Fix golint errors

* Update README.md

* Remove payloadHash from everywhere

* don't run vet on kaspad in stability-tests/install_and_test
This commit is contained in:
Svarog 2021-03-09 15:01:08 +02:00 committed by GitHub
parent 27c1e4611e
commit a7299c1b87
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
104 changed files with 3720 additions and 0 deletions

View File

@ -0,0 +1,8 @@
# Stability-Test Tools
This package provides some higher-level tests for kaspad.
These are tests that are beyond the scope of unit-tests, and some of them might take a long time to run.
# Running
* To run only the fast running tests call `./install_and_test.sh`
* To include all tests call `SLOW=1 ./install_and_test.sh` (Note this will take many hours to finish)
* To run a single test, `cd [test-name]/run` and call `./run.sh`

View File

@ -0,0 +1,9 @@
# Application Level Garbage Generator
This tool sends invalid blocks to a node, making sure the node responds with a reject, and does not crash as a result.
## Running
1. `go install` kaspad and application-level-garbage.
2. `cd run`
3. `./run.sh`

View File

@ -0,0 +1,60 @@
package main
import (
"os"
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/infrastructure/config"
)
const (
	// defaultLogFilename / defaultErrLogFilename name the log files this tool
	// writes under the stability-tests home directory.
	defaultLogFilename    = "application_level_garbage.log"
	defaultErrLogFilename = "application_level_garbage_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds this tool's command-line configuration, parsed by
// parseConfig via go-flags.
type configFlags struct {
	NodeP2PAddress string `long:"addr" short:"a" description:"node's P2P address"`
	BlocksFilePath string `long:"blocks" short:"b" description:"path of file containing malformed blocks"`
	Profile        string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	config.NetworkFlags
}

// cfg is populated by parseConfig and read through activeConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration; valid only after parseConfig.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command line into the package-level cfg and
// initializes logging. It must be called before activeConfig.
// When --help is requested it prints usage (via flags.PrintErrors) and exits
// the process with code 0.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()

	if err != nil {
		// The help flag surfaces as a *flags.Error of type ErrHelp; usage has
		// already been printed by the parser, so just exit cleanly.
		if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp {
			os.Exit(0)
		}
		return err
	}

	// Resolve exactly one network (mainnet/testnet/devnet/simnet) from the flags.
	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return err
	}

	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)

	return nil
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the shared log backend for this tool.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged with the APLG subsystem.
	log = backendLog.Logger("APLG")
	// spawn starts goroutines whose panics are logged to `log` before crashing.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,51 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/standalone"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
)
// main connects to a kaspad node over P2P and feeds it the malformed blocks
// listed in the configured blocks file, expecting the node to reject each one
// without crashing.
func main() {
	defer panics.HandlePanic(log, "applicationLevelGarbage-main", nil)
	err := parseConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err)
		os.Exit(1)
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())
	cfg := activeConfig()

	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}

	kaspadConfig := config.DefaultConfig()
	kaspadConfig.NetworkFlags = cfg.NetworkFlags

	minimalNetAdapter, err := standalone.NewMinimalNetAdapter(kaspadConfig)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error creating minimalNetAdapter: %+v", err)
		// os.Exit skips deferred calls, so flush the log backend explicitly.
		backendLog.Close()
		os.Exit(1)
	}
	blocksChan, err := readBlocks()
	if err != nil {
		log.Errorf("Error reading blocks: %+v", err)
		backendLog.Close()
		os.Exit(1)
	}
	err = sendBlocks(cfg.NodeP2PAddress, minimalNetAdapter, blocksChan)
	if err != nil {
		log.Errorf("Error sending blocks: %+v", err)
		backendLog.Close()
		os.Exit(1)
	}
}

View File

@ -0,0 +1,32 @@
package main
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/pkg/errors"
)
// readBlocks returns a channel on which it sends every block listed in the
// configured blocks file (one JSON-encoded DomainBlock per line; `//` comment
// lines are skipped by common.ScanFile). The channel is closed when the file
// is exhausted. Malformed JSON panics, since the input is a test fixture.
func readBlocks() (<-chan *externalapi.DomainBlock, error) {
	c := make(chan *externalapi.DomainBlock)
	spawn("applicationLevelGarbage-readBlocks", func() {
		lineNum := 1
		for blockJSON := range common.ScanFile(activeConfig().BlocksFilePath) {
			domainBlock := &externalapi.DomainBlock{}

			err := json.Unmarshal(blockJSON, domainBlock)
			if err != nil {
				panic(errors.Wrapf(err, "error deserializing line No. %d with json %s", lineNum, blockJSON))
			}

			c <- domainBlock
			// Advance the counter so error messages point at the right line
			// (it previously stayed at its initial value forever).
			lineNum++
		}
		close(c)
	})
	return c, nil
}

View File

@ -0,0 +1,6 @@
// Genesis block
{"Header":{"Version":1,"ParentHashes":[],"HashMerkleRoot":[0,148,253,255,77,178,77,24,149,33,54,42,20,251,25,122,153,81,126,63,68,246,46,11,231,179,192,187,0,59,11,189],"AcceptedIDMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UTXOCommitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"TimeInMilliseconds":1593528309396,"Bits":511705087,"Nonce":282367},"Transactions":[{"Version":1,"Inputs":[],"Outputs":[],"LockTime":0,"SubnetworkID":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Gas":0,"Payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ=","Fee":0,"Mass":0}]}
// Bad hashMerkleRoot
{"Header":{"Version":268435456,"ParentHashes":[[19,45,252,243,207,3,251,33,48,184,103,121,188,46,24,78,8,215,235,247,182,134,60,62,224,140,141,2,60,253,254,102]],"HashMerkleRoot":[1,2,3,4,5],"AcceptedIDMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UTXOCommitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Timestamp":{},"Bits":511705087,"Nonce":4283},"Transactions":[{"Version":1,"TxIn":[],"TxOut":[],"LockTime":0,"SubnetworkID":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Gas":0,"Payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}]}
// Zeroed hashMerkleRoot
{"Header":{"Version":268435456,"ParentHashes":[[19,45,252,243,207,3,251,33,48,184,103,121,188,46,24,78,8,215,235,247,182,134,60,62,224,140,141,2,60,253,254,102]],"HashMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"AcceptedIDMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UTXOCommitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Timestamp":{},"Bits":511705087,"Nonce":4283},"Transactions":[{"Version":1,"TxIn":[],"TxOut":[],"LockTime":0,"SubnetworkID":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Gas":0,"Payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}]}

View File

@ -0,0 +1,33 @@
#!/bin/bash
rm -rf /tmp/kaspad-temp

kaspad --devnet --nobanning --datadir=/tmp/kaspad-temp --profile=6061 --loglevel=debug &
KASPAD_PID=$!
KASPAD_KILLED=0
function killKaspadIfNotKilled() {
  if [ $KASPAD_KILLED -eq 0 ]; then
    kill $KASPAD_PID
  fi
}
trap "killKaspadIfNotKilled" EXIT

sleep 1

application-level-garbage --devnet -alocalhost:16611 -b blocks.dat --profile=7000
TEST_EXIT_CODE=$?

kill $KASPAD_PID
wait $KASPAD_PID
# Capture kaspad's exit status immediately after `wait`: any intervening
# command (even a plain assignment) overwrites $?, which previously made this
# check always see 0.
KASPAD_EXIT_CODE=$?
KASPAD_KILLED=1

echo "Exit code: $TEST_EXIT_CODE"
echo "Kaspad exit code: $KASPAD_EXIT_CODE"

if [ $TEST_EXIT_CODE -eq 0 ] && [ $KASPAD_EXIT_CODE -eq 0 ]; then
  echo "application-level-garbage test: PASSED"
  exit 0
fi
echo "application-level-garbage test: FAILED"
exit 1

View File

@ -0,0 +1,51 @@
package main
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/standalone"
"github.com/pkg/errors"
)
// sendBlocks feeds each block from blocksChan to the node at `address`.
// For every block it opens a fresh P2P connection, advertises the block via an
// inv message, waits for the node to request it, sends the (invalid) block and
// then expects the node to drop the connection within the default timeout.
// A fresh connection per block is needed because the node is expected to
// disconnect us after each invalid block.
func sendBlocks(address string, minimalNetAdapter *standalone.MinimalNetAdapter, blocksChan <-chan *externalapi.DomainBlock) error {
	for block := range blocksChan {
		routes, err := minimalNetAdapter.Connect(address)
		if err != nil {
			return err
		}

		blockHash := consensushashing.BlockHash(block)
		log.Infof("Sending block %s", blockHash)

		err = routes.OutgoingRoute.Enqueue(&appmessage.MsgInvRelayBlock{
			Hash: blockHash,
		})
		if err != nil {
			return err
		}

		message, err := routes.WaitForMessageOfType(appmessage.CmdRequestRelayBlocks, common.DefaultTimeout)
		if err != nil {
			return err
		}
		requestRelayBlockMessage := message.(*appmessage.MsgRequestRelayBlocks)
		// Sanity-check that the node requested exactly the block we advertised.
		if len(requestRelayBlockMessage.Hashes) != 1 || *requestRelayBlockMessage.Hashes[0] != *blockHash {
			return errors.Errorf("Expecting requested hashes to be [%s], but got %v",
				blockHash, requestRelayBlockMessage.Hashes)
		}
		err = routes.OutgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
		if err != nil {
			return err
		}

		// TODO(libp2p): Wait for reject message once it has been implemented
		err = routes.WaitForDisconnect(common.DefaultTimeout)
		if err != nil {
			return err
		}
	}
	return nil
}

View File

@ -0,0 +1,32 @@
package common
import (
"fmt"
"os/exec"
"strings"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
// StartCmd launches args[0] as a separate process with the remaining args as
// its arguments, wiring the child's stdout and stderr into this package's
// logger. `name` is only used to prefix the log lines. The started *exec.Cmd
// is returned so the caller can wait on or kill the process.
func StartCmd(name string, args ...string) (*exec.Cmd, error) {
	command := exec.Command(args[0], args[1:]...)
	command.Stdout = NewLogWriter(log, logger.LevelTrace, name+"-STDOUT")
	command.Stderr = NewLogWriter(log, logger.LevelWarn, name+"-STDERR")
	log.Debugf("Starting command %s: %s", name, command)
	if err := command.Start(); err != nil {
		return nil, errors.WithStack(err)
	}
	return command, nil
}
// NetworkCliArgumentFromNetParams returns the kaspad command line argument that starts the given network.
// For example, params named "kaspa-devnet" yield "--devnet".
func NetworkCliArgumentFromNetParams(params *dagconfig.Params) string {
	networkName := strings.TrimPrefix(params.Name, "kaspa-")
	return fmt.Sprintf("--%s", networkName)
}

View File

@ -0,0 +1,67 @@
package common
import (
"bufio"
"bytes"
"encoding/hex"
"os"
"github.com/pkg/errors"
)
// ScanFile opens the file in the specified path, and returns a channel that
// sends the contents of the file line-by-line, ignoring lines beginning with //.
// The channel is closed on EOF; any read error panics, since the inputs here
// are test fixtures.
func ScanFile(filePath string) <-chan []byte {
	c := make(chan []byte)
	spawn("ScanFile", func() {
		file, err := os.Open(filePath)
		if err != nil {
			panic(errors.Wrapf(err, "error opening file %s", filePath))
		}
		defer file.Close()

		scanner := bufio.NewScanner(file)
		for scanner.Scan() {
			line := scanner.Bytes()
			if bytes.HasPrefix(line, []byte("//")) {
				continue
			}
			// Copy the line: scanner.Bytes() aliases the scanner's internal
			// buffer, which the next Scan call overwrites while the receiver
			// may still be holding the slice.
			lineCopy := make([]byte, len(line))
			copy(lineCopy, line)
			c <- lineCopy
		}
		// Scan returns false on both EOF and error; only an error sets
		// scanner.Err(), so it must be checked after the loop — the previous
		// in-loop check could never observe a failure.
		if err := scanner.Err(); err != nil {
			panic(errors.Wrap(err, "error reading line"))
		}
		close(c)
	})
	return c
}
// ScanHexFile opens the file in the specified path, and returns a channel that
// sends the contents of the file line-by-line, ignoring lines beginning with //,
// parsing the hex data in all other lines
func ScanHexFile(filePath string) <-chan []byte {
	c := make(chan []byte)
	spawn("ScanHexFile", func() {
		// lineNum is 1-based so error messages match editor line numbering
		// (of non-comment lines).
		lineNum := 1
		for lineHex := range ScanFile(filePath) {
			lineBytes := make([]byte, hex.DecodedLen(len(lineHex)))
			_, err := hex.Decode(lineBytes, lineHex)
			if err != nil {
				panic(errors.Wrapf(err, "error decoding line No. %d with hex %s", lineNum, lineHex))
			}
			c <- lineBytes
			lineNum++
		}
		close(c)
	})
	return c
}

View File

@ -0,0 +1,29 @@
package common
import (
"strings"
"github.com/kaspanet/kaspad/infrastructure/logger"
)
// LogWriter writes to the given log with the given log level and prefix
type LogWriter struct {
	log    *logger.Logger
	level  logger.Level
	prefix string
}

// Write implements io.Writer by forwarding p to the underlying logger as a
// single prefixed line. It always reports the full length of p as written and
// never fails, which makes it a safe stdout/stderr sink for exec.Cmd.
func (clw LogWriter) Write(p []byte) (n int, err error) {
	// Drop one trailing newline; the logger supplies its own line separator.
	logWithoutNewLine := strings.TrimSuffix(string(p), "\n")
	clw.log.Writef(clw.level, "%s: %s", clw.prefix, logWithoutNewLine)
	return len(p), nil
}

// NewLogWriter returns a new LogWriter that forwards to `log` all data written to it using at `level` level
func NewLogWriter(log *logger.Logger, level logger.Level, prefix string) LogWriter {
	return LogWriter{
		log:    log,
		level:  level,
		prefix: prefix,
	}
}

View File

@ -0,0 +1,72 @@
package common
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common/mine"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/kaspanet/kaspad/util/panics"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log *logger.Logger

// spawn wraps goroutine creation so panics are logged; it is rebound whenever
// the package logger changes.
var spawn func(name string, spawnedFunction func())

// logSubsytem is the subsystem tag under which this package logs.
const logSubsytem = "STCM"

// The default amount of logging is none.
func init() {
	DisableLog()
}

// DisableLog disables all library log output. Logging output is disabled
// by default until UseLogger is called.
func DisableLog() {
	backend := logger.NewBackend()
	log = backend.Logger(logSubsytem)
	log.SetLevel(logger.LevelOff)
	spawn = panics.GoroutineWrapperFunc(log)
	logger.SetLogLevels(logger.LevelOff)
	// NOTE(review): stdout logging is initialized at Info level even though
	// this function otherwise disables output — presumably so early messages
	// are not lost; confirm before changing.
	logger.InitLogStdout(logger.LevelInfo)
}

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(backend *logger.Backend, level logger.Level) {
	log = backend.Logger(logSubsytem)
	log.SetLevel(level)
	spawn = panics.GoroutineWrapperFunc(log)
	// Propagate the backend and level to this package's sub-packages too.
	mine.UseLogger(backend, level)
	rpc.UseLogger(backend, level)
	logger.SetLogLevels(level)
}
// InitBackend initializes the test log backend: a trace-level log file, a
// warn-level error log file, a debug-level stdout writer, and then starts the
// backend. Any failure is reported to stderr and aborts the process, since
// the stability tests cannot run without logging.
func InitBackend(backendLog *logger.Backend, logFile, errLogFile string) {
	err := backendLog.AddLogFile(logFile, logger.LevelTrace)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %+v\n", logFile, logger.LevelTrace, err)
		os.Exit(1)
	}
	err = backendLog.AddLogFile(errLogFile, logger.LevelWarn)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %+v\n", errLogFile, logger.LevelWarn, err)
		os.Exit(1)
	}
	err = backendLog.AddLogWriter(os.Stdout, logger.LevelDebug)
	if err != nil {
		// Report the level actually being configured (Debug); the previous
		// message referenced LevelInfo and was missing a space ("loggerfor").
		_, _ = fmt.Fprintf(os.Stderr, "Error adding stdout to the logger for level %s: %+v\n", logger.LevelDebug, err)
		os.Exit(1)
	}
	err = backendLog.Run()
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err)
		os.Exit(1)
	}
}

View File

@ -0,0 +1,95 @@
package mine
import (
"path/filepath"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
)
// leveldbCacheSizeMiB is the leveldb cache size, in MiB, for the mining DB.
const leveldbCacheSizeMiB = 256

// blockIDToHashBucket maps test-file block IDs to the hashes of the blocks
// mined for them.
var blockIDToHashBucket = database.MakeBucket([]byte("id-to-block-hash"))

// lastMinedBlockKey stores the ID of the most recently mined block so a rerun
// can resume where it left off.
var lastMinedBlockKey = database.MakeBucket(nil).Key([]byte("last-sent-block"))

// miningDB persists the ID<->hash mapping of mined blocks, with in-memory
// caches kept in sync with the on-disk leveldb store.
type miningDB struct {
	idToBlockHash map[string]*externalapi.DomainHash
	hashToBlockID map[externalapi.DomainHash]string
	db            *ldb.LevelDB
}
// hashByID returns the hash of the block mined for the given ID, or nil if
// the ID is unknown.
func (mdb *miningDB) hashByID(id string) *externalapi.DomainHash {
	return mdb.idToBlockHash[id]
}

// putID records the ID->hash mapping both in memory and on disk.
func (mdb *miningDB) putID(id string, hash *externalapi.DomainHash) error {
	mdb.idToBlockHash[id] = hash
	mdb.hashToBlockID[*hash] = id
	return mdb.db.Put(blockIDToHashBucket.Key([]byte(id)), hash.ByteSlice())
}

// updateLastMinedBlock persists the ID of the most recently mined block.
func (mdb *miningDB) updateLastMinedBlock(id string) error {
	return mdb.db.Put(lastMinedBlockKey, []byte(id))
}

// lastMinedBlock returns the ID of the most recently mined block, or "0"
// (the genesis ID) if none was recorded yet.
func (mdb *miningDB) lastMinedBlock() (string, error) {
	has, err := mdb.db.Has(lastMinedBlockKey)
	if err != nil {
		return "", err
	}

	if !has {
		return "0", nil
	}

	blockID, err := mdb.db.Get(lastMinedBlockKey)
	if err != nil {
		return "", err
	}
	return string(blockID), nil
}
// newMiningDB opens (or creates) the mining DB under dataDir/minedb and loads
// the complete ID<->hash mapping into memory.
func newMiningDB(dataDir string) (*miningDB, error) {
	idToBlockHash := make(map[string]*externalapi.DomainHash)
	hashToBlockID := make(map[externalapi.DomainHash]string)

	dbPath := filepath.Join(dataDir, "minedb")
	db, err := ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB)
	if err != nil {
		return nil, err
	}

	// Rebuild the in-memory caches from every entry in the on-disk bucket.
	cursor, err := db.Cursor(blockIDToHashBucket)
	if err != nil {
		return nil, err
	}
	for cursor.Next() {
		key, err := cursor.Key()
		if err != nil {
			return nil, err
		}

		value, err := cursor.Value()
		if err != nil {
			return nil, err
		}

		hash, err := externalapi.NewDomainHashFromByteSlice(value)
		if err != nil {
			return nil, err
		}

		// Keys are bucket-prefixed; the suffix is the raw block ID.
		id := string(key.Suffix())

		idToBlockHash[id] = hash
		hashToBlockID[*hash] = id
	}

	return &miningDB{
		idToBlockHash: idToBlockHash,
		hashToBlockID: hashToBlockID,
		db:            db,
	}, nil
}

View File

@ -0,0 +1,35 @@
package mine
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log *logger.Logger

// spawn wraps goroutine creation so panics are logged; it is rebound whenever
// the package logger changes.
var spawn func(name string, spawnedFunction func())

// logSubsytem is the subsystem tag under which this package logs.
const logSubsytem = "MFJS"

// The default amount of logging is none.
func init() {
	DisableLog()
}

// DisableLog disables all library log output. Logging output is disabled
// by default until UseLogger is called.
func DisableLog() {
	backend := logger.NewBackend()
	log = backend.Logger(logSubsytem)
	log.SetLevel(logger.LevelOff)
	spawn = panics.GoroutineWrapperFunc(log)
}

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(backend *logger.Backend, level logger.Level) {
	log = backend.Logger(logSubsytem)
	log.SetLevel(level)
	spawn = panics.GoroutineWrapperFunc(log)
}

View File

@ -0,0 +1,150 @@
package mine
import (
"math/rand"
"path/filepath"
"strings"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/pkg/errors"
)
// FromFile mines all blocks as described by `jsonFile` (a gzipped JSON array
// of {id, parents} entries), submitting each one to the node behind rpcClient.
// dataDir persists mining state so an interrupted run can resume.
func FromFile(jsonFile string, dagParams *dagconfig.Params, rpcClient *rpc.Client, dataDir string) error {
	log.Infof("Mining blocks from JSON file %s from data directory %s", jsonFile, dataDir)
	blockChan, err := readBlocks(jsonFile)
	if err != nil {
		return err
	}

	return mineBlocks(dagParams, rpcClient, blockChan, dataDir)
}
// mineBlocks consumes blockChan, mines (or fetches previously-mined) blocks
// via a local test consensus, and submits each one to the node over RPC,
// skipping blocks the node already has. Progress is logged every 1000 blocks.
func mineBlocks(dagParams *dagconfig.Params, rpcClient *rpc.Client, blockChan <-chan JSONBlock, dataDir string) error {
	mdb, err := newMiningDB(dataDir)
	if err != nil {
		return err
	}

	dbPath := filepath.Join(dataDir, "db")
	factory := consensus.NewFactory()
	factory.SetTestDataDir(dbPath)
	testConsensus, tearDownFunc, err := factory.NewTestConsensus(dagParams, false, "minejson")
	if err != nil {
		return err
	}
	defer tearDownFunc(true)

	info, err := testConsensus.GetSyncInfo()
	if err != nil {
		return err
	}

	log.Infof("Starting with data directory with %d headers and %d blocks", info.HeaderCount, info.BlockCount)

	// The genesis block is pre-registered under ID "0".
	err = mdb.putID("0", dagParams.GenesisHash)
	if err != nil {
		return err
	}

	totalBlocksSubmitted := 0
	lastLogTime := time.Now()
	rpcWaitInInterval := 0 * time.Second
	for blockData := range blockChan {
		// Resume support: if the block was already mined in a previous run
		// and the node already knows it, skip it. Any GetBlock error other
		// than "not found" is a real failure.
		if hash := mdb.hashByID(blockData.ID); hash != nil {
			_, err := rpcClient.GetBlock(hash.String(), false)
			if err == nil {
				continue
			}
			if !strings.Contains(err.Error(), "not found") {
				return err
			}
		}

		block, err := mineOrFetchBlock(blockData, mdb, testConsensus)
		if err != nil {
			return err
		}

		// Track how much of each log interval was spent waiting on RPC.
		beforeSubmitBlockTime := time.Now()
		rejectReason, err := rpcClient.SubmitBlock(block)
		if err != nil {
			return errors.Wrap(err, "error in SubmitBlock")
		}
		if rejectReason != appmessage.RejectReasonNone {
			return errors.Errorf("block rejected in SubmitBlock")
		}
		rpcWaitInInterval += time.Since(beforeSubmitBlockTime)

		totalBlocksSubmitted++
		const logInterval = 1000
		if totalBlocksSubmitted%logInterval == 0 {
			intervalDuration := time.Since(lastLogTime)
			blocksPerSecond := logInterval / intervalDuration.Seconds()
			log.Infof("It took %s to submit %d blocks (%f blocks/sec) while %s of it it waited for RPC response"+
				" (total blocks sent %d)", intervalDuration, logInterval, blocksPerSecond, rpcWaitInInterval,
				totalBlocksSubmitted)
			rpcWaitInInterval = 0
			lastLogTime = time.Now()
		}

		blockHash := consensushashing.BlockHash(block)
		log.Tracef("Submitted block %s with hash %s", blockData.ID, blockHash)
	}
	return nil
}
// mineOrFetchBlock returns the block for blockData: if it was already mined in
// a previous run it is fetched from the test consensus; otherwise it is built
// on top of its parents, solved (unless proof-of-work is skipped), inserted
// into the test consensus and recorded in the mining DB.
func mineOrFetchBlock(blockData JSONBlock, mdb *miningDB, testConsensus testapi.TestConsensus) (*externalapi.DomainBlock, error) {
	// Reuse the single lookup instead of querying the map twice.
	hash := mdb.hashByID(blockData.ID)
	if hash != nil {
		return testConsensus.GetBlock(hash)
	}

	parentHashes := make([]*externalapi.DomainHash, len(blockData.Parents))
	for i, parentID := range blockData.Parents {
		parentHashes[i] = mdb.hashByID(parentID)
	}
	block, _, err := testConsensus.BuildBlockWithParents(parentHashes,
		&externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{}}, []*externalapi.DomainTransaction{})
	if err != nil {
		return nil, errors.Wrap(err, "error in BuildBlockWithParents")
	}

	if !testConsensus.DAGParams().SkipProofOfWork {
		SolveBlock(block)
	}

	_, err = testConsensus.ValidateAndInsertBlock(block)
	if err != nil {
		return nil, errors.Wrap(err, "error in ValidateAndInsertBlock")
	}

	// Record the mined block so future runs (and later blocks that list this
	// one as a parent) can find it.
	blockHash := consensushashing.BlockHash(block)
	err = mdb.putID(blockData.ID, blockHash)
	if err != nil {
		return nil, err
	}

	err = mdb.updateLastMinedBlock(blockData.ID)
	if err != nil {
		return nil, err
	}

	return block, nil
}
// random seeds block solving once per process so runs are not identical.
// NOTE(review): rand.Rand is not safe for concurrent use — fine while
// SolveBlock is called from a single goroutine; confirm before parallelizing.
var random = rand.New(rand.NewSource(time.Now().UnixNano()))

// SolveBlock increments the given block's nonce until it matches the difficulty requirements in its bits field
func SolveBlock(block *externalapi.DomainBlock) {
	mining.SolveBlock(block, random)
}

View File

@ -0,0 +1,58 @@
package mine
import (
"compress/gzip"
"encoding/json"
"os"
)
// JSONBlock is a json representation of a block in mine format
type JSONBlock struct {
	// ID is a unique identifier for the block within the test file.
	ID string `json:"id"`
	// Parents lists the IDs of the block's parent blocks.
	Parents []string `json:"parents"`
}
// readBlocks opens the gzipped JSON file and returns a channel on which it
// sends every JSONBlock of the top-level array, closing the channel when the
// array ends. Decode errors panic, since the input is a test fixture.
func readBlocks(jsonFile string) (<-chan JSONBlock, error) {
	f, err := os.Open(jsonFile)
	if err != nil {
		return nil, err
	}
	gzipReader, err := gzip.NewReader(f)
	if err != nil {
		// Return the error (matching the signature) instead of panicking,
		// and don't leak the file handle.
		f.Close()
		return nil, err
	}
	decoder := json.NewDecoder(gzipReader)
	blockChan := make(chan JSONBlock)
	spawn("mineFromJson.readBlocks", func() {
		// Close the readers from the goroutine, not via defer in readBlocks
		// itself: a defer in the outer function fires as soon as the channel
		// is returned, closing the stream while decoding is still in
		// progress. The underlying file must be closed explicitly too —
		// gzip.Reader.Close does not close it.
		defer f.Close()
		defer gzipReader.Close()

		// read open bracket
		_, err := decoder.Token()
		if err != nil {
			panic(err)
		}
		// while the array contains values
		for decoder.More() {
			var block JSONBlock
			// decode an array value (Message)
			err := decoder.Decode(&block)
			if err != nil {
				panic(err)
			}

			blockChan <- block
		}

		// read closing bracket
		_, err = decoder.Token()
		if err != nil {
			panic(err)
		}
		close(blockChan)
	})
	return blockChan, nil
}

View File

@ -0,0 +1,6 @@
package common
import "github.com/kaspanet/kaspad/util"
// DefaultHomeDir is the default home directory to be used by all tests
// (an OS-appropriate application-data directory named "stability-tests").
var DefaultHomeDir = util.AppDataDir("stability-tests", false)

View File

@ -0,0 +1,35 @@
package rpc
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log *logger.Logger

// spawn wraps goroutine creation so panics are logged; it is rebound whenever
// the package logger changes.
var spawn func(name string, spawnedFunction func())

// logSubsytem is the subsystem tag under which this package logs.
const logSubsytem = "CRPC"

// The default amount of logging is none.
func init() {
	DisableLog()
}

// DisableLog disables all library log output. Logging output is disabled
// by default until UseLogger is called.
func DisableLog() {
	backend := logger.NewBackend()
	log = backend.Logger(logSubsytem)
	log.SetLevel(logger.LevelOff)
	spawn = panics.GoroutineWrapperFunc(log)
}

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(backend *logger.Backend, level logger.Level) {
	log = backend.Logger(logSubsytem)
	log.SetLevel(level)
	spawn = panics.GoroutineWrapperFunc(log)
}

View File

@ -0,0 +1,61 @@
package rpc
import (
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
"github.com/kaspanet/kaspad/domain/dagconfig"
)
// defaultRPCServer is used when no --rpcserver flag was supplied.
const defaultRPCServer = "localhost"

// Config are configurations common to all tests that need to connect to json-rpc
type Config struct {
	RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
}

// ValidateRPCConfig makes sure that provided Config is valid or returns an error otherwise
func ValidateRPCConfig(config *Config) error {
	if config.RPCServer != "" {
		return nil
	}
	// Fall back to the default server when none was configured.
	config.RPCServer = defaultRPCServer
	return nil
}
// Client wraps rpcclient.RPCClient with extra functionality needed for stability-tests
type Client struct {
	*rpcclient.RPCClient
	// OnBlockAdded receives an empty struct for every block-added
	// notification; see RegisterForBlockAddedNotifications.
	OnBlockAdded chan struct{}
}
// ConnectToRPC connects to JSON-RPC server specified in the provided config
// (the address is normalized against the given network's default RPC port)
// and returns a Client with a 120-second call timeout.
func ConnectToRPC(config *Config, dagParams *dagconfig.Params) (*Client, error) {
	rpcAddress, err := dagParams.NormalizeRPCServerAddress(config.RPCServer)
	if err != nil {
		return nil, err
	}
	rpcClient, err := rpcclient.NewRPCClient(rpcAddress)
	if err != nil {
		return nil, err
	}
	// Stability tests can be slow; allow RPC calls a generous timeout.
	rpcClient.SetTimeout(time.Second * 120)
	rpcClient.SetOnErrorHandler(func(err error) {
		log.Errorf("Error from Client: %+v", err)
	})

	client := &Client{
		RPCClient:    rpcClient,
		OnBlockAdded: make(chan struct{}),
	}

	return client, nil
}
// RegisterForBlockAddedNotifications registers for block added notifications pushed by the node.
// Each notification performs a blocking send on c.OnBlockAdded, so the caller
// must keep draining that channel or notification handling will stall.
func (c *Client) RegisterForBlockAddedNotifications() error {
	return c.RPCClient.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
		c.OnBlockAdded <- struct{}{}
	})
}

View File

@ -0,0 +1,9 @@
package common
import "io/ioutil"
// TempDir creates and returns a new temporary directory whose name is
// "STABILITY_TEMP_DIR_" followed by the given pattern.
func TempDir(pattern string) (string, error) {
	return ioutil.TempDir("", "STABILITY_TEMP_DIR_"+pattern)
}

View File

@ -0,0 +1,27 @@
# KASPAD_VERSION selects the kaspad/kaspaminer release candidate under test.
ARG KASPAD_VERSION

# Pull the prebuilt kaspad and kaspaminer binaries from their release images.
FROM 578712463641.dkr.ecr.eu-central-1.amazonaws.com/kaspad-release-candidate:$KASPAD_VERSION as kaspad
FROM 578712463641.dkr.ecr.eu-central-1.amazonaws.com/kaspaminer-release-candidate:$KASPAD_VERSION as kaspaminer

FROM golang:1.16-alpine

RUN apk add bash build-base git

# ARG must be re-declared after FROM to be visible in this build stage.
ARG KASPAD_VERSION

COPY --from=kaspad /app/ /app/
COPY --from=kaspaminer /app/ /app/

ENV PATH="/app:${PATH}"

COPY . /tests
WORKDIR /tests

# Resolve the version ref to a commit hash and pin the kaspad module to it,
# so the tests compile against exactly the code under test.
RUN git ls-remote https://github.com/kaspanet/kaspad.git $KASPAD_VERSION | awk '{print $1;}' > /tmp/kaspad_git_commit

RUN go mod edit -dropreplace github.com/kaspanet/kaspad
RUN go mod edit -replace github.com/kaspanet/kaspad=github.com/kaspanet/kaspad@`cat /tmp/kaspad_git_commit` ;

RUN go mod download

RUN go install ./...

ENTRYPOINT ["./run/run.sh"]

View File

@ -0,0 +1,9 @@
# Infra Level Garbage Generator
This tool sends invalid messages to a node, making sure the node does not crash as a result.
## Running
1. `go install` kaspad and infra-level-garbage.
2. `cd run`
3. `./run.sh`

View File

@ -0,0 +1,60 @@
package main
import (
"os"
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/jessevdk/go-flags"
)
const (
	// defaultLogFilename / defaultErrLogFilename name the log files this tool
	// writes under the stability-tests home directory.
	defaultLogFilename    = "infra_level_garbage.log"
	defaultErrLogFilename = "infra_level_garbage_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds this tool's command-line configuration, parsed by
// parseConfig via go-flags.
type configFlags struct {
	NodeP2PAddress   string `long:"addr" short:"a" description:"node's P2P address"`
	MessagesFilePath string `long:"messages" short:"m" description:"path of file containing malformed messages"`
	Profile          string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	config.NetworkFlags
}

// cfg is populated by parseConfig and read through activeConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration; valid only after parseConfig.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command line into the package-level cfg and
// initializes logging. It must be called before activeConfig.
// When --help is requested it prints usage (via flags.PrintErrors) and exits
// the process with code 0.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()

	if err != nil {
		// The help flag surfaces as a *flags.Error of type ErrHelp; usage has
		// already been printed by the parser, so just exit cleanly.
		if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp {
			os.Exit(0)
		}
		return err
	}

	// Resolve exactly one network (mainnet/testnet/devnet/simnet) from the flags.
	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return err
	}

	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)

	return nil
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the shared log backend for this tool.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged with the IFLG subsystem.
	log = backendLog.Logger("IFLG")
	// spawn starts goroutines whose panics are logged to `log` before crashing.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,35 @@
package main
import (
"fmt"
"os"
"time"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/profiling"
)
// NOTE(review): this constant appears unused in this package's visible files —
// sendMessage uses common.DefaultTimeout instead. Removing it would also
// orphan the "time" import; confirm and clean both up together.
const timeout = 5 * time.Second

// main feeds the malformed messages from the configured hex file to a kaspad
// node's P2P port, expecting the node to survive all of them.
func main() {
	err := parseConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err)
		os.Exit(1)
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())
	cfg := activeConfig()

	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}

	messagesChan := common.ScanHexFile(cfg.MessagesFilePath)

	err = sendMessages(cfg.NodeP2PAddress, messagesChan)
	if err != nil {
		log.Errorf("Error sending messages: %+v", err)
		// os.Exit skips deferred calls, so flush the log backend explicitly.
		backendLog.Close()
		os.Exit(1)
	}
}

View File

@ -0,0 +1,6 @@
// Some block message without proper headers
0100000000b01c3b9e0d9ac0800a08425002a3eadbedc8d0ad3503d80e113c7bb2b520e584000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001cd3155e00000000ffff7f2003000000000000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0e00000b2f503253482f627463642fffffffffffffffff000000000000000000010000000000000000000000000000000000000000000000000000008948d3239cf9882b63c7330fa364f2db39735f2ba8d57b5c3168c963375ce7412417a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b876b617370612d73696d6e6574
// Yet another block message without proper headers
00000010011269cc4545745bf9d54e43564f1bdf3109b776aa2a3335c9a180e092bbaecd4989d3c593eaefeae6a6565d23fbf690baa3ca6d57879b891ff9b4a9e100f325d1f51149f1ce2eb20d5756cc018fa8fb33c30c5b22bee6e9f1115d91e664c21cac00000000000000000000000000000000000000000000000000000000000000001dd3155e00000000ffff7f200100000000000000010100000001ff69cc4545745bf9d54e43564f1bdf3109b776aa2a3335c9a180e092bbaecd49ffffffff00ffffffffffffffff0100f2052a0100000017a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b870000000000000000010000000000000000000000000000000000000000000000000000009d41187eeda4734f163dbcea2049cdbfcac7818ce24f81f1307c7fb67b9057e22817a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b8701000000000000002f6b61737061642f
// Something silly
4352ab48e65214869465465798713187437834576457854798436757946539738469873456983456983457684567

View File

@ -0,0 +1,25 @@
#!/bin/bash

# Start a throwaway devnet kaspad with a clean data directory.
rm -rf /tmp/kaspad-temp
kaspad --devnet --nobanning --datadir=/tmp/kaspad-temp --profile=6061 &
KASPAD_PID=$!

# Give the node a moment to start listening.
sleep 1

# Throw the malformed messages at the node's P2P port.
infra-level-garbage --devnet -alocalhost:16611 -m messages.dat --profile=7000
TEST_EXIT_CODE=$?

# Shut kaspad down; $? is read immediately after `wait` so it reflects
# kaspad's exit status.
kill $KASPAD_PID
wait $KASPAD_PID
KASPAD_EXIT_CODE=$?

echo "Exit code: $TEST_EXIT_CODE"
echo "Kaspad exit code: $KASPAD_EXIT_CODE"

# Pass only if both the generator and kaspad exited cleanly.
if [ $TEST_EXIT_CODE -eq 0 ] && [ $KASPAD_EXIT_CODE -eq 0 ]; then
  echo "infra-level-garbage test: PASSED"
  exit 0
fi
echo "infra-level-garbage test: FAILED"
exit 1

View File

@ -0,0 +1,56 @@
package main
import (
"encoding/hex"
"net"
"time"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/pkg/errors"
)
// sendMessages dials the node at `address` and writes each raw message from
// messagesChan to it. If a write fails (the node may have disconnected us in
// response to an earlier message) it reconnects once and retries; a second
// failure is returned as the error.
func sendMessages(address string, messagesChan <-chan []byte) error {
	connection, err := dialToNode(address)
	if err != nil {
		return err
	}
	// Close whatever connection is current when we're done. The closure is
	// needed because `connection` may be reassigned on reconnect.
	defer func() {
		connection.Close()
	}()
	for message := range messagesChan {
		messageHex := make([]byte, hex.EncodedLen(len(message)))
		hex.Encode(messageHex, message)
		log.Infof("Sending message %s", messageHex)

		err := sendMessage(connection, message)
		if err != nil {
			// if failed once, we might have been disconnected because of a previous message,
			// so re-connect and retry before reporting error
			connection.Close() // drop the stale connection before re-dialing
			connection, err = dialToNode(address)
			if err != nil {
				return err
			}
			err = sendMessage(connection, message)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// sendMessage writes a single raw message to connection, bounding the write
// with the project-wide protocol timeout so a stuck peer can't hang the test.
func sendMessage(connection net.Conn, message []byte) error {
	// SetDeadline applies to the Write below; common.DefaultTimeout is the
	// protocol-level timeout shared across the codebase.
	err := connection.SetDeadline(time.Now().Add(common.DefaultTimeout))
	if err != nil {
		return errors.Wrap(err, "Error setting connection deadline")
	}
	_, err = connection.Write(message)
	return err
}
// dialToNode opens a plain TCP connection to the node listening at address.
func dialToNode(address string) (net.Conn, error) {
	conn, dialErr := net.Dial("tcp", address)
	if dialErr == nil {
		return conn, nil
	}
	return nil, errors.Wrap(dialErr, "Error connecting to node")
}

View File

@ -0,0 +1,25 @@
#!/bin/sh -ex
# Builds, lints and runs the stability-test suite.
# Usage: [SLOW=1] ./install_and_test.sh [extra go flags]
FLAGS=$@
go version
# Fetch all dependencies (including test-only ones) for the whole repository.
go get $FLAGS -t -d ../...
# This is to bypass a go bug: https://github.com/golang/go/issues/27643
GO111MODULE=off go get $FLAGS honnef.co/go/tools/cmd/staticcheck
# Fail if any file is not gofmt-clean.
test -z "$(go fmt ./...)"
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...
# vet/lint run only on the stability-tests tree (./...), not on kaspad itself.
go vet -composites=false $FLAGS ./...
golint -set_exit_status $FLAGS ./...
go install $FLAGS ../...
# SLOW=1 includes the long-running tests (can take many hours).
if [ -n "${SLOW}" ]
then
./run/run.sh slow
else
./run/run.sh
fi

View File

@ -0,0 +1,13 @@
# Kaspad Sanity tool
This tool tries to run kaspad with different sets of arguments for sanity.
In order to get a clean run for each command, the tool injects its own --datadir
argument so that it can clean it between runs; it is therefore forbidden to pass
--datadir as part of the arguments set.
## Running
1. `go install` kaspad and kaspadsanity.
2. `cd run`
3. `./run.sh`

View File

@ -0,0 +1,80 @@
package main
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/pkg/errors"
)
type commandFailure struct {
cmd *exec.Cmd
err error
}
func (cf commandFailure) String() string {
return fmt.Sprintf("command `%s` failed: %s", cf.cmd, cf.err)
}
// commandLoop runs each argument set received on argsChan as a `kaspad`
// invocation, starting every run from a freshly-emptied data directory.
//
// A command that is still running after one minute is considered successful
// (kaspad is a daemon; "keeps running" is the sanity criterion) and is killed.
// A command that exits within the minute is recorded as a commandFailure.
// The collected failures are returned; a non-nil error is returned only for
// infrastructure problems (temp-dir handling, Kill failing).
func commandLoop(argsChan <-chan []string) ([]commandFailure, error) {
	failures := make([]commandFailure, 0)
	dataDirectoryPath, err := common.TempDir("kaspadsanity-kaspad-datadir")
	if err != nil {
		return nil, errors.Wrapf(err, "error creating temp dir")
	}
	defer os.RemoveAll(dataDirectoryPath)

	for args := range argsChan {
		// Start every run from an empty data directory.
		err := os.RemoveAll(dataDirectoryPath)
		if err != nil {
			return nil, err
		}
		args, err = handleDataDirArg(args, dataDirectoryPath)
		if err != nil {
			return nil, err
		}

		cmd := exec.Command("kaspad", args...)
		cmd.Stdout = common.NewLogWriter(log, logger.LevelTrace, "KASPAD-STDOUT")
		cmd.Stderr = common.NewLogWriter(log, logger.LevelWarn, "KASPAD-STDERR")
		log.Infof("Running `%s`", cmd)

		// Buffered so the goroutine below can deliver cmd.Run's result even
		// after we've taken the timeout branch and stopped listening; with an
		// unbuffered channel that send blocks forever and leaks one goroutine
		// per timed-out command.
		errChan := make(chan error, 1)
		spawn("commandLoop-cmd.Run", func() {
			errChan <- cmd.Run()
		})

		const timeout = time.Minute
		select {
		case err := <-errChan:
			failure := commandFailure{
				cmd: cmd,
				err: err,
			}
			log.Error(failure)
			failures = append(failures, failure)
		case <-time.After(timeout):
			err := cmd.Process.Kill()
			if err != nil {
				return nil, errors.Wrapf(err, "error in Kill")
			}
			log.Infof("Successfully run `%s`", cmd)
		}
	}
	return failures, nil
}
// handleDataDirArg prepends "--datadir <dataDir>" to args. An explicit
// --datadir in args is rejected, since this tool owns the data directory.
func handleDataDirArg(args []string, dataDir string) ([]string, error) {
	for _, argument := range args {
		if !strings.HasPrefix(argument, "--datadir") {
			continue
		}
		return nil, errors.New("invalid argument --datadir")
	}
	injected := []string{"--datadir", dataDir}
	return append(injected, args...), nil
}

View File

@ -0,0 +1,44 @@
package main
import (
"path/filepath"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/stability-tests/common"
)
const (
	// Names of the log files written under the stability-tests home directory.
	defaultLogFilename    = "kaspadsanity.log"
	defaultErrLogFilename = "kaspadsanity_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds kaspadsanity's command-line options.
type configFlags struct {
	// CommandListFile points at a file with one kaspad argument set per line.
	CommandListFile string `long:"command-list-file" description:"Path to the command list file"`
	LogLevel        string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"`
	Profile         string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
}

// cfg is the package-level parsed configuration, set once by parseConfig.
var cfg *configFlags

// activeConfig returns the configuration parsed by parseConfig.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command line into the package-level cfg and
// initializes logging. Returns the parse error, if any.
func parseConfig() error {
	cfg = &configFlags{}
	if _, err := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag).Parse(); err != nil {
		return err
	}
	initLog(defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,30 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend shared by this tool's loggers.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged "KSSA".
	log = backendLog.Logger("KSSA")
	// spawn launches goroutines with the project's panic handling attached.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog sets the log level from the parsed configuration (defaulting to
// info) and wires the backend to the given log files. An unknown log-level
// string is reported on stderr and terminates the process.
func initLog(logFile, errLogFile string) {
	level := logger.LevelInfo
	if activeConfig().LogLevel != "" {
		var ok bool
		level, ok = logger.LevelFromString(activeConfig().LogLevel)
		if !ok {
			// Fixed message grammar and added the previously-missing newline.
			fmt.Fprintf(os.Stderr, "Log level %s doesn't exist\n", activeConfig().LogLevel)
			os.Exit(1)
		}
	}
	log.SetLevel(level)
	common.InitBackend(backendLog, logFile, errLogFile)
}

View File

@ -0,0 +1,44 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
// main runs every argument set listed in the command-list file through
// kaspad (via commandLoop) and reports the invocations that terminated
// unexpectedly. Exits 1 if any invocation failed.
func main() {
	defer panics.HandlePanic(log, "kaspadsanity-main", nil)
	err := parseConfig()
	if err != nil {
		panic(errors.Wrap(err, "error in parseConfig"))
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())
	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}
	argsChan := readArgs()
	failures, err := commandLoop(argsChan)
	if err != nil {
		panic(errors.Wrap(err, "error in commandLoop"))
	}
	if len(failures) > 0 {
		fmt.Fprintf(os.Stderr, "FAILED:\n")
		for _, failure := range failures {
			fmt.Fprintln(os.Stderr, failure)
		}
		// Close explicitly: os.Exit skips deferred calls, and the log must be
		// flushed before terminating with a failure code.
		backendLog.Close()
		os.Exit(1)
	}
	log.Infof("All tests have passed")
}

View File

@ -0,0 +1,43 @@
package main
import (
"bufio"
"io"
"os"
"strings"
"github.com/pkg/errors"
)
// readArgs streams the command-list file as argument sets, one per line.
// Blank lines and lines starting with "//" are skipped; every other line is
// split on single spaces and sent on the returned channel, which is closed
// at EOF. Read errors panic (this is a test tool).
func readArgs() <-chan []string {
	argsChan := make(chan []string)
	spawn("readArgs", func() {
		f, err := os.Open(cfg.CommandListFile)
		if err != nil {
			panic(errors.Wrapf(err, "error in Open"))
		}
		// The previous version leaked this file handle.
		defer f.Close()

		r := bufio.NewReader(f)
		for {
			// ReadString handles arbitrarily long lines; the previous
			// ReadLine-based loop ignored isPrefix and could silently split
			// a long line into several argument sets.
			line, err := r.ReadString('\n')
			if err != nil && err != io.EOF {
				panic(errors.Wrapf(err, "error in ReadString"))
			}
			trimmedLine := strings.TrimSpace(line)
			if trimmedLine != "" && !strings.HasPrefix(trimmedLine, "//") {
				argsChan <- strings.Split(trimmedLine, " ")
			}
			if err == io.EOF {
				break
			}
		}
		close(argsChan)
	})
	return argsChan
}

View File

@ -0,0 +1,2 @@
--devnet
--devnet --nobanning

View File

@ -0,0 +1,12 @@
#!/bin/bash
# Run kaspadsanity over the bundled command list and report PASS/FAIL.
kaspadsanity --command-list-file ./commands-list --profile=7000
TEST_EXIT_CODE=$?

echo "Exit code: $TEST_EXIT_CODE"

if [ $TEST_EXIT_CODE -ne 0 ]; then
  echo "kaspadsanity test: FAILED"
  exit 1
fi
echo "kaspadsanity test: PASSED"
exit 0

View File

@ -0,0 +1,61 @@
package main
import (
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/jessevdk/go-flags"
)
const (
	// Names of the log files written under the stability-tests home directory.
	defaultLogFilename    = "minejson.log"
	defaultErrLogFilename = "minejson_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds minejson's command-line options: embedded RPC and
// network flags plus the DAG description file to mine from.
type configFlags struct {
	rpc.Config
	// DAGFile is a JSON description of the DAG to mine (consumed by mine.FromFile).
	DAGFile string `long:"dag-file" description:"Path to DAG JSON file"`
	Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	config.NetworkFlags
}

// cfg is set once by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses command-line flags into cfg, resolves the selected
// network, validates the RPC settings and initializes logging at info level.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return err
	}
	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return err
	}
	err = rpc.ValidateRPCConfig(&cfg.Config)
	if err != nil {
		return err
	}
	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,30 @@
[
{
"id": "0"
},
{
"id": "1",
"parents": [
"0"
]
},
{
"id": "2",
"parents": [
"0"
]
},
{
"id": "3",
"parents": [
"1",
"2"
]
},
{
"id": "4",
"parents": [
"1"
]
}
]

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend shared by this tool's loggers.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged "MNJS".
	log = backendLog.Logger("MNJS")
	// spawn launches goroutines with the project's panic handling attached.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,40 @@
package main
import (
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/mine"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/pkg/errors"
)
// main mines the DAG described by --dag-file against a running kaspad node
// over RPC.
func main() {
	defer panics.HandlePanic(log, "minejson-main", nil)
	err := parseConfig()
	if err != nil {
		panic(errors.Wrap(err, "error parsing configuration"))
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())
	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}
	rpcClient, err := rpc.ConnectToRPC(&cfg.Config, cfg.NetParams())
	if err != nil {
		panic(errors.Wrap(err, "error connecting to JSON-RPC server"))
	}
	defer rpcClient.Disconnect()
	// Scratch directory for the local mining consensus.
	// NOTE(review): dataDir is never removed here; cleanup appears to be left
	// to the caller/environment — confirm before relying on it.
	dataDir, err := common.TempDir("minejson")
	if err != nil {
		panic(err)
	}
	err = mine.FromFile(cfg.DAGFile, cfg.NetParams(), rpcClient, dataDir)
	if err != nil {
		panic(errors.Wrap(err, "error in mine.FromFile"))
	}
}

View File

@ -0,0 +1,12 @@
# Netsync Stability Tester
This tests that netsync proceeds at a rate of at least 5 blocks per second.
Note: the test doesn't delete kaspad's data directories, and it is the user's
responsibility to delete the data directories that appear in the log.
## Running
1. `go install kaspad`.
2. `go install ./...`.
3. `cd run`
4. `./run.sh`

View File

@ -0,0 +1,44 @@
package main
import (
"path/filepath"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/stability-tests/common"
)
const (
	// Names of the log files written under the stability-tests home directory.
	defaultLogFilename    = "netsync.log"
	defaultErrLogFilename = "netsync_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds the DAG-generator's command-line options.
type configFlags struct {
	LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"`
	// NumberOfBlocks is the length of the generated chain (see generateBlocks).
	NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine" required:"true"`
	// TargetFile receives the generated JSON DAG description.
	TargetFile string `short:"f" long:"targetfile" description:"The target file for the JSON" required:"true"`
}

// cfg is set once by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command line into the package-level cfg and sets up
// logging. Returns the parse error, if any.
func parseConfig() error {
	cfg = &configFlags{}
	if _, err := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag).Parse(); err != nil {
		return err
	}
	initLog(defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,30 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend shared by this tool's loggers.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged "CHGN".
	log = backendLog.Logger("CHGN")
	// spawn launches goroutines with the project's panic handling attached.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog sets the log level from the parsed configuration (defaulting to
// info) and wires the backend to the given log files. An unknown log-level
// string is reported on stderr and terminates the process.
func initLog(logFile, errLogFile string) {
	level := logger.LevelInfo
	if activeConfig().LogLevel != "" {
		var ok bool
		level, ok = logger.LevelFromString(activeConfig().LogLevel)
		if !ok {
			// Fixed message grammar and added the previously-missing newline.
			fmt.Fprintf(os.Stderr, "Log level %s doesn't exist\n", activeConfig().LogLevel)
			os.Exit(1)
		}
	}
	log.SetLevel(level)
	common.InitBackend(backendLog, logFile, errLogFile)
}

View File

@ -0,0 +1,57 @@
package main
import (
"encoding/json"
"os"
"strconv"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/mine"
"github.com/pkg/errors"
)
// main generates a linear-chain DAG description and writes it as JSON to the
// configured target file.
func main() {
	err := parseConfig()
	if err != nil {
		panic(errors.Wrap(err, "error in parseConfig"))
	}
	common.UseLogger(backendLog, log.Level())
	blocks := generateBlocks()
	err = writeJSONToFile(blocks, cfg.TargetFile)
	if err != nil {
		// The previous message blamed generateBlocks, but this error comes
		// from writing the JSON file.
		panic(errors.Wrap(err, "error in writeJSONToFile"))
	}
}
// generateBlocks builds a linear chain of NumberOfBlocks JSON block
// descriptors: block "0" is the root, and every block i has block i-1 as its
// sole parent. The root block is always emitted, even for a zero count.
func generateBlocks() []mine.JSONBlock {
	chainLength := int(activeConfig().NumberOfBlocks)
	chain := []mine.JSONBlock{{ID: "0"}}
	for i := 1; i < chainLength; i++ {
		next := mine.JSONBlock{
			ID:      strconv.Itoa(i),
			Parents: []string{strconv.Itoa(i - 1)},
		}
		chain = append(chain, next)
	}
	return chain
}
// writeJSONToFile serializes blocks as JSON into fileName, replacing any
// existing file. The file is always closed before returning; if encoding
// succeeded, a close error (e.g. a late flush failure) is reported.
func writeJSONToFile(blocks []mine.JSONBlock, fileName string) error {
	f, err := openFile(fileName)
	if err != nil {
		return errors.Wrap(err, "error in openFile")
	}
	encoder := json.NewEncoder(f)
	err = encoder.Encode(blocks)
	closeErr := f.Close() // the previous version leaked this handle
	if err != nil {
		return errors.Wrap(err, "error in Encode")
	}
	return errors.Wrap(closeErr, "error in Close")
}
// openFile creates the named file for writing, removing any pre-existing
// file first.
func openFile(name string) (*os.File, error) {
	// Best-effort removal; presumably to reset a pre-existing file's
	// metadata — os.Create alone would only truncate it.
	os.Remove(name)
	f, createErr := os.Create(name)
	return f, errors.WithStack(createErr)
}

View File

@ -0,0 +1,45 @@
package main
import (
"time"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/pkg/errors"
)
// checkSyncRate polls SYNCED every 10 seconds until its header and block
// counts catch up with the snapshot taken from SYNCER, failing if that takes
// longer than 5 seconds (IBD startup allowance) plus 100ms per syncer header.
func checkSyncRate(syncerClient, syncedClient *rpc.Client) error {
	log.Info("Checking the sync rate")
	syncerBlockCountResponse, err := syncerClient.GetBlockCount()
	if err != nil {
		return err
	}
	syncerHeadersCount := syncerBlockCountResponse.HeaderCount
	syncerBlockCount := syncerBlockCountResponse.BlockCount
	log.Infof("SYNCER block count: %d headers and %d blocks", syncerHeadersCount, syncerBlockCount)
	// We give 5 seconds for IBD to start and then 100 milliseconds for each block.
	expectedTime := time.Now().Add(5*time.Second + time.Duration(syncerHeadersCount)*100*time.Millisecond)
	start := time.Now()
	const tickDuration = 10 * time.Second
	ticker := time.NewTicker(tickDuration)
	defer ticker.Stop()
	for range ticker.C {
		log.Info("Getting SYNCED block count")
		syncedBlockCountResponse, err := syncedClient.GetBlockCount()
		if err != nil {
			return err
		}
		log.Infof("SYNCED block count: %d headers and %d blocks", syncedBlockCountResponse.HeaderCount,
			syncedBlockCountResponse.BlockCount)
		// Done once SYNCED has at least as many headers and blocks as SYNCER
		// had when we started; blocks mined afterwards don't move the goal.
		if syncedBlockCountResponse.BlockCount >= syncerBlockCount &&
			syncedBlockCountResponse.HeaderCount >= syncerHeadersCount {
			break
		}
		if time.Now().After(expectedTime) {
			return errors.Errorf("SYNCED is not synced in the expected rate")
		}
	}
	log.Infof("IBD took approximately %s", time.Since(start))
	return nil
}

View File

@ -0,0 +1,55 @@
package main
import (
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/jessevdk/go-flags"
)
const (
	// Names of the log files written under the stability-tests home directory.
	defaultLogFilename    = "netsync.log"
	defaultErrLogFilename = "netsync_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds the netsync stability test's command-line options.
type configFlags struct {
	LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"`
	DAGFile  string `long:"dag-file" description:"Path to DAG JSON file"`
	Profile  string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	// The three data directories below fall back to fresh temp directories
	// when omitted (see useDirOrCreateTemp).
	MiningDataDirectory string `long:"mining-data-dir" description:"Mining Data directory (will generate a random one if omitted)"`
	SyncerDataDirectory string `long:"syncer-data-dir" description:"Syncer Data directory (will generate a random one if omitted)"`
	SynceeDataDirectory string `long:"syncee-data-dir" description:"Syncee Data directory (will generate a random one if omitted)"`
	config.NetworkFlags
}

// cfg is set once by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command line into cfg, resolves the selected
// network and initializes logging.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return err
	}
	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return err
	}
	initLog(defaultLogFile, defaultErrLogFile)
	return nil
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1 @@
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000}

View File

@ -0,0 +1,30 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend shared by this tool's loggers.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged "NTSN".
	log = backendLog.Logger("NTSN")
	// spawn launches goroutines with the project's panic handling attached.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog sets the log level from the parsed configuration (defaulting to
// info) and wires the backend to the given log files. An unknown log-level
// string is reported on stderr and terminates the process.
func initLog(logFile, errLogFile string) {
	level := logger.LevelInfo
	if activeConfig().LogLevel != "" {
		var ok bool
		level, ok = logger.LevelFromString(activeConfig().LogLevel)
		if !ok {
			// Fixed message grammar and added the previously-missing newline.
			fmt.Fprintf(os.Stderr, "Log level %s doesn't exist\n", activeConfig().LogLevel)
			os.Exit(1)
		}
	}
	log.SetLevel(level)
	common.InitBackend(backendLog, logFile, errLogFile)
}

View File

@ -0,0 +1,60 @@
package main
import (
"sync/atomic"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/pkg/errors"
)
// main runs the netsync stability test: it boots a SYNCER node and mines the
// configured DAG into it, boots a SYNCEE node connected to it, and asserts
// via checkSyncRate that the syncee catches up within the expected time.
func main() {
	defer panics.HandlePanic(log, "netsync-main", nil)
	err := parseConfig()
	if err != nil {
		panic(errors.Wrap(err, "error in parseConfig"))
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())
	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}
	// shutdown suppresses RPC error logging once the test has finished.
	shutdown := uint64(0)
	syncerClient, syncerTeardown, err := setupSyncer()
	if err != nil {
		panic(errors.Wrap(err, "error in setupSyncer"))
	}
	syncerClient.SetOnErrorHandler(func(err error) {
		if atomic.LoadUint64(&shutdown) == 0 {
			log.Debugf("received error from SYNCER: %s", err)
		}
	})
	defer func() {
		syncerClient.Disconnect()
		syncerTeardown()
	}()
	syncedClient, syncedTeardown, err := setupSyncee()
	if err != nil {
		panic(errors.Wrap(err, "error in setupSyncee"))
	}
	syncedClient.SetOnErrorHandler(func(err error) {
		if atomic.LoadUint64(&shutdown) == 0 {
			log.Debugf("received error from SYNCEE: %s", err)
		}
	})
	defer func() {
		syncedClient.Disconnect()
		syncedTeardown()
	}()
	err = checkSyncRate(syncerClient, syncedClient)
	if err != nil {
		panic(errors.Wrap(err, "error in checkSyncRate"))
	}
	atomic.StoreUint64(&shutdown, 1)
}

View File

@ -0,0 +1,193 @@
package main
import (
"fmt"
"os/exec"
"strings"
"sync/atomic"
"syscall"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/mine"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
// Fixed localhost ports for the two nodes' RPC and P2P listeners.
const (
	syncerRPCAddress = "localhost:9000"
	syncedRPCAddress = "localhost:9100"
	syncerListen     = "localhost:9001"
	syncedListen     = "localhost:9101"
)
// startNode launches a kaspad process with the given addresses and data/log
// directory, returning the command plus a teardown function that SIGKILLs
// the process and waits up to a second for it to stop.
//
// A watchdog goroutine aborts the whole test if the node exits before
// teardown is requested, or exits with an error other than "signal: killed".
func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir string) (*exec.Cmd, func(), error) {
	log.Infof("Data directory for %s is %s", name, dataDir)
	args := []string{
		"kaspad",
		common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()),
		"--datadir", dataDir,
		"--logdir", dataDir,
		"--rpclisten", rpcAddress,
		"--listen", listen,
		"--connect", connect,
		"--profile", profilePort,
		"--loglevel", "debug",
	}
	if activeConfig().OverrideDAGParamsFile != "" {
		args = append(args, "--override-dag-params-file", activeConfig().OverrideDAGParamsFile)
	}
	cmd, err := common.StartCmd(name,
		args...,
	)
	if err != nil {
		return nil, nil, err
	}
	// shutdown flags an intentional kill so the watchdog below doesn't treat
	// it as an unexpected exit.
	var shutdown uint32
	stopped := make(chan struct{})
	spawn("startNode-cmd.Wait", func() {
		err := cmd.Wait()
		if err != nil {
			if atomic.LoadUint32(&shutdown) == 0 {
				panics.Exit(log, fmt.Sprintf("%s ( %s ) closed unexpectedly: %s", name, cmd, err))
			}
			if !strings.Contains(err.Error(), "signal: killed") {
				panics.Exit(log, fmt.Sprintf("%s ( %s ) closed with an error: %s", name, cmd, err))
			}
		}
		stopped <- struct{}{}
	})
	return cmd, func() {
		atomic.StoreUint32(&shutdown, 1)
		killWithSigkill(cmd, name)
		const timeout = time.Second
		select {
		case <-stopped:
		case <-time.After(timeout):
			panics.Exit(log, fmt.Sprintf("%s couldn't be closed after %s", name, timeout))
		}
	}, nil
}
// killWithSigkill forcibly terminates cmd with SIGKILL. A failure to deliver
// the signal is logged but not returned, since this runs during teardown.
func killWithSigkill(cmd *exec.Cmd, commandName string) {
	// The previous log line ("SIGKILLED") carried no context and fired before
	// the signal was even sent; include the target name instead.
	log.Errorf("Sending SIGKILL to %s", commandName)
	err := cmd.Process.Signal(syscall.SIGKILL)
	if err != nil {
		// Include the underlying error, which was previously dropped.
		log.Criticalf("error sending SIGKILL to %s: %s", commandName, err)
	}
}
// setupNodeWithRPC starts a node via startNode, waits briefly for it to come
// up, then connects an RPC client to it. If anything below the start panics,
// the node is torn down before re-panicking.
func setupNodeWithRPC(name, listen, rpcListen, connect, profilePort, dataDir string) (*rpc.Client, func(), error) {
	_, teardown, err := startNode(name, rpcListen, listen, connect, profilePort, dataDir)
	if err != nil {
		return nil, nil, errors.Wrap(err, "error in startNode")
	}
	defer func() {
		if r := recover(); r != nil {
			teardown()
			panic(r)
		}
	}()
	log.Infof("Waiting for node %s to start...", name)
	// NOTE(review): a fixed sleep is racy on slow machines; a readiness poll
	// would be more robust — confirm 2s suffices in CI.
	const initTime = 2 * time.Second
	time.Sleep(initTime)
	rpcClient, err := rpc.ConnectToRPC(&rpc.Config{
		RPCServer: rpcListen,
	}, activeConfig().NetParams())
	if err != nil {
		return nil, nil, errors.Wrap(err, "error connecting to JSON-RPC server")
	}
	return rpcClient, teardown, nil
}
// setupSyncee starts the SYNCEE node (the side being synced) and returns an
// RPC client for it plus a teardown function.
func setupSyncee() (*rpc.Client, func(), error) {
	const syncedProfilePort = "6061"

	synceeDataDir, err := useDirOrCreateTemp(activeConfig().SynceeDataDirectory, "syncee-kaspad-data-dir")
	if err != nil {
		return nil, nil, err
	}
	return setupNodeWithRPC(
		"SYNCEE", syncedListen, syncedRPCAddress, syncerListen, syncedProfilePort, synceeDataDir)
}
// setupSyncer boots the SYNCER node, mines the configured DAG into it over
// RPC, and finally mines one extra block on its current tips (verifying the
// node still accepts blocks). Returns the syncer's RPC client and teardown.
func setupSyncer() (*rpc.Client, func(), error) {
	const syncerProfilePort = "6062"
	syncerDataDir, err := useDirOrCreateTemp(activeConfig().SyncerDataDirectory, "syncer-kaspad-data-dir")
	if err != nil {
		return nil, nil, err
	}
	rpcClient, teardown, err := setupNodeWithRPC("SYNCER", syncerListen, syncerRPCAddress, syncedListen,
		syncerProfilePort, syncerDataDir)
	if err != nil {
		return nil, nil, err
	}
	// Tear the node down if any of the mining steps below panics.
	defer func() {
		if r := recover(); r != nil {
			teardown()
			panic(r)
		}
	}()
	miningDataDir, err := useDirOrCreateTemp(activeConfig().MiningDataDirectory, "syncer-mining-data-dir")
	if err != nil {
		return nil, nil, err
	}
	err = mine.FromFile(cfg.DAGFile, activeConfig().NetParams(), rpcClient, miningDataDir)
	if err != nil {
		return nil, nil, errors.Wrap(err, "error in mine.FromFile")
	}
	log.Info("Mining on top of syncer tips")
	rejectReason, err := mineOnTips(rpcClient)
	if err != nil {
		panic(err)
	}
	if rejectReason != appmessage.RejectReasonNone {
		panic(fmt.Sprintf("mined block rejected: %s", rejectReason))
	}
	return rpcClient, teardown, nil
}
// useDirOrCreateTemp returns dataDir when it is set; otherwise it creates
// and returns a fresh temporary directory named after tempName.
func useDirOrCreateTemp(dataDir, tempName string) (string, error) {
	if dataDir == "" {
		return common.TempDir(tempName)
	}
	return dataDir, nil
}
// mineOnTips requests a block template paying a dummy pay-to-pubkey-hash
// address, solves its proof-of-work, and submits it, returning the node's
// reject reason (RejectReasonNone on acceptance).
func mineOnTips(client *rpc.Client) (appmessage.RejectReason, error) {
	// Any syntactically valid address works; the coinbase is never spent.
	fakePublicKeyHash := make([]byte, 20)
	addr, err := util.NewAddressPubKeyHash(fakePublicKeyHash, activeConfig().NetParams().Prefix)
	if err != nil {
		return appmessage.RejectReasonNone, err
	}
	template, err := client.GetBlockTemplate(addr.String())
	if err != nil {
		return appmessage.RejectReasonNone, err
	}
	domainBlock := appmessage.MsgBlockToDomainBlock(template.MsgBlock)
	mine.SolveBlock(domainBlock)
	return client.SubmitBlock(domainBlock)
}

View File

@ -0,0 +1,32 @@
#!/bin/bash
set -x

# Run netsync over every DAG in the fast set, then the fast-pruning IBD DAG.
# Note: `set -e` is deliberately NOT used — each netsync exit code is checked
# explicitly so the FAILED line is printed; under `set -e` the script aborted
# before ever reaching its own failure reporting.

FAST_DAGS_DIR="../dags-fast"
mapfile -t DAGS < <( ls $FAST_DAGS_DIR)

for dagArchive in "${DAGS[@]}"
do
  JSON_FILE=$FAST_DAGS_DIR/$dagArchive
  netsync --simnet --dag-file $JSON_FILE --profile=7000
  TEST_EXIT_CODE=$?
  echo "$dagArchive processed"
  if [ $TEST_EXIT_CODE -ne 0 ]; then
    echo "netsync (fast) test: FAILED"
    exit 1
  fi
  rm -rf /tmp/STABILITY_TEMP_DIR_*
done

JSON_FILE="../fast-pruning-ibd-test/dag-for-fast-pruning-ibd-test.json.gz"
netsync --devnet --dag-file $JSON_FILE --profile=7000 --override-dag-params-file=../fast-pruning-ibd-test/fast-pruning-ibd-test-params.json
TEST_EXIT_CODE=$?
echo "dag-for-fast-pruning-ibd-test.json processed"
if [ $TEST_EXIT_CODE -ne 0 ]; then
  echo "netsync (fast) test: FAILED"
  exit 1
fi
rm -rf /tmp/STABILITY_TEMP_DIR_*

echo "netsync (fast) test: PASSED"
exit 0

View File

@ -0,0 +1,21 @@
#!/bin/bash
# Run netsync over every DAG in the slow set (can take hours).
# `set -e` is deliberately NOT used — each exit code is checked explicitly so
# the FAILED line can be printed before exiting.

SLOW_DAGS_DIR="../dags-slow"
mapfile -t DAGS < <( ls $SLOW_DAGS_DIR)

for dagArchive in "${DAGS[@]}"
do
  JSON_FILE=$SLOW_DAGS_DIR/$dagArchive
  netsync --simnet --dag-file $JSON_FILE --profile=7000
  # Previously TEST_EXIT_CODE was never assigned here, so the check below
  # always compared an empty value and never detected a failure.
  TEST_EXIT_CODE=$?
  echo "$dagArchive processed"
  if [ $TEST_EXIT_CODE -ne 0 ]; then
    echo "netsync (slow) test: FAILED"
    exit 1
  fi
  rm -rf /tmp/STABILITY_TEMP_DIR_*
done

echo "netsync (slow) test: PASSED"
exit 0

View File

@ -0,0 +1,33 @@
#!/bin/bash
# Run netsync over every DAG, fast and slow sets alike.

FAST_DAGS_DIR="../dags-fast"
SLOW_DAGS_DIR="../dags-slow"
mapfile -t FAST_DAGS < <( ls $FAST_DAGS_DIR)
mapfile -t SLOW_DAGS < <( ls $SLOW_DAGS_DIR)

# Collect full relative paths for both sets.
DAGS=()
for dagArchive in "${FAST_DAGS[@]}"
do
  DAGS+=("$FAST_DAGS_DIR/$dagArchive")
done
for dagArchive in "${SLOW_DAGS[@]}"
do
  DAGS+=("$SLOW_DAGS_DIR/$dagArchive")
done

for dagArchive in "${DAGS[@]}"
do
  # DAGS entries are already full paths; the previous version prefixed
  # $FAST_DAGS_DIR again, pointing every slow DAG at the wrong directory.
  JSON_FILE=$dagArchive
  netsync --simnet --dag-file $JSON_FILE --profile=7000
  TEST_EXIT_CODE=$?
  echo "$dagArchive processed"
  if [ $TEST_EXIT_CODE -ne 0 ]; then
    echo "netsync test: FAILED"
    exit 1
  fi
done

echo "netsync test: PASSED"
exit 0

View File

@ -0,0 +1,9 @@
# Orphans
This tool makes sure orphan resolution works and doesn't crash kaspad
## Running
1. `go install` kaspad and orphans.
2. `cd run`
3. `./run.sh`

View File

@ -0,0 +1,23 @@
package main
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/pkg/errors"
)
// checkTopBlockIsTip verifies that the node's selected tip is topBlock,
// i.e. that the orphan chain was fully resolved and adopted.
func checkTopBlockIsTip(rpcClient *rpc.Client, topBlock *externalapi.DomainBlock) error {
	response, err := rpcClient.GetSelectedTipHash()
	if err != nil {
		return err
	}
	topBlockHash := consensushashing.BlockHash(topBlock)
	if response.SelectedTipHash == topBlockHash.String() {
		return nil
	}
	return errors.Errorf("selectedTipHash is '%s' while expected to be topBlock's hash `%s`",
		response.SelectedTipHash, topBlockHash)
}

View File

@ -0,0 +1,61 @@
package main
import (
"os"
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/jessevdk/go-flags"
)
const (
	// Names of the log files written under the stability-tests home directory.
	defaultLogFilename    = "orphans.log"
	defaultErrLogFilename = "orphans_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)

// configFlags holds the orphans test's command-line options.
type configFlags struct {
	rpc.Config
	NodeP2PAddress string `long:"addr" short:"a" description:"node's P2P address"`
	Profile        string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	// OrphanChainLength is the number of blocks built below the top block
	// (see prepareBlocks).
	OrphanChainLength int `long:"num-orphans" short:"n" description:"Desired length of orphan chain"`
	config.NetworkFlags
}

// cfg is set once by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command line into cfg, resolves the selected
// network and initializes logging at info level. A --help request exits
// cleanly after the parser has printed usage.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		// --help is not a failure: the parser already printed usage.
		if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp {
			os.Exit(0)
		}
		return err
	}
	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return err
	}
	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,28 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/standalone"
)
// connectToNode establishes a P2P connection to the node under test using a
// minimal net adapter configured for the selected network, and returns its
// message routes. Any failure prints to stderr and exits the process.
func connectToNode() *standalone.Routes {
	cfg := activeConfig()
	kaspadConfig := config.DefaultConfig()
	kaspadConfig.NetworkFlags = cfg.NetworkFlags
	minimalNetAdapter, err := standalone.NewMinimalNetAdapter(kaspadConfig)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error creating minimalNetAdapter: %+v", err)
		os.Exit(1)
	}
	routes, err := minimalNetAdapter.Connect(cfg.NodeP2PAddress)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error connecting to node: %+v", err)
		os.Exit(1)
	}
	return routes
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend shared by this tool's loggers.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged "ORPH".
	log = backendLog.Logger("ORPH")
	// spawn launches goroutines with the project's panic handling attached.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,61 @@
package main
import (
"fmt"
"os"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/kaspanet/kaspad/util/profiling"
)
// timeout bounds each wait for an expected P2P message from the node.
var timeout = 30 * time.Second

// main builds a block chain locally, feeds it to the node top-first (so every
// block arrives as an orphan), then verifies the node resolved the chain and
// adopted the top block as its selected tip.
func main() {
	err := parseConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err)
		os.Exit(1)
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())
	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}
	blocks, topBlock, err := prepareBlocks()
	if err != nil {
		log.Errorf("Error preparing blocks: %+v", err)
		backendLog.Close()
		os.Exit(1)
	}
	routes := connectToNode()
	rpcClient, err := rpc.ConnectToRPC(&cfg.Config, cfg.NetParams())
	if err != nil {
		panic(errors.Wrap(err, "error connecting to JSON-RPC server"))
	}
	defer rpcClient.Disconnect()
	err = sendBlocks(routes, blocks, topBlock)
	if err != nil {
		// Log before closing the backend; the previous order closed first
		// and could drop this message.
		log.Errorf("Error sending blocks: %+v", err)
		backendLog.Close()
		os.Exit(1)
	}
	// Wait a second to let kaspad process orphans
	<-time.After(1 * time.Second)
	err = checkTopBlockIsTip(rpcClient, topBlock)
	if err != nil {
		log.Errorf("Error in checkTopBlockIsTip: %+v", err)
		backendLog.Close()
		os.Exit(1)
	}
}

View File

@ -0,0 +1,62 @@
package main
import (
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/mine"
"github.com/pkg/errors"
)
// leveldbCacheSizeMiB is the cache size handed to the scratch LevelDB below.
const leveldbCacheSizeMiB = 256

// prepareBlocks builds a chain of OrphanChainLength+1 solved blocks on top of
// a local test consensus' virtual selected parent. It returns all blocks but
// the last as `blocks`, and the last one as `topBlock` — sending topBlock to
// the node first makes the rest arrive as orphans.
func prepareBlocks() (blocks []*externalapi.DomainBlock, topBlock *externalapi.DomainBlock, err error) {
	config := activeConfig()
	testDatabaseDir, err := common.TempDir("minejson")
	if err != nil {
		return nil, nil, err
	}
	// NOTE(review): this LevelDB handle is not passed to the test consensus
	// created below — confirm whether it is actually needed.
	db, err := ldb.NewLevelDB(testDatabaseDir, leveldbCacheSizeMiB)
	if err != nil {
		return nil, nil, err
	}
	defer db.Close()
	testConsensus, tearDownFunc, err := consensus.NewFactory().NewTestConsensus(config.ActiveNetParams, false, "prepareBlocks")
	if err != nil {
		return nil, nil, err
	}
	defer tearDownFunc(true)
	virtualSelectedParent, err := testConsensus.GetVirtualSelectedParent()
	if err != nil {
		return nil, nil, err
	}
	currentParentHash := virtualSelectedParent
	blocksCount := config.OrphanChainLength + 1
	blocks = make([]*externalapi.DomainBlock, 0, blocksCount)
	for i := 0; i < blocksCount; i++ {
		// Build, solve and insert each block so the next iteration can use
		// it as a parent.
		block, _, err := testConsensus.BuildBlockWithParents(
			[]*externalapi.DomainHash{currentParentHash},
			&externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{}},
			[]*externalapi.DomainTransaction{})
		if err != nil {
			return nil, nil, errors.Wrap(err, "error in BuildBlockWithParents")
		}
		mine.SolveBlock(block)
		_, err = testConsensus.ValidateAndInsertBlock(block)
		if err != nil {
			return nil, nil, errors.Wrap(err, "error in ValidateAndInsertBlock")
		}
		blocks = append(blocks, block)
		currentParentHash = consensushashing.BlockHash(block)
	}
	return blocks[:len(blocks)-1], blocks[len(blocks)-1], nil
}

View File

@ -0,0 +1,25 @@
#!/bin/bash
# Start a throwaway devnet kaspad, run the orphans test against it, and
# report PASS only if both the test and kaspad exit cleanly.
rm -rf /tmp/kaspad-temp

kaspad --devnet --datadir=/tmp/kaspad-temp --profile=6061 &
KASPAD_PID=$!
sleep 1

orphans --devnet -alocalhost:16611 -n20 --profile=7000
TEST_EXIT_CODE=$?

kill $KASPAD_PID
wait $KASPAD_PID
KASPAD_EXIT_CODE=$?

echo "Exit code: $TEST_EXIT_CODE"
echo "Kaspad exit code: $KASPAD_EXIT_CODE"

if [ $TEST_EXIT_CODE -ne 0 ] || [ $KASPAD_EXIT_CODE -ne 0 ]; then
  echo "orphans test: FAILED"
  exit 1
fi
echo "orphans test: PASSED"
exit 0

View File

@ -0,0 +1,79 @@
package main
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/standalone"
"github.com/pkg/errors"
)
// sendBlocks advertises topBlock to the peer via an inv message and serves it
// on request, then serves the remaining blocks in reverse order as the peer
// walks back through its orphan chain: for every block it first waits for the
// peer's block-locator request (triggered by the orphan above it) and then
// answers the relay-block request with the block itself.
func sendBlocks(routes *standalone.Routes, blocks []*externalapi.DomainBlock, topBlock *externalapi.DomainBlock) error {
	topHash := consensushashing.BlockHash(topBlock)
	log.Infof("Sending top block with hash %s", topHash)
	if err := routes.OutgoingRoute.Enqueue(&appmessage.MsgInvRelayBlock{Hash: topHash}); err != nil {
		return err
	}
	if err := waitForRequestAndSend(routes, topBlock); err != nil {
		return err
	}

	for i := len(blocks) - 1; i >= 0; i-- {
		current := blocks[i]
		// The orphan that triggers the locator request is the block directly
		// above the current one; for the last list element that is topBlock.
		orphan := topBlock
		if i+1 != len(blocks) {
			orphan = blocks[i+1]
		}
		currentHash := consensushashing.BlockHash(current)
		log.Infof("Waiting for request for block locator for block number %d with hash %s", i, currentHash)
		if err := waitForRequestForBlockLocator(routes, orphan); err != nil {
			return err
		}
		log.Infof("Waiting for request and sending block number %d with hash %s", i, currentHash)
		if err := waitForRequestAndSend(routes, current); err != nil {
			return err
		}
	}
	return nil
}
// waitForRequestForBlockLocator waits for the peer to request a block locator
// for orphanBlock and answers with a minimal locator containing only the
// orphan's hash and the genesis hash.
func waitForRequestForBlockLocator(routes *standalone.Routes, orphanBlock *externalapi.DomainBlock) error {
	message, err := routes.WaitForMessageOfType(appmessage.CmdRequestBlockLocator, timeout)
	if err != nil {
		return err
	}
	request := message.(*appmessage.MsgRequestBlockLocator)
	wantHash := consensushashing.BlockHash(orphanBlock)
	if *request.HighHash != *wantHash {
		return errors.Errorf("expected blockLocator request high hash to be %s but got %s",
			wantHash, request.HighHash)
	}
	locator := appmessage.NewMsgBlockLocator(
		[]*externalapi.DomainHash{wantHash, activeConfig().ActiveNetParams.GenesisHash})
	return routes.OutgoingRoute.Enqueue(locator)
}
// waitForRequestAndSend waits for the peer's relay-blocks request, verifies
// that exactly the given block was requested, and sends it.
func waitForRequestAndSend(routes *standalone.Routes, block *externalapi.DomainBlock) error {
	message, err := routes.WaitForMessageOfType(appmessage.CmdRequestRelayBlocks, timeout)
	if err != nil {
		return err
	}
	request := message.(*appmessage.MsgRequestRelayBlocks)
	wantHash := consensushashing.BlockHash(block)
	if len(request.Hashes) != 1 || *request.Hashes[0] != *wantHash {
		return errors.Errorf("expecting requested hashes to be [%s], but got %v",
			wantHash, request.Hashes)
	}
	return routes.OutgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
}

View File

@ -0,0 +1,51 @@
package main
import (
"os"
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/jessevdk/go-flags"
)
const (
	// Default file names for this tool's log and error-log outputs.
	defaultLogFilename    = "reorg.log"
	defaultErrLogFilename = "reorg_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)
// configFlags holds the command-line configuration for the reorg test.
type configFlags struct {
	Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	DAGFile string `long:"dag-file" description:"Path to DAG JSON file"`
}
// cfg is the process-wide configuration, populated by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration for this process.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig parses the command-line flags into the package-level cfg and
// initializes logging. On --help it exits the process after usage is printed.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	if _, err := parser.Parse(); err != nil {
		// --help surfaces as a flags.Error of type ErrHelp; usage was already
		// printed by the parser, so exit successfully.
		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
			os.Exit(0)
		}
		return err
	}

	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend all of this tool's output goes through.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged RORG.
	log = backendLog.Logger("RORG")
	// spawn is a goroutine wrapper that routes panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,25 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/profiling"
)
// main is the entry point of the reorg stability test: it parses the
// configuration, sets up logging and optional profiling, and runs the test.
func main() {
	err := parseConfig()
	if err != nil {
		// Logging is not initialized yet at this point, so report to stderr.
		// The message previously lacked a trailing newline, gluing the next
		// shell prompt onto it.
		fmt.Fprintf(os.Stderr, "Error parsing config: %+v\n", err)
		os.Exit(1)
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())

	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}
	testReorg(cfg)
}

View File

@ -0,0 +1,160 @@
package main
import (
"compress/gzip"
"fmt"
"math/rand"
"os"
"time"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/dagconfig"
)
// testReorg verifies that the honest consensus reorganizes onto a heavier
// side chain. It replays the DAG from cfg.DAGFile into an honest test
// consensus, mines a separate attacker chain on a second consensus until the
// attacker tip's blue work exceeds the honest virtual's, feeds that side
// chain to the honest consensus, and fails unless the honest virtual selected
// parent switches to the side-chain tip.
func testReorg(cfg *configFlags) {
	params := dagconfig.DevnetParams
	// Skip proof of work so blocks can be built instantly on both consensuses.
	params.SkipProofOfWork = true
	factory := consensus.NewFactory()
	tc, teardown, err := factory.NewTestConsensus(&params, false, "ReorgHonest")
	if err != nil {
		panic(err)
	}
	defer teardown(false)
	// The DAG file is gzip-compressed JSON; replay it into the honest consensus.
	f, err := os.Open(cfg.DAGFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	gzipReader, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer gzipReader.Close()
	_, err = tc.MineJSON(gzipReader, testapi.MineJSONBlockTypeUTXOValidBlock)
	if err != nil {
		panic(err)
	}
	// A second, independent consensus on which the attacker chain is mined.
	tcAttacker, teardownAttacker, err := factory.NewTestConsensus(&params, false, "ReorgAttacker")
	if err != nil {
		panic(err)
	}
	defer teardownAttacker(false)
	virtualSelectedParent, err := tc.GetVirtualSelectedParent()
	if err != nil {
		panic(err)
	}
	virtualSelectedParentGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), virtualSelectedParent)
	if err != nil {
		panic(err)
	}
	log.Infof("Selected tip blue score %d", virtualSelectedParentGHOSTDAGData.BlueScore())
	sideChain := make([]*externalapi.DomainBlock, 0)
	// Mine the attacker chain until its tip accumulates more blue work than
	// the honest virtual selected parent.
	for i := uint64(0); ; i++ {
		tips, err := tcAttacker.Tips()
		if err != nil {
			panic(err)
		}
		block, _, err := tcAttacker.BuildBlockWithParents(tips, nil, nil)
		if err != nil {
			panic(err)
		}
		// We change the nonce of the first block so its hash won't be similar to any of the
		// honest DAG blocks. As a result the rest of the side chain should have unique hashes
		// as well.
		if i == 0 {
			mutableHeader := block.Header.ToMutable()
			mutableHeader.SetNonce(uint64(rand.NewSource(84147).Int63()))
			block.Header = mutableHeader.ToImmutable()
		}
		_, err = tcAttacker.ValidateAndInsertBlock(block)
		if err != nil {
			panic(err)
		}
		sideChain = append(sideChain, block)
		if i%100 == 0 {
			log.Infof("Attacker side chain mined %d blocks", i)
		}
		blockHash := consensushashing.BlockHash(block)
		ghostdagData, err := tcAttacker.GHOSTDAGDataStore().Get(tcAttacker.DatabaseContext(), blockHash)
		if err != nil {
			panic(err)
		}
		// Stop once the side-chain tip has strictly more blue work than the
		// honest virtual selected parent.
		if virtualSelectedParentGHOSTDAGData.BlueWork().Cmp(ghostdagData.BlueWork()) == -1 {
			break
		}
	}
	sideChainTipHash := consensushashing.BlockHash(sideChain[len(sideChain)-1])
	sideChainTipGHOSTDAGData, err := tcAttacker.GHOSTDAGDataStore().Get(tcAttacker.DatabaseContext(), sideChainTipHash)
	if err != nil {
		panic(err)
	}
	log.Infof("Side chain tip (%s) blue score %d", sideChainTipHash, sideChainTipGHOSTDAGData.BlueScore())
	// Feed the attacker chain to the honest consensus on a separate goroutine
	// so the whole insertion can be bounded by a timeout.
	doneChan := make(chan struct{})
	spawn("add-sidechain-to-honest", func() {
		for i, block := range sideChain {
			if i%100 == 0 {
				log.Infof("Validated %d blocks from the attacker chain", i)
			}
			_, err := tc.ValidateAndInsertBlock(block)
			if err != nil {
				panic(err)
			}
		}
		doneChan <- struct{}{}
	})
	const timeout = 10 * time.Minute
	select {
	case <-doneChan:
	case <-time.After(timeout):
		fail("Adding the side chain took more than %s", timeout)
	}
	sideChainTipGHOSTDAGData, err = tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), sideChainTipHash)
	if err != nil {
		panic(err)
	}
	log.Infof("Side chain tip (%s) blue score %d", sideChainTipHash, sideChainTipGHOSTDAGData.BlueScore())
	newVirtualSelectedParent, err := tc.GetVirtualSelectedParent()
	if err != nil {
		panic(err)
	}
	// The reorg succeeded iff the honest virtual now selects the side-chain tip.
	if !newVirtualSelectedParent.Equal(sideChainTipHash) {
		fail("No reorg happened")
	}
}
// fail prints the formatted message to stderr, logs it at critical level,
// flushes the log backend, and exits the process with a failure status.
func fail(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	fmt.Fprintln(os.Stderr, msg)
	// Pass msg as an argument rather than as the format string: the already-
	// formatted message may contain '%' characters (go vet printf check).
	log.Criticalf("%s", msg)
	backendLog.Close()
	os.Exit(1)
}

View File

@ -0,0 +1,12 @@
# Run the reorg test against a pre-mined wide DAG and report the result.
reorg --dag-file ../../netsync/dags-fast/wide-dag-blocks--2^12-delay-factor--1-k--18.json.gz --profile=6061
TEST_EXIT_CODE=$?
echo "Exit code: $TEST_EXIT_CODE"
if [ $TEST_EXIT_CODE -eq 0 ]; then
  echo "reorg test: PASSED"
  exit 0
fi
echo "reorg test: FAILED"
exit 1

View File

@ -0,0 +1,59 @@
package main
import (
"path/filepath"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
)
const (
	// Default file names for this tool's log and error-log outputs.
	defaultLogFilename    = "rpc_idle_clients.log"
	defaultErrLogFilename = "rpc_idle_clients_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)
// configFlags holds the command-line configuration for the rpc-idle-clients
// test, embedding the shared RPC and network flag groups.
type configFlags struct {
	rpc.Config
	config.NetworkFlags
	NumClients uint32 `long:"numclients" short:"n" description:"Number of RPC clients to open"`
	Profile    string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
}
// cfg is the process-wide configuration, populated by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration for this process.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig populates the package-level cfg from the command-line flags,
// resolves the selected network, validates the RPC settings and initializes
// logging.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	if _, err := parser.Parse(); err != nil {
		return err
	}
	if err := cfg.ResolveNetwork(parser); err != nil {
		return err
	}
	if err := rpc.ValidateRPCConfig(&cfg.Config); err != nil {
		return err
	}

	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend all of this tool's output goes through.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged RPIC.
	log = backendLog.Logger("RPIC")
	// spawn is a goroutine wrapper that routes panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,44 @@
package main
import (
"time"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/pkg/errors"
)
// main opens cfg.NumClients RPC connections to the node, leaves them idle for
// a fixed duration, and then closes them all. The test passes if neither the
// node nor this process crashes in the meantime.
func main() {
	defer panics.HandlePanic(log, "rpc-idle-clients-main", nil)
	err := parseConfig()
	if err != nil {
		panic(errors.Wrap(err, "error parsing configuration"))
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())

	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}

	// Open all clients up front; they stay idle for the test duration.
	numRPCClients := cfg.NumClients
	clients := make([]*rpc.Client, numRPCClients)
	for i := uint32(0); i < numRPCClients; i++ {
		rpcClient, err := rpc.ConnectToRPC(&cfg.Config, cfg.NetParams())
		if err != nil {
			panic(errors.Wrap(err, "error connecting to RPC server"))
		}
		clients[i] = rpcClient
	}

	// A single-case select over time.After is just a sleep (staticcheck S1000).
	const testDuration = 30 * time.Second
	time.Sleep(testDuration)

	for _, client := range clients {
		client.Close()
	}
}

View File

@ -0,0 +1,34 @@
#!/bin/bash

# Clean any leftover data directory from a previous run.
rm -rf /tmp/kaspad-temp

NUM_CLIENTS=1000

# Start kaspad with a websocket limit high enough for all the test's clients.
kaspad --devnet --datadir=/tmp/kaspad-temp --profile=6061 --rpcmaxwebsockets=$NUM_CLIENTS &
KASPAD_PID=$!
KASPAD_KILLED=0

# Ensure kaspad is killed if the script exits early, but only if the normal
# flow below has not already killed (and waited for) it.
function killKaspadIfNotKilled() {
  if [ $KASPAD_KILLED -eq 0 ]; then
    kill $KASPAD_PID
  fi
}
trap "killKaspadIfNotKilled" EXIT

# Give the node a moment to come up before connecting.
sleep 1

rpc-idle-clients --devnet --profile=7000 -n=$NUM_CLIENTS
TEST_EXIT_CODE=$?

# Shut the node down and collect its exit code.
kill $KASPAD_PID
wait $KASPAD_PID
KASPAD_EXIT_CODE=$?
KASPAD_KILLED=1

echo "Exit code: $TEST_EXIT_CODE"
echo "Kaspad exit code: $KASPAD_EXIT_CODE"

# Pass only if both the test binary and kaspad exited cleanly.
if [ $TEST_EXIT_CODE -eq 0 ] && [ $KASPAD_EXIT_CODE -eq 0 ]; then
  echo "rpc-idle-clients test: PASSED"
  exit 0
fi
echo "rpc-idle-clients test: FAILED"
exit 1

View File

@ -0,0 +1,9 @@
# RPC Stability Tester
This tests JSON-RPC stability by sending commands to the node and making sure it does not crash.
## Running
1. `go install` kaspad and rpc-stability.
2. `cd run`
3. `./run.sh`

View File

@ -0,0 +1,61 @@
package main
import (
"path/filepath"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/stability-tests/common/rpc"
"github.com/jessevdk/go-flags"
)
const (
	// Default file names for this tool's log and error-log outputs.
	defaultLogFilename    = "json_stability.log"
	defaultErrLogFilename = "json_stability_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)
// configFlags holds the command-line configuration for the rpc-stability
// test, embedding the shared RPC and network flag groups.
type configFlags struct {
	rpc.Config
	config.NetworkFlags
	CommandsFilePath string `long:"commands" short:"p" description:"Path to commands file"`
	Profile          string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
}
// cfg is the process-wide configuration, populated by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration for this process.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig populates the package-level cfg from the command-line flags,
// resolves the selected network, validates the RPC settings and initializes
// logging.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	if _, err := parser.Parse(); err != nil {
		return err
	}
	if err := cfg.ResolveNetwork(parser); err != nil {
		return err
	}
	if err := rpc.ValidateRPCConfig(&cfg.Config); err != nil {
		return err
	}

	log.SetLevel(logger.LevelInfo)
	common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend all of this tool's output goes through.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged JSTT.
	log = backendLog.Logger("JSTT")
	// spawn is a goroutine wrapper that routes panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@ -0,0 +1,44 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient/grpcclient"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/pkg/errors"
)
// main connects to the node's RPC server and streams the JSON-RPC commands
// read from the configured commands file, panicking on any error.
func main() {
	defer panics.HandlePanic(log, "rpc-stability-main", nil)
	if err := parseConfig(); err != nil {
		panic(errors.Wrap(err, "error parsing configuration"))
	}
	defer backendLog.Close()
	common.UseLogger(backendLog, log.Level())

	cfg := activeConfig()
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}

	rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
	if err != nil {
		panic(errors.Wrap(err, "error parsing RPC server address"))
	}
	rpcClient, err := grpcclient.Connect(rpcAddress)
	if err != nil {
		panic(errors.Wrap(err, "error connecting to RPC server"))
	}
	defer rpcClient.Disconnect()

	commandsChan, err := readCommands()
	if err != nil {
		panic(errors.Wrapf(err, "error reading commands from file %s", cfg.CommandsFilePath))
	}
	if err := sendCommands(rpcClient, commandsChan); err != nil {
		panic(errors.Wrap(err, "error sending commands"))
	}
}

View File

@ -0,0 +1,25 @@
package main
import (
"bufio"
"os"
)
// readCommands opens the configured commands file and returns a channel on
// which every line of the file is sent. The channel (and the file) is closed
// once the whole file has been read.
func readCommands() (<-chan string, error) {
	cfg := activeConfig()
	f, err := os.Open(cfg.CommandsFilePath)
	if err != nil {
		return nil, err
	}
	commandsChan := make(chan string)
	spawn("readCommands", func() {
		// The file must stay open for the goroutine's lifetime, so it is
		// closed here rather than by readCommands itself. The original code
		// never closed it at all.
		defer f.Close()
		scanner := bufio.NewScanner(f)
		for scanner.Scan() {
			commandsChan <- scanner.Text()
		}
		// Surface read errors instead of silently truncating the stream.
		if err := scanner.Err(); err != nil {
			log.Errorf("error reading commands file %s: %s", cfg.CommandsFilePath, err)
		}
		close(commandsChan)
	})
	return commandsChan, nil
}

View File

@ -0,0 +1,5 @@
{"getBlockDagInfoRequest": {}}
{"getBlockRequest": {"hash": "0000691a26e1cd33ed9d0587d774181726f4e38eecd722a858d3baaa1fd19250"}}
{"getBlockRequest": {"hash": "666661a26e1cd33ed9d0587d774181726f4e38eecd722a858d3baaa1fd19250"}}
{"submitBlockRequest": {"block": {"header":{"version":1,"parentHashes":[],"hashMerkleRoot":{"bytes":"0000000000000000000000000000000000000000000"},"acceptedIdMerkleRoot":{"bytes":"0000000000000000000000000000000000000000000"},"utxoCommitment":{"bytes":"0000000000000000000000000000000000000000000"},"timestamp":1593528309396,"bits":511705087,"nonce":282366},"transactions":[{"version":1,"inputs":[],"outputs":[],"lockTime":0,"subnetworkId":{"bytes":"100000000000000000000000000"},"gas":0,"payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}]}}}
{"submitTransactionRequest": {"transaction": {"version":1,"inputs":[],"outputs":[],"lockTime":0,"subnetworkId":"100000000000000000000000000","gas":0,"payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}}}

View File

@ -0,0 +1,25 @@
#!/bin/bash

# Clean any leftover data directory from a previous run.
rm -rf /tmp/kaspad-temp

# Start kaspad on devnet in the background with debug logging.
kaspad --devnet --datadir=/tmp/kaspad-temp --profile=6061 --loglevel=debug &
KASPAD_PID=$!

# Give the node a moment to come up before connecting.
sleep 1

# Send the JSON-RPC commands from commands.json to the node.
rpc-stability --devnet -p commands.json --profile=7000
TEST_EXIT_CODE=$?

# Shut the node down and collect its exit code.
kill $KASPAD_PID
wait $KASPAD_PID
KASPAD_EXIT_CODE=$?

echo "Exit code: $TEST_EXIT_CODE"
echo "Kaspad exit code: $KASPAD_EXIT_CODE"

# Pass only if both the test binary and kaspad exited cleanly.
if [ $TEST_EXIT_CODE -eq 0 ] && [ $KASPAD_EXIT_CODE -eq 0 ]; then
  echo "rpc-stability test: PASSED"
  exit 0
fi
echo "rpc-stability test: FAILED"
exit 1

View File

@ -0,0 +1,20 @@
package main
import (
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient/grpcclient"
"github.com/pkg/errors"
)
// sendCommands posts each JSON-RPC command from commandsChan to the node via
// rpcClient, logging every command and its response. It returns the first
// send error encountered, or nil once the channel is drained.
func sendCommands(rpcClient *grpcclient.GRPCClient, commandsChan <-chan string) error {
	for cmd := range commandsChan {
		log.Infof("Sending command %s", cmd)
		reply, err := rpcClient.PostJSON(cmd)
		if err != nil {
			return errors.Wrap(err, "error sending message")
		}
		log.Infof("-> Got response: %s", reply)
	}
	return nil
}

53
stability-tests/run/run-fast.sh Executable file
View File

@ -0,0 +1,53 @@
#!/bin/bash
set -e

# Runs the fast stability-test suite. Each test runs via its own run.sh; a
# failure is recorded in failedTests rather than aborting the script (the
# `|| failedTests+=(...)` also keeps `set -e` from exiting early), so the
# remaining tests still execute.
BASEDIR=$(dirname "$0")
PROJECT_ROOT=$( cd "${BASEDIR}/.."; pwd)
failedTests=()
# echo "Running application-level-garbage"
# cd "${PROJECT_ROOT}/application-level-garbage/run" && ./run.sh || failedTests+=("application-level-garbage")
# echo "Done running application-level-garbage"
echo "Running infra-level-garbage"
cd "${PROJECT_ROOT}/infra-level-garbage/run" && ./run.sh || failedTests+=("infra-level-garbage")
echo "Done running infra-level-garbage"
echo "Running kaspadsanity"
cd "${PROJECT_ROOT}/kaspadsanity/run" && ./run.sh || failedTests+=("kaspadsanity")
echo "Done running kaspadsanity"
echo "Running rpc-stability"
cd "${PROJECT_ROOT}/rpc-stability/run" && ./run.sh || failedTests+=("rpc-stability")
echo "Done running rpc-stability"
echo "Running rpc-idle-clients"
cd "${PROJECT_ROOT}/rpc-idle-clients/run" && ./run.sh || failedTests+=("rpc-idle-clients")
echo "Done running rpc-idle-clients"
echo "Running simple-sync"
cd "${PROJECT_ROOT}/simple-sync/run" && ./run.sh || failedTests+=("simple-sync")
echo "Done running simple-sync"
echo "Running orphans"
cd "${PROJECT_ROOT}/orphans/run" && ./run.sh || failedTests+=("orphans")
echo "Done running orphans"
echo "Running reorg"
cd "${PROJECT_ROOT}/reorg/run" && ./run.sh || failedTests+=("reorg")
echo "Done running reorg"
echo "Running netsync - fast"
cd "${PROJECT_ROOT}/netsync/run" && ./run-fast.sh || failedTests+=("netsync")
echo "Done running netsync - fast"

# Report every failed test and exit non-zero if there were any.
EXIT_CODE=0
for t in "${failedTests[@]}"; do
  EXIT_CODE=1
  echo "FAILED: ${t}"
done
echo "Exiting with: ${EXIT_CODE}"
exit $EXIT_CODE

52
stability-tests/run/run-slow.sh Executable file
View File

@ -0,0 +1,52 @@
#!/bin/bash
set -e

# Runs the full (slow) stability-test suite. Each test runs via its own
# run.sh; a failure is recorded in failedTests rather than aborting the
# script (the `|| failedTests+=(...)` also keeps `set -e` from exiting
# early), so the remaining tests still execute.
BASEDIR=$(dirname "$0")
PROJECT_ROOT=$( cd "${BASEDIR}/.."; pwd)
failedTests=()
# echo "Running application-level-garbage"
# cd "${PROJECT_ROOT}/application-level-garbage/run" && ./run.sh || failedTests+=("application-level-garbage")
# echo "Done running application-level-garbage"
echo "Running infra-level-garbage"
cd "${PROJECT_ROOT}/infra-level-garbage/run" && ./run.sh || failedTests+=("infra-level-garbage")
echo "Done running infra-level-garbage"
echo "Running kaspadsanity"
cd "${PROJECT_ROOT}/kaspadsanity/run" && ./run.sh || failedTests+=("kaspadsanity")
echo "Done running kaspadsanity"
echo "Running rpc-stability"
cd "${PROJECT_ROOT}/rpc-stability/run" && ./run.sh || failedTests+=("rpc-stability")
echo "Done running rpc-stability"
echo "Running rpc-idle-clients"
cd "${PROJECT_ROOT}/rpc-idle-clients/run" && ./run.sh || failedTests+=("rpc-idle-clients")
echo "Done running rpc-idle-clients"
echo "Running simple-sync"
cd "${PROJECT_ROOT}/simple-sync/run" && ./run.sh || failedTests+=("simple-sync")
echo "Done running simple-sync"
echo "Running orphans"
cd "${PROJECT_ROOT}/orphans/run" && ./run.sh || failedTests+=("orphans")
echo "Done running orphans"
echo "Running reorg"
cd "${PROJECT_ROOT}/reorg/run" && ./run.sh || failedTests+=("reorg")
echo "Done running reorg"
echo "Running netsync - slow"
# Fixed: the quotes were misplaced (`${PROJECT_ROOT}/netsync/run""`), leaving
# the path unquoted and appending an empty string instead of quoting it.
cd "${PROJECT_ROOT}/netsync/run" && ./run.sh || failedTests+=("netsync")
echo "Done running netsync - slow"

# Report every failed test and exit non-zero if there were any.
EXIT_CODE=0
for t in "${failedTests[@]}"; do
  EXIT_CODE=1
  echo "FAILED: ${t}"
done
echo "Exiting with: ${EXIT_CODE}"
exit $EXIT_CODE

19
stability-tests/run/run.sh Executable file
View File

@ -0,0 +1,19 @@
#!/bin/bash

# Entry point for the stability-test suite. Pass "slow" as the first argument
# to run the full suite; anything else runs the fast suite.
#
# Fixed: the original combined `set -e` with a `TEST_EXIT_CODE=$?` capture,
# but under `set -e` a failing sub-script aborted the script before the
# capture and the final report ever ran. Capture the exit code explicitly
# instead.
BASEDIR=$(dirname "$0")
TEST_EXIT_CODE=0

if [[ $1 == "slow" ]];
then
  echo "Running slow stability tests"
  "${BASEDIR}/run-slow.sh" || TEST_EXIT_CODE=$?
  echo "Done running slow stability tests"
else
  echo "Running fast stability tests"
  "${BASEDIR}/run-fast.sh" || TEST_EXIT_CODE=$?
  echo "Done running fast stability tests"
fi

echo "Exit code: $TEST_EXIT_CODE"
exit $TEST_EXIT_CODE

View File

@ -0,0 +1,10 @@
# Simple Sync Stability Tester
This tests that two nodes that are connected to each other
stay synced while one of them mines a chain.
## Running
1. `go install` kaspad and simple-sync.
2. `cd run`
3. `./run.sh`

View File

@ -0,0 +1,13 @@
package main
import (
"os/exec"
"syscall"
)
// killWithSigterm asks cmd's process to terminate gracefully via SIGTERM,
// logging (but not failing) if the signal could not be delivered.
func killWithSigterm(cmd *exec.Cmd, commandName string) {
	err := cmd.Process.Signal(syscall.SIGTERM)
	if err != nil {
		// The signal sent is SIGTERM; the previous message wrongly said
		// SIGKILL and dropped the underlying error.
		log.Criticalf("error sending SIGTERM to %s: %s", commandName, err)
	}
}

View File

@ -0,0 +1,51 @@
package main
import (
"path/filepath"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/stability-tests/common"
)
const (
	// Default file names for this tool's log and error-log outputs.
	defaultLogFilename    = "simplesync.log"
	defaultErrLogFilename = "simplesync_err.log"
)

var (
	// Default configuration options
	defaultLogFile    = filepath.Join(common.DefaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(common.DefaultHomeDir, defaultErrLogFilename)
)
// configFlags holds the command-line configuration for the simple-sync test.
type configFlags struct {
	LogLevel       string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"`
	NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine" required:"true"`
	Profile        string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	config.NetworkFlags
}
// cfg is the process-wide configuration, populated by parseConfig.
var cfg *configFlags

// activeConfig returns the parsed configuration for this process.
func activeConfig() *configFlags {
	return cfg
}
// parseConfig populates the package-level cfg from the command-line flags,
// resolves the selected network and initializes logging.
func parseConfig() error {
	cfg = &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	if _, err := parser.Parse(); err != nil {
		return err
	}
	if err := cfg.ResolveNetwork(parser); err != nil {
		return err
	}

	initLog(defaultLogFile, defaultErrLogFile)
	return nil
}

View File

@ -0,0 +1,30 @@
package main
import (
"fmt"
"os"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// backendLog is the logging backend all of this tool's output goes through.
	backendLog = logger.NewBackend()
	// log is this tool's logger, tagged KSSA.
	log = backendLog.Logger("KSSA")
	// spawn is a goroutine wrapper that routes panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog sets the logger level from cfg.LogLevel (defaulting to debug) and
// attaches the log and error-log files to the backend. It exits the process
// if the configured level name is not recognized.
func initLog(logFile, errLogFile string) {
	level := logger.LevelDebug
	if activeConfig().LogLevel != "" {
		var ok bool
		level, ok = logger.LevelFromString(activeConfig().LogLevel)
		if !ok {
			// Fixed message: was "doesn't exists" and lacked a newline.
			fmt.Fprintf(os.Stderr, "Log level %s doesn't exist\n", activeConfig().LogLevel)
			os.Exit(1)
		}
	}
	log.SetLevel(level)
	common.InitBackend(backendLog, logFile, errLogFile)
}

Some files were not shown because too many files have changed in this diff Show More