Compare commits

..

9 Commits

Author SHA1 Message Date
Elichai Turkel
bb96719698 Better benchmark 2021-02-09 16:03:41 +02:00
stasatdaglabs
1564972908 Add more logs. 2021-01-31 12:29:35 +02:00
stasatdaglabs
f0b772f4d6 Add more logs, In TestPickVirtualParents, only print relevant logs. 2021-01-31 09:56:22 +02:00
stasatdaglabs
8e09bc9cb6 Merge remote-tracking branch 'origin/large-reorg-logs' into multiple-chain-slowdown 2021-01-29 12:08:29 +02:00
stasatdaglabs
5cf1663108 Add logging. 2021-01-29 12:07:30 +02:00
stasatdaglabs
cda9d5f27e Fix an error string. 2021-01-29 11:50:53 +02:00
stasatdaglabs
ceb7cda983 Implement TestPickVirtualParents. 2021-01-29 11:50:12 +02:00
stasatdaglabs
5dfc630980 Merge branch 'v0.9.0-dev' into large-reorg-logs 2021-01-29 10:48:24 +02:00
stasatdaglabs
e6da05679f Add logs to help debug long virtual parent selection. 2021-01-29 10:46:44 +02:00
34 changed files with 398 additions and 473 deletions

View File

@@ -34,7 +34,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.15
go-version: 1.14
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
@@ -60,7 +60,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.15
go-version: 1.14
- name: Create coverage file
# Because of https://github.com/golang/go/issues/27333 this seems to "fail" even though nothing is wrong, so ignore the failure

View File

@@ -18,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
## Requirements
Go 1.15 or later.
Go 1.14 or later.
## Installation

View File

@@ -22,7 +22,7 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
panic(err)
}
log.Errorf("error from %s: %s", flowName, err)
log.Errorf("error from %s: %+v", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {

View File

@@ -60,7 +60,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
}
if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
return nil, protocolerrors.New(false, "connected to self")
return nil, protocolerrors.New(true, "connected to self")
}
// Disconnect and ban peers from a different network

View File

@@ -18,7 +18,6 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"sync"
"testing"
"time"
)
@@ -106,7 +105,6 @@ type fakeRelayInvsContext struct {
validateAndInsertImportedPruningPointResponse error
getBlockInfoResponse *externalapi.BlockInfo
validateAndInsertBlockResponse error
rwLock sync.RWMutex
}
func (f *fakeRelayInvsContext) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {
@@ -130,8 +128,6 @@ func (f *fakeRelayInvsContext) GetBlockHeader(blockHash *externalapi.DomainHash)
}
func (f *fakeRelayInvsContext) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
if f.getBlockInfoResponse != nil {
return f.getBlockInfoResponse, nil
}
@@ -171,8 +167,6 @@ func (f *fakeRelayInvsContext) AppendImportedPruningPointUTXOs(outpointAndUTXOEn
}
func (f *fakeRelayInvsContext) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.validateAndInsertImportedPruningPointResponse
}
@@ -185,16 +179,12 @@ func (f *fakeRelayInvsContext) CreateBlockLocator(lowHash, highHash *externalapi
}
func (f *fakeRelayInvsContext) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return externalapi.BlockLocator{
f.params.GenesisHash,
}, nil
}
func (f *fakeRelayInvsContext) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return externalapi.BlockLocator{
f.params.GenesisHash,
}, nil
@@ -213,8 +203,6 @@ func (f *fakeRelayInvsContext) GetVirtualInfo() (*externalapi.VirtualInfo, error
}
func (f *fakeRelayInvsContext) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.isValidPruningPointResponse, nil
}
@@ -243,8 +231,6 @@ func (f *fakeRelayInvsContext) Domain() domain.Domain {
}
func (f *fakeRelayInvsContext) Config() *config.Config {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
@@ -283,59 +269,13 @@ func (f *fakeRelayInvsContext) IsIBDRunning() bool {
}
func (f *fakeRelayInvsContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.trySetIBDRunningResponse
}
func (f *fakeRelayInvsContext) UnsetIBDRunning() {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
close(f.finishedIBD)
}
func (f *fakeRelayInvsContext) SetValidateAndInsertBlockResponse(err error) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.validateAndInsertBlockResponse = err
}
func (f *fakeRelayInvsContext) SetValidateAndInsertImportedPruningPointResponse(err error) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.validateAndInsertImportedPruningPointResponse = err
}
func (f *fakeRelayInvsContext) SetGetBlockInfoResponse(info externalapi.BlockInfo) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.getBlockInfoResponse = &info
}
func (f *fakeRelayInvsContext) SetTrySetIBDRunningResponse(b bool) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.trySetIBDRunningResponse = b
}
func (f *fakeRelayInvsContext) SetIsValidPruningPointResponse(b bool) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.isValidPruningPointResponse = b
}
func (f *fakeRelayInvsContext) GetGenesisHeader() externalapi.BlockHeader {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.params.GenesisBlock.Header
}
func (f *fakeRelayInvsContext) GetFinishedIBDChan() chan struct{} {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.finishedIBD
}
func TestHandleRelayInvs(t *testing.T) {
triggerIBD := func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
@@ -349,7 +289,10 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestRelayBlocks)
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
defer func() {
context.validateAndInsertBlockResponse = nil
}()
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
if err != nil {
@@ -399,10 +342,10 @@ func TestHandleRelayInvs(t *testing.T) {
name: "sending a known invalid inv",
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusInvalid,
})
}
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(knownInvalidBlockHash))
if err != nil {
@@ -459,7 +402,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestRelayBlocks)
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(invalidBlock))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -483,7 +426,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestRelayBlocks)
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -509,7 +452,7 @@ func TestHandleRelayInvs(t *testing.T) {
{
name: "starting IBD when peer is already in IBD",
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
context.SetTrySetIBDRunningResponse(false)
context.trySetIBDRunningResponse = false
triggerIBD(t, incomingRoute, outgoingRoute, context)
checkNoActivity(t, outgoingRoute)
@@ -615,15 +558,15 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestHeaders)
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(
appmessage.NewBlockHeadersMessage(
[]*appmessage.MsgBlockHeader{
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
),
)
if err != nil {
@@ -638,10 +581,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
@@ -655,7 +598,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
context.SetIsValidPruningPointResponse(false)
context.isValidPruningPointResponse = false
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -687,11 +630,11 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestHeaders)
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrDuplicateBlock)
context.validateAndInsertBlockResponse = ruleerrors.ErrDuplicateBlock
err = incomingRoute.Enqueue(
appmessage.NewBlockHeadersMessage(
[]*appmessage.MsgBlockHeader{
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
),
)
if err != nil {
@@ -706,10 +649,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
@@ -723,7 +666,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
context.SetIsValidPruningPointResponse(false)
context.isValidPruningPointResponse = false
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -755,7 +698,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestHeaders)
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
err = incomingRoute.Enqueue(
appmessage.NewBlockHeadersMessage(
[]*appmessage.MsgBlockHeader{
@@ -795,10 +738,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -847,10 +790,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -863,7 +806,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
context.SetIsValidPruningPointResponse(false)
context.isValidPruningPointResponse = false
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -897,10 +840,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -962,10 +905,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1025,10 +968,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1094,10 +1037,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1121,7 +1064,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrBadMerkleRoot
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1161,10 +1104,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1188,7 +1131,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrSuggestedPruningViolatesFinality)
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrSuggestedPruningViolatesFinality
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1225,10 +1168,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1304,10 +1247,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1381,10 +1324,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1424,7 +1367,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestIBDBlocks)
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1474,11 +1417,11 @@ func TestHandleRelayInvs(t *testing.T) {
}
select {
case <-context.GetFinishedIBDChan():
case <-context.finishedIBD:
if !test.expectsIBDToFinish {
t.Fatalf("IBD unexpecetedly finished")
}
case <-time.After(10 * time.Second):
case <-time.After(time.Second):
if test.expectsIBDToFinish {
t.Fatalf("IBD didn't finished after %d", time.Second)
}
@@ -1493,7 +1436,7 @@ func TestHandleRelayInvs(t *testing.T) {
if !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("unexpected error %+v", err)
}
case <-time.After(10 * time.Second):
case <-time.After(time.Second):
t.Fatalf("waiting for flow to finish timed out after %s", time.Second)
}
}

View File

@@ -78,7 +78,11 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
m.context.ConnectionManager().Ban(netConnection)
err := m.context.ConnectionManager().Ban(netConnection)
if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
panic(err)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgReject(protocolErr.Error()))
if err != nil && !errors.Is(err, routerpkg.ErrRouteClosed) {
panic(err)

View File

@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad
## Requirements
Go 1.15 or later.
Go 1.14 or later.
## Installation

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.15-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad

View File

@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad
## Requirements
Go 1.15 or later.
Go 1.14 or later.
## Installation

View File

@@ -17,9 +17,8 @@ import (
)
const (
defaultLogFilename = "kaspaminer.log"
defaultErrLogFilename = "kaspaminer_err.log"
defaultTargetBlockRateRatio = 2.0
defaultLogFilename = "kaspaminer.log"
defaultErrLogFilename = "kaspaminer_err.log"
)
var (
@@ -31,13 +30,13 @@ var (
)
type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
TargetBlocksPerSecond *float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. 0 means no limit (The default one is 2 * target network block rate)"`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
TargetBlocksPerSecond float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. This flag is for debugging purposes."`
config.NetworkFlags
}
@@ -65,11 +64,6 @@ func parseConfig() (*configFlags, error) {
return nil, err
}
if cfg.TargetBlocksPerSecond == nil {
targetBlocksPerSecond := defaultTargetBlockRateRatio / cfg.NetParams().TargetTimePerBlock.Seconds()
cfg.TargetBlocksPerSecond = &targetBlocksPerSecond
}
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.15-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad

View File

@@ -48,7 +48,7 @@ func main() {
doneChan := make(chan struct{})
spawn("mineLoop", func() {
err = mineLoop(client, cfg.NumberOfBlocks, *cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
err = mineLoop(client, cfg.NumberOfBlocks, cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
if err != nil {
panic(errors.Wrap(err, "error in mine loop"))
}

View File

@@ -167,8 +167,6 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
newTemplateChan <- template
}
getBlockTemplate()
const tickerTime = 500 * time.Millisecond
ticker := time.NewTicker(tickerTime)
for {
select {
case <-stopChan:
@@ -176,8 +174,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
return
case <-client.blockAddedNotificationChan:
getBlockTemplate()
ticker.Reset(tickerTime)
case <-ticker.C:
case <-time.Tick(500 * time.Millisecond):
getBlockTemplate()
}
}

View File

@@ -10,7 +10,7 @@ It is capable of generating wallet key-pairs, printing a wallet's current balanc
## Requirements
Go 1.15 or later.
Go 1.14 or later.
## Installation

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.15-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad

View File

@@ -232,12 +232,12 @@ func TestBoundedMergeDepth(t *testing.T) {
}
factory := NewFactory()
consensusBuild, teardownFunc1, err := factory.NewTestConsensus(params, false, "TestBoundedMergeTestBuild")
consensusBuild, teardownFunc1, err := factory.NewTestConsensus(params, false, "BoundedMergeTestBuild")
if err != nil {
t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
}
consensusReal, teardownFunc2, err := factory.NewTestConsensus(params, false, "TestBoundedMergeTestReal")
consensusReal, teardownFunc2, err := factory.NewTestConsensus(params, false, "BoundedMergeTestReal")
if err != nil {
t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
}

View File

@@ -1,8 +1,6 @@
package blockvalidator
import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@@ -31,21 +29,6 @@ func (v *blockValidator) ValidateHeaderInContext(blockHash *externalapi.DomainHa
if err != nil {
return err
}
var logErr error
log.Debug(logger.NewLogClosure(func() string {
var ghostdagData *model.BlockGHOSTDAGData
ghostdagData, logErr = v.ghostdagDataStore.Get(v.databaseContext, blockHash)
if err != nil {
return ""
}
return fmt.Sprintf("block %s blue score is %d", blockHash, ghostdagData.BlueScore())
}))
if logErr != nil {
return logErr
}
}
err = v.validateMedianTime(header)

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -15,8 +16,10 @@ import (
func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *externalapi.DomainHash) (
model.UTXODiff, externalapi.AcceptanceData, model.Multiset, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "CalculatePastUTXOAndAcceptanceData")
defer onEnd()
log.Debugf("CalculatePastUTXOAndAcceptanceData start for block %s", blockHash)
defer log.Debugf("CalculatePastUTXOAndAcceptanceData end for block %s", blockHash)
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Block %s is the genesis. By definition, "+
@@ -35,6 +38,9 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *
if err != nil {
return nil, nil, nil, err
}
log.Debugf("Restored the past UTXO of block %s with selectedParent %s. "+
"Diff toAdd length: %d, toRemove length: %d", blockHash, blockGHOSTDAGData.SelectedParent(),
selectedParentPastUTXO.ToAdd().Len(), selectedParentPastUTXO.ToRemove().Len())
log.Debugf("Applying blue blocks to the selected parent past UTXO of block %s", blockHash)
acceptanceData, utxoDiff, err := csm.applyMergeSetBlocks(blockHash, selectedParentPastUTXO, blockGHOSTDAGData)
@@ -53,8 +59,10 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *
}
func (csm *consensusStateManager) restorePastUTXO(blockHash *externalapi.DomainHash) (model.MutableUTXODiff, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "restorePastUTXO")
defer onEnd()
log.Debugf("restorePastUTXO start for block %s", blockHash)
defer log.Debugf("restorePastUTXO end for block %s", blockHash)
var err error
@@ -110,8 +118,10 @@ func (csm *consensusStateManager) applyMergeSetBlocks(blockHash *externalapi.Dom
selectedParentPastUTXODiff model.MutableUTXODiff, ghostdagData *model.BlockGHOSTDAGData) (
externalapi.AcceptanceData, model.MutableUTXODiff, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "applyMergeSetBlocks")
defer onEnd()
log.Debugf("applyMergeSetBlocks start for block %s", blockHash)
defer log.Tracef("applyMergeSetBlocks end for block %s", blockHash)
mergeSetHashes := ghostdagData.MergeSet()
log.Debugf("Merge set for block %s is %v", blockHash, mergeSetHashes)

View File

@@ -1,6 +1,7 @@
package consensusstatemanager
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -9,8 +10,10 @@ import (
)
func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "pickVirtualParents")
defer onEnd()
log.Debugf("pickVirtualParents start for tips len: %d", len(tips))
defer log.Debugf("pickVirtualParents end for tips len: %d", len(tips))
log.Debugf("Pushing all tips into a DownHeap")
candidatesHeap := csm.dagTraversalManager.NewDownHeap()
@@ -84,8 +87,8 @@ func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainH
func (csm *consensusStateManager) selectVirtualSelectedParent(
candidatesHeap model.BlockHeap) (*externalapi.DomainHash, error) {
log.Tracef("selectVirtualSelectedParent start")
defer log.Tracef("selectVirtualSelectedParent end")
onEnd := logger.LogAndMeasureExecutionTime(log, "selectVirtualSelectedParent")
defer onEnd()
disqualifiedCandidates := hashset.New()
@@ -153,8 +156,8 @@ func (csm *consensusStateManager) selectVirtualSelectedParent(
func (csm *consensusStateManager) mergeSetIncrease(
candidate *externalapi.DomainHash, selectedVirtualParents hashset.HashSet) (uint64, error) {
log.Tracef("mergeSetIncrease start")
defer log.Tracef("mergeSetIncrease end")
onEnd := logger.LogAndMeasureExecutionTime(log, "mergeSetIncrease")
defer onEnd()
visited := hashset.New()
queue := csm.dagTraversalManager.NewDownHeap()
@@ -204,8 +207,10 @@ func (csm *consensusStateManager) mergeSetIncrease(
func (csm *consensusStateManager) boundedMergeBreakingParents(
parents []*externalapi.DomainHash) (hashset.HashSet, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "boundedMergeBreakingParents")
defer onEnd()
log.Tracef("boundedMergeBreakingParents start for parents: %s", parents)
defer log.Tracef("boundedMergeBreakingParents end for parents: %s", parents)
log.Debug("Temporarily setting virtual to all parents, so that we can run ghostdag on it")
err := csm.dagTopologyManager.SetParents(model.VirtualBlockHash, parents)

View File

@@ -0,0 +1,117 @@
package consensusstatemanager_test
import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"io/ioutil"
"os/user"
"path"
"runtime/pprof"
"testing"
"time"
)
var log, _ = logger.Get(logger.SubsystemTags.CMGR)
// TestPickVirtualParents stress-tests virtual parent selection: it builds ten
// independent chains over the genesis (each in its own throwaway consensus
// instance), then replays all of their blocks into a single shared consensus.
// Each validation is CPU-profiled; the profile of the single slowest
// validation is written to the user's home directory, along with which
// chain/block it was and the average validation time per chain.
func TestPickVirtualParents(t *testing.T) {
	usr, err := user.Current()
	if err != nil {
		t.Fatal(err)
	}

	const chainSize = 97
	params := dagconfig.DevnetParams
	params.SkipProofOfWork = true // blocks are built without mining
	factory := consensus.NewFactory()

	var chains [10][]*externalapi.DomainBlock
	// Build ten chains over the genesis, each in a separate consensus
	// instance so the chains do not see each other while being built.
	for chainIndex := range chains {
		// Wrapped in a closure so each instance's teardown runs per
		// iteration rather than accumulating until the test returns.
		func() {
			tipHash := params.GenesisHash
			builder, teardown, err := factory.NewTestConsensus(&params, false, fmt.Sprintf("TestPickVirtualParents: %d", chainIndex))
			if err != nil {
				t.Fatalf("Error setting up consensus: %+v", err)
			}
			defer teardown(false)
			for blockIndex := 0; blockIndex < chainSize; blockIndex++ {
				scriptPubKey, _ := testutils.OpTrueScript()
				// extraData makes otherwise-identical blocks differ between chains
				extraData := []byte{byte(chainIndex)}
				block, _, err := builder.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, &externalapi.DomainCoinbaseData{scriptPubKey, extraData}, nil)
				if err != nil {
					t.Fatalf("Could not build block: %s", err)
				}
				_, err = builder.ValidateAndInsertBlock(block)
				if err != nil {
					t.Fatalf("Could not validate and insert block: %s", err)
				}
				chains[chainIndex] = append(chains[chainIndex], block)
				tipHash = consensushashing.BlockHash(block)
			}
			fmt.Printf("Finished Building chain: %d\n", chainIndex)
		}()
	}

	testConsensus, teardown, err := factory.NewTestConsensus(&params, false, "TestPickVirtualParents")
	if err != nil {
		t.Fatalf("Error setting up consensus: %+v", err)
	}
	defer teardown(false)

	var maxTime time.Duration
	var maxString string
	var profName string
	maxProf := make([]byte, 0, 1024)
	// Replay all ten chains into the shared consensus, profiling each
	// block's validation and remembering the slowest one.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	for chainIndex, chain := range chains {
		accumulatedValidationTime := time.Duration(0)
		for blockIndex, block := range chain {
			// Only the tail of the last chain exercises the slow
			// virtual-parent-selection path, so enable debug
			// logging only there to keep the log readable.
			if chainIndex == 9 && blockIndex > 90 {
				logger.InitLog(path.Join(usr.HomeDir, "TestPickVirtualParents.log"), path.Join(usr.HomeDir, "TestPickVirtualParents_err.log"))
				logger.SetLogLevels("debug")
			}
			log.Debugf("Starting chain:#%d, block: #%d", chainIndex, blockIndex)
			blockHash := consensushashing.BlockHash(block)
			buf.Reset()
			err = pprof.StartCPUProfile(buf)
			if err != nil {
				t.Fatal(err)
			}
			start := time.Now()
			_, err := testConsensus.ValidateAndInsertBlock(block)
			validationTime := time.Since(start)
			pprof.StopCPUProfile()
			if err != nil {
				t.Fatalf("Failed to validate block %s: %s", blockHash, err)
			}
			// Keep a copy of the CPU profile of the slowest validation seen so far.
			if validationTime > maxTime {
				maxTime = validationTime
				maxString = fmt.Sprintf("Chain: %d, Block: %d", chainIndex, blockIndex)
				profName = fmt.Sprintf("TestPickVirtualParents-chain-%d-block-%d.pprof", chainIndex, blockIndex)
				maxProf = append(maxProf[:0], buf.Bytes()...)
			}
			accumulatedValidationTime += validationTime
			log.Debugf("Validated block #%d in chain #%d, took %s\n", blockIndex, chainIndex, validationTime)
		}
		averageValidationTime := accumulatedValidationTime / chainSize
		fmt.Printf("Average validation time for chain #%d: %s\n", chainIndex, averageValidationTime)
	}

	// Persist the worst-case profile for offline pprof analysis.
	err = ioutil.WriteFile(path.Join(usr.HomeDir, profName), maxProf, 0644)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Printf("%s, took: %s\n", maxString, maxTime)
}

View File

@@ -3,13 +3,16 @@ package consensusstatemanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
)
func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.DomainHash,
tips []*externalapi.DomainHash) (*externalapi.SelectedChainPath, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "updateVirtual")
defer onEnd()
log.Debugf("updateVirtual start for block %s", newBlockHash)
defer log.Debugf("updateVirtual end for block %s", newBlockHash)
log.Debugf("Saving a reference to the GHOSTDAG data of the old virtual")
var oldVirtualSelectedParent *externalapi.DomainHash
@@ -44,6 +47,9 @@ func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.Domain
if err != nil {
return nil, err
}
log.Debugf("Calculated the past UTXO of the new virtual. "+
"Diff toAdd length: %d, toRemove length: %d",
virtualUTXODiff.ToAdd().Len(), virtualUTXODiff.ToRemove().Len())
log.Debugf("Staging new acceptance data for the virtual block")
csm.acceptanceDataStore.Stage(model.VirtualBlockHash, virtualAcceptanceData)
@@ -73,6 +79,8 @@ func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.Domain
if err != nil {
return nil, err
}
log.Debugf("Selected parent chain changes: %d blocks were removed and %d blocks were added",
len(selectedParentChainChanges.Removed), len(selectedParentChainChanges.Added))
}
return selectedParentChainChanges, nil

View File

@@ -3,9 +3,13 @@ package ghostdagmanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
)
func (gm *ghostdagManager) findSelectedParent(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "ghostdagManager.findSelectedParent")
defer onEnd()
var selectedParent *externalapi.DomainHash
for _, hash := range parentHashes {
if selectedParent == nil {

View File

@@ -3,6 +3,7 @@ package ghostdagmanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math/big"
@@ -40,6 +41,9 @@ func (bg *blockGHOSTDAGData) toModel() *model.BlockGHOSTDAGData {
//
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "GHOSTDAG")
defer onEnd()
newBlockData := &blockGHOSTDAGData{
blueWork: new(big.Int),
mergeSetBlues: make([]*externalapi.DomainHash, 0),
@@ -69,6 +73,7 @@ func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
return err
}
onMergeSetWithoutSelectedParentEnd := logger.LogAndMeasureExecutionTime(log, "GHOSTDAG.mergeSetWithoutSelectedParent")
for _, blueCandidate := range mergeSetWithoutSelectedParent {
isBlue, candidateAnticoneSize, candidateBluesAnticoneSizes, err := gm.checkBlueCandidate(newBlockData.toModel(), blueCandidate)
if err != nil {
@@ -86,6 +91,7 @@ func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
newBlockData.mergeSetReds = append(newBlockData.mergeSetReds, blueCandidate)
}
}
onMergeSetWithoutSelectedParentEnd()
if !isGenesis {
selectedParentGHOSTDAGData, err := gm.ghostdagDataStore.Get(gm.databaseContext, newBlockData.selectedParent)

View File

@@ -0,0 +1,7 @@
package ghostdagmanager
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

2
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/kaspanet/kaspad
go 1.15
go 1.14
require (
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd

View File

@@ -5,7 +5,9 @@
package config
import (
"bufio"
"fmt"
"io"
"net"
"os"
"path/filepath"
@@ -14,15 +16,18 @@ import (
"strings"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/pkg/errors"
"github.com/btcsuite/go-socks/socks"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/network"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
)
const (
@@ -239,7 +244,9 @@ func LoadConfig() (*Config, error) {
cfg := &Config{
Flags: cfgFlags,
}
if !preCfg.Simnet || preCfg.ConfigFile != defaultConfigFile {
if !preCfg.Simnet || preCfg.ConfigFile !=
defaultConfigFile {
if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {
err := createDefaultConfigFile(preCfg.ConfigFile)
if err != nil {
@@ -586,6 +593,13 @@ func createDefaultConfigFile(destinationPath string) error {
return err
}
// We assume sample config file path is same as binary
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return err
}
sampleConfigPath := filepath.Join(path, sampleConfigFilename)
dest, err := os.OpenFile(destinationPath,
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
@@ -593,7 +607,25 @@ func createDefaultConfigFile(destinationPath string) error {
}
defer dest.Close()
_, err = dest.WriteString(sampleConfig)
src, err := os.Open(sampleConfigPath)
if err != nil {
return err
}
defer src.Close()
return err
// We copy every line from the sample config file to the destination
reader := bufio.NewReader(src)
for err != io.EOF {
var line string
line, err = reader.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if _, err := dest.WriteString(line); err != nil {
return err
}
}
return nil
}

View File

@@ -18,7 +18,7 @@ func TestCreateDefaultConfigFile(t *testing.T) {
if !ok {
t.Fatalf("Failed finding config file path")
}
sampleConfigFile := filepath.Join(filepath.Dir(path), "sample-kaspad.conf")
sampleConfigFile := filepath.Join(filepath.Dir(path), "..", "..", "sample-kaspad.conf")
// Setup a temporary directory
tmpDir, err := ioutil.TempDir("", "kaspad")

View File

@@ -1,238 +0,0 @@
package config
// This should be identical to the content of sample-kaspad.conf
// TODO: Replace with go:embed once go1.16 lands
var sampleConfig = `[Application Options]
; ------------------------------------------------------------------------------
; Data settings
; ------------------------------------------------------------------------------
; The directory to store data such as the block DAG and peer addresses. The
; block DAG takes several GB, so this location must have a lot of free space.
; The default is ~/.kaspad/data on POSIX OSes, $LOCALAPPDATA/Kaspad/data on Windows,
; ~/Library/Application Support/Kaspad/data on Mac OS, and $home/kaspad/data on
; Plan9. Environment variables are expanded so they may be used. NOTE: Windows
; environment variables are typically %VARIABLE%, but they must be accessed with
; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows.
; datadir=~/.kaspad/data
; ------------------------------------------------------------------------------
; Network settings
; ------------------------------------------------------------------------------
; Use testnet.
; testnet=1
; Connect via a SOCKS5 proxy. NOTE: Specifying a proxy will disable listening
; for incoming connections unless listen addresses are provided via the 'listen'
; option.
; proxy=127.0.0.1:9050
; proxyuser=
; proxypass=
; Use Universal Plug and Play (UPnP) to automatically open the listen port
; and obtain the external IP address from supported devices. NOTE: This option
; will have no effect if external IP addresses are specified.
; upnp=1
; Specify the external IP addresses your node is listening on. One address per
; line. kaspad will not contact 3rd-party sites to obtain external ip addresses.
; This means if you are behind NAT, your node will not be able to advertise a
; reachable address unless you specify it here or enable the 'upnp' option (and
; have a supported device).
; externalip=1.2.3.4
; externalip=2002::1234
; ******************************************************************************
; Summary of 'addpeer' versus 'connect'.
;
; Only one of the following two options, 'addpeer' and 'connect', may be
; specified. Both allow you to specify peers that you want to stay connected
; with, but the behavior is slightly different. By default, kaspad will query DNS
; to find peers to connect to, so unless you have a specific reason such as
; those described below, you probably won't need to modify anything here.
;
; 'addpeer' does not prevent connections to other peers discovered from
; the peers you are connected to and also lets the remote peers know you are
; available so they can notify other peers they can to connect to you. This
; option might be useful if you are having problems finding a node for some
; reason (perhaps due to a firewall).
;
; 'connect', on the other hand, will ONLY connect to the specified peers and
; no others. It also disables listening (unless you explicitly set listen
; addresses via the 'listen' option) and DNS seeding, so you will not be
; advertised as an available peer to the peers you connect to and won't accept
; connections from any other peers. So, the 'connect' option effectively allows
; you to only connect to "trusted" peers.
; ******************************************************************************
; Add persistent peers to connect to as desired. One peer per line.
; You may specify each IP address with or without a port. The default port will
; be added automatically if one is not specified here.
; addpeer=192.168.1.1
; addpeer=10.0.0.2:16111
; addpeer=fe80::1
; addpeer=[fe80::2]:16111
; Add persistent peers that you ONLY want to connect to as desired. One peer
; per line. You may specify each IP address with or without a port. The
; default port will be added automatically if one is not specified here.
; NOTE: Specifying this option has other side effects as described above in
; the 'addpeer' versus 'connect' summary section.
; connect=192.168.1.1
; connect=10.0.0.2:16111
; connect=fe80::1
; connect=[fe80::2]:16111
; Maximum number of inbound and outbound peers.
; maxinpeers=125
; Disable banning of misbehaving peers.
; nobanning=1
; Maximum allowed ban score before disconnecting and banning misbehaving peers.
; banthreshold=100
; How long to ban misbehaving peers. Valid time units are {s, m, h}.
; Minimum 1s.
; banduration=24h
; banduration=11h30m15s
; Add whitelisted IP networks and IPs. Connected peers whose IP matches a
; whitelist will not have their ban score increased.
; whitelist=127.0.0.1
; whitelist=::1
; whitelist=192.168.0.0/24
; whitelist=fd00::/16
; Disable DNS seeding for peers. By default, when kaspad starts, it will use
; DNS to query for available peers to connect with.
; nodnsseed=1
; Specify the interfaces to listen on. One listen address per line.
; NOTE: The default port is modified by some options such as 'testnet', so it is
; recommended to not specify a port and allow a proper default to be chosen
; unless you have a specific reason to do otherwise.
; All interfaces on default port (this is the default):
; listen=
; All ipv4 interfaces on default port:
; listen=0.0.0.0
; All ipv6 interfaces on default port:
; listen=::
; All interfaces on port 16111:
; listen=:16111
; All ipv4 interfaces on port 16111:
; listen=0.0.0.0:16111
; All ipv6 interfaces on port 16111:
; listen=[::]:16111
; Only ipv4 localhost on port 8333:
; listen=127.0.0.1:8333
; Only ipv6 localhost on port 8333:
; listen=[::1]:8333
; Only ipv4 localhost on non-standard port 8336:
; listen=127.0.0.1:8336
; All interfaces on non-standard port 8336:
; listen=:8336
; All ipv4 interfaces on non-standard port 8336:
; listen=0.0.0.0:8336
; All ipv6 interfaces on non-standard port 8336:
; listen=[::]:8336
; Disable listening for incoming connections. This will override all listeners.
; nolisten=1
; Disable peer bloom filtering. See BIP0111.
; nopeerbloomfilters=1
; Add comments to the user agent that is advertised to peers.
; Must not include characters '/', ':', '(' and ')'.
; uacomment=
; ------------------------------------------------------------------------------
; RPC server options - The following options control the built-in RPC server
; which is used to control and query information from a running kaspad process.
; ------------------------------------------------------------------------------
; Specify the interfaces for the RPC server listen on. One listen address per
; line. NOTE: The default port is modified by some options such as 'testnet',
; so it is recommended to not specify a port and allow a proper default to be
; chosen unless you have a specific reason to do otherwise. By default, the
; RPC server will only listen on localhost for IPv4 and IPv6.
; All interfaces on default port:
; rpclisten=
; All ipv4 interfaces on default port:
; rpclisten=0.0.0.0
; All ipv6 interfaces on default port:
; rpclisten=::
; All interfaces on port 16110:
; rpclisten=:16110
; All ipv4 interfaces on port 16110:
; rpclisten=0.0.0.0:16110
; All ipv6 interfaces on port 16110:
; rpclisten=[::]:16110
; Only ipv4 localhost on port 16110:
; rpclisten=127.0.0.1:16110
; Only ipv6 localhost on port 16110:
; rpclisten=[::1]:16110
; Only ipv4 localhost on non-standard port 8337:
; rpclisten=127.0.0.1:8337
; All interfaces on non-standard port 8337:
; rpclisten=:8337
; All ipv4 interfaces on non-standard port 8337:
; rpclisten=0.0.0.0:8337
; All ipv6 interfaces on non-standard port 8337:
; rpclisten=[::]:8337
; Specify the maximum number of concurrent RPC clients for standard connections.
; rpcmaxclients=10
; Use the following setting to disable the RPC server.
; norpc=1
; ------------------------------------------------------------------------------
; Mempool Settings - The following options
; ------------------------------------------------------------------------------
; Set the minimum transaction fee to be considered a non-zero fee,
; minrelaytxfee=0.00001
; Limit orphan transaction pool to 100 transactions.
; maxorphantx=100
; Do not accept transactions from remote peers.
; blocksonly=1
; Relay non-standard transactions regardless of default network settings.
; relaynonstd=1
; Reject non-standard transactions regardless of default network settings.
; rejectnonstd=1
; ------------------------------------------------------------------------------
; Signature Verification Cache
; ------------------------------------------------------------------------------
; Limit the signature cache to a max of 50000 entries.
; sigcachemaxsize=50000
; ------------------------------------------------------------------------------
; Debug
; ------------------------------------------------------------------------------
; Debug logging level.
; Valid levels are {trace, debug, info, warn, error, critical}
; You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set
; log level for individual subsystems. Use kaspad --debuglevel=show to list
; available subsystems.
; debuglevel=info
; The port used to listen for HTTP profile requests. The profile server will
; be disabled if this option is not specified. The profile information can be
; accessed at http://localhost:<profileport>/debug/pprof once running.
; profile=6061
`

View File

@@ -12,22 +12,16 @@ import (
"github.com/pkg/errors"
)
// addressRandomizer is the interface for the randomizer needed for the AddressManager.
type addressRandomizer interface {
// AddressRandomizer is the interface for the randomizer needed for the AddressManager.
type AddressRandomizer interface {
RandomAddress(addresses []*appmessage.NetAddress) *appmessage.NetAddress
RandomAddresses(addresses []*appmessage.NetAddress, count int) []*appmessage.NetAddress
}
// addressKey represents a pair of IP and port, the IP is always in V6 representation
type addressKey struct {
// AddressKey represents a pair of IP and port, the IP is always in V6 representation
type AddressKey struct {
port uint16
address ipv6
}
type ipv6 [net.IPv6len]byte
func (i ipv6) equal(other ipv6) bool {
return i == other
address [net.IPv6len]byte
}
// ErrAddressNotFound is an error returned from some functions when a
@@ -35,16 +29,16 @@ func (i ipv6) equal(other ipv6) bool {
var ErrAddressNotFound = errors.New("address not found")
// NetAddressKey returns a key of the ip address to use it in maps.
func netAddressKey(netAddress *appmessage.NetAddress) addressKey {
key := addressKey{port: netAddress.Port}
func netAddressKey(netAddress *appmessage.NetAddress) AddressKey {
key := AddressKey{port: netAddress.Port}
// all IPv4 can be represented as IPv6.
copy(key.address[:], netAddress.IP.To16())
return key
}
// netAddressKeys returns a key of the ip address to use it in maps.
func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[addressKey]bool {
result := make(map[addressKey]bool, len(netAddresses))
func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[AddressKey]bool {
result := make(map[AddressKey]bool, len(netAddresses))
for _, netAddress := range netAddresses {
key := netAddressKey(netAddress)
result[key] = true
@@ -56,12 +50,12 @@ func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[addressKey]bool
// AddressManager provides a concurrency safe address manager for caching potential
// peers on the Kaspa network.
type AddressManager struct {
addresses map[addressKey]*appmessage.NetAddress
bannedAddresses map[ipv6]*appmessage.NetAddress
addresses map[AddressKey]*appmessage.NetAddress
bannedAddresses map[AddressKey]*appmessage.NetAddress
localAddresses *localAddressManager
mutex sync.Mutex
cfg *Config
random addressRandomizer
random AddressRandomizer
}
// New returns a new Kaspa address manager.
@@ -72,8 +66,8 @@ func New(cfg *Config) (*AddressManager, error) {
}
return &AddressManager{
addresses: map[addressKey]*appmessage.NetAddress{},
bannedAddresses: map[ipv6]*appmessage.NetAddress{},
addresses: map[AddressKey]*appmessage.NetAddress{},
bannedAddresses: map[AddressKey]*appmessage.NetAddress{},
localAddresses: localAddresses,
random: NewAddressRandomize(),
cfg: cfg,
@@ -117,6 +111,7 @@ func (am *AddressManager) RemoveAddress(address *appmessage.NetAddress) {
key := netAddressKey(address)
delete(am.addresses, key)
delete(am.bannedAddresses, key)
}
// Addresses returns all addresses
@@ -180,23 +175,21 @@ func (am *AddressManager) BestLocalAddress(remoteAddress *appmessage.NetAddress)
}
// Ban marks the given address as banned
func (am *AddressManager) Ban(addressToBan *appmessage.NetAddress) {
func (am *AddressManager) Ban(address *appmessage.NetAddress) error {
am.mutex.Lock()
defer am.mutex.Unlock()
keyToBan := netAddressKey(addressToBan)
keysToDelete := make([]addressKey, 0)
for _, address := range am.addresses {
key := netAddressKey(address)
if key.address.equal(keyToBan.address) {
keysToDelete = append(keysToDelete, key)
}
}
for _, key := range keysToDelete {
delete(am.addresses, key)
key := netAddressKey(address)
addressToBan, ok := am.addresses[key]
if !ok {
return errors.Wrapf(ErrAddressNotFound, "address %s "+
"is not registered with the address manager", address.TCPAddress())
}
am.bannedAddresses[keyToBan.address] = addressToBan
delete(am.addresses, key)
am.bannedAddresses[key] = addressToBan
return nil
}
// Unban unmarks the given address as banned
@@ -205,13 +198,13 @@ func (am *AddressManager) Unban(address *appmessage.NetAddress) error {
defer am.mutex.Unlock()
key := netAddressKey(address)
bannedAddress, ok := am.bannedAddresses[key.address]
bannedAddress, ok := am.bannedAddresses[key]
if !ok {
return errors.Wrapf(ErrAddressNotFound, "address %s "+
"is not registered with the address manager as banned", address.TCPAddress())
}
delete(am.bannedAddresses, key.address)
delete(am.bannedAddresses, key)
am.addresses[key] = bannedAddress
return nil
}
@@ -222,7 +215,7 @@ func (am *AddressManager) IsBanned(address *appmessage.NetAddress) (bool, error)
defer am.mutex.Unlock()
key := netAddressKey(address)
if _, ok := am.bannedAddresses[key.address]; !ok {
if _, ok := am.bannedAddresses[key]; !ok {
if _, ok = am.addresses[key]; !ok {
return false, errors.Wrapf(ErrAddressNotFound, "address %s "+
"is not registered with the address manager", address.TCPAddress())

View File

@@ -7,7 +7,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
)
// AddressRandomize implement addressRandomizer interface
// AddressRandomize implement AddressRandomizer interface
type AddressRandomize struct {
random *rand.Rand
}

View File

@@ -38,7 +38,7 @@ type localAddress struct {
}
type localAddressManager struct {
localAddresses map[addressKey]*localAddress
localAddresses map[AddressKey]*localAddress
lookupFunc func(string) ([]net.IP, error)
cfg *Config
mutex sync.Mutex
@@ -46,7 +46,7 @@ type localAddressManager struct {
func newLocalAddressManager(cfg *Config) (*localAddressManager, error) {
localAddressManager := localAddressManager{
localAddresses: map[addressKey]*localAddress{},
localAddresses: map[AddressKey]*localAddress{},
cfg: cfg,
lookupFunc: cfg.Lookup,
}

View File

@@ -126,41 +126,35 @@ func (c *ConnectionManager) ConnectionCount() int {
}
// Ban marks the given netConnection as banned
func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) {
if c.isPermanent(netConnection.Address()) {
log.Infof("Cannot ban %s because it's a permanent connection", netConnection.Address())
return
}
c.addressManager.Ban(netConnection.NetAddress())
func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) error {
return c.addressManager.Ban(netConnection.NetAddress())
}
// IsBanned returns whether the given netConnection is banned
func (c *ConnectionManager) IsBanned(netConnection *netadapter.NetConnection) (bool, error) {
if c.isPermanent(netConnection.Address()) {
return false, nil
}
return c.addressManager.IsBanned(netConnection.NetAddress())
}
func (c *ConnectionManager) waitTillNextIteration() {
select {
case <-c.resetLoopChan:
c.loopTicker.Reset(connectionsLoopInterval)
c.loopTicker.Stop()
c.loopTicker = time.NewTicker(connectionsLoopInterval)
case <-c.loopTicker.C:
}
}
func (c *ConnectionManager) isPermanent(addressString string) bool {
c.connectionRequestsLock.Lock()
defer c.connectionRequestsLock.Unlock()
if conn, ok := c.activeRequested[addressString]; ok {
return conn.isPermanent
func (c *ConnectionManager) connectionExists(addressString string) bool {
if _, ok := c.activeRequested[addressString]; ok {
return true
}
if conn, ok := c.pendingRequested[addressString]; ok {
return conn.isPermanent
if _, ok := c.activeOutgoing[addressString]; ok {
return true
}
if _, ok := c.activeIncoming[addressString]; ok {
return true
}
return false

View File

@@ -30,7 +30,7 @@
; Use Universal Plug and Play (UPnP) to automatically open the listen port
; and obtain the external IP address from supported devices. NOTE: This option
; will have no effect if external IP addresses are specified.
; will have no effect if exernal IP addresses are specified.
; upnp=1
; Specify the external IP addresses your node is listening on. One address per
@@ -83,7 +83,7 @@
; connect=[fe80::2]:16111
; Maximum number of inbound and outbound peers.
; maxinpeers=125
; maxpeers=125
; Disable banning of misbehaving peers.
; nobanning=1
@@ -142,6 +142,9 @@
; Disable peer bloom filtering. See BIP0111.
; nopeerbloomfilters=1
; Add additional checkpoints. Format: '<height>:<hash>'
; addcheckpoint=<height>:<hash>
; Add comments to the user agent that is advertised to peers.
; Must not include characters '/', ':', '(' and ')'.
; uacomment=
@@ -195,6 +198,13 @@
; Set the minimum transaction fee to be considered a non-zero fee,
; minrelaytxfee=0.00001
; Rate-limit free transactions to the value 15 * 1000 bytes per
; minute.
; limitfreerelay=15
; Require high priority for relaying free or low-fee transactions.
; norelaypriority=0
; Limit orphan transaction pool to 100 transactions.
; maxorphantx=100
@@ -208,6 +218,22 @@
; rejectnonstd=1
; ------------------------------------------------------------------------------
; Optional Indexes
; ------------------------------------------------------------------------------
; Build and maintain a full hash-based transaction index which makes all
; transactions available via the getrawtransaction RPC.
; txindex=1
; Build and maintain a full address-based transaction index which makes the
; searchrawtransactions RPC available.
; addrindex=1
; Delete the entire address index on start up, then exit.
; dropaddrindex=0
; ------------------------------------------------------------------------------
; Signature Verification Cache
; ------------------------------------------------------------------------------
@@ -216,6 +242,38 @@
; sigcachemaxsize=50000
; ------------------------------------------------------------------------------
; Coin Generation (Mining) Settings - The following options control the
; generation of block templates used by external mining applications through RPC
; calls.
; ------------------------------------------------------------------------------
; Add addresses to pay mined blocks to in the block templates generated
; for the getblocktemplate RPC. One address per line.
; miningaddr=kaspa:yourkaspaaddress
; miningaddr=kaspa:yourkaspaaddress2
; miningaddr=kaspa:yourkaspaaddress3
; Specify the minimum block size in bytes to create. By default, only
; transactions which have enough fees or a high enough priority will be included
; in generated block templates. Specifying a minimum block size will instead
; attempt to fill generated block templates up with transactions until it is at
; least the specified number of bytes.
; blockminsize=0
; Specify the maximum block size in bytes to create. This value will be limited
; to the consensus limit if it is larger than that value.
; blockmaxsize=750000
; Specify the size in bytes of the high-priority/low-fee area when creating a
; block. Transactions which consist of large amounts, old inputs, and small
; sizes have the highest priority. One consequence of this is that as low-fee
; or free transactions age, they raise in priority thereby making them more
; likely to be included in this section of a new block. This value is limited
; by the blackmaxsize option and will be limited as needed.
; blockprioritysize=50000
; ------------------------------------------------------------------------------
; Debug
; ------------------------------------------------------------------------------
@@ -232,3 +290,11 @@
; accessed at http://localhost:<profileport>/debug/pprof once running.
; profile=6061
; ------------------------------------------------------------------------------
; Subnetworks
; ------------------------------------------------------------------------------
; If subnetwork > 0, than node will request and process only payloads from
; specified subnetwork. And if subnetwork is 0, than payloads of all subnetworks
; are processed.
; subnetwork=0

View File

@@ -10,8 +10,8 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 8
appPatch uint = 7
appMinor uint = 9
appPatch uint = 0
)
// appBuild is defined as a variable so it can be overridden during the build