Compare commits

..

11 Commits

Author SHA1 Message Date
Elichai Turkel
2871a6a527 Update to version 0.8.7 2021-02-01 15:38:40 +02:00
Svarog
d5a3a96bde Use hard-coded sample config instead of assumed path (#1466)
* Use hard-coded sample config instead of assumed path

* Fix bad path to sample-kaspad.conf in TestCreateDefaultConfigFile

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-02-01 15:15:37 +02:00
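
A minimal sketch of the simplified createDefaultConfigFile this change enables: instead of locating sample-kaspad.conf next to the binary and copying it line by line, the embedded string is written straight to the destination. The sampleConfig constant below is a short stand-in for the real embedded content in the config package:

package main

import (
	"fmt"
	"os"
)

// sampleConfig is a short stand-in for the hard-coded sample configuration
// added in this change; the real string lives in the config package.
const sampleConfig = "[Application Options]\n; datadir=~/.kaspad/data\n"

// createDefaultConfigFile writes the embedded sample config directly,
// with no assumptions about where the binary or sample-kaspad.conf live.
func createDefaultConfigFile(destinationPath string) error {
	dest, err := os.OpenFile(destinationPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer dest.Close()

	_, err = dest.WriteString(sampleConfig)
	return err
}

func main() {
	if err := createDefaultConfigFile("kaspad.conf"); err != nil {
		fmt.Println("could not create default config:", err)
	}
}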
Elichai Turkel
12c438d389 Fix data races in ConnectionManager and flow tests (#1474)
* Reuse the ticker in ConnectionManager.waitTillNextIteration

* Fix a data race in ConnectionManager by locking the mutex

* Add a mutex to fakeRelayInvsContext in block relay flow test

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-02-01 15:03:31 +02:00
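
The flow-test fix boils down to guarding the fake context's response fields with a sync.RWMutex: setters take the write lock, getters the read lock. A self-contained sketch of that pattern (field and method names here are illustrative):

package main

import (
	"errors"
	"sync"
)

// fakeContext is an illustrative stand-in for a test fake whose response
// fields are written by the test goroutine and read by the flow goroutine.
type fakeContext struct {
	rwLock                sync.RWMutex
	validateBlockResponse error
}

// Setters take the write lock so readers never observe a half-written value.
func (f *fakeContext) SetValidateBlockResponse(err error) {
	f.rwLock.Lock()
	defer f.rwLock.Unlock()
	f.validateBlockResponse = err
}

// Getters take the read lock, allowing any number of concurrent readers.
func (f *fakeContext) ValidateBlock() error {
	f.rwLock.RLock()
	defer f.rwLock.RUnlock()
	return f.validateBlockResponse
}

func main() {
	f := &fakeContext{}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); f.SetValidateBlockResponse(errors.New("bad merkle root")) }()
	go func() { defer wg.Done(); _ = f.ValidateBlock() }()
	wg.Wait()
}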
Elichai Turkel
280fa3de46 Prevent infinite ticker leaks in kaspaminer (#1476)
* Prevent infinite ticker leaks in kaspaminer

* Reset ticker in ConnectionManager instead of allocating a new one

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-02-01 14:52:17 +02:00
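
The leak comes from calling time.Tick inside a select loop, which allocates a new, never-stopped ticker on every iteration. The fix (visible in the kaspaminer diff further down) allocates a single ticker and, on Go 1.15+, calls Reset instead of creating another one. A rough before/after sketch with illustrative channels:

package main

import (
	"fmt"
	"time"
)

func main() {
	const tickerTime = 500 * time.Millisecond

	// Leaky form removed by this change: time.Tick inside the loop creates a
	// fresh, never-stopped ticker on every iteration.
	//
	//   for {
	//       select {
	//       case <-time.Tick(tickerTime): // one leaked ticker per iteration
	//       }
	//   }

	// Fixed form: allocate once, stop on exit, and Reset (Go 1.15+) when some
	// other event already did the periodic work.
	ticker := time.NewTicker(tickerTime)
	defer ticker.Stop()

	blockAdded := time.After(700 * time.Millisecond) // stand-in for a notification channel
	deadline := time.After(2 * time.Second)
	for {
		select {
		case <-blockAdded:
			fmt.Println("notification handled; pushing the next tick back")
			ticker.Reset(tickerTime)
		case <-ticker.C:
			fmt.Println("tick")
		case <-deadline:
			return
		}
	}
}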
Elichai Turkel
d281dabdb4 Bump Go version to 1.15 (#1477) 2021-02-01 14:35:11 +02:00
Ori Newman
331042edf1 Add defaultTargetBlocksPerSecond (#1473)
* Add defaultTargetBlocksPerSecond

* Use different default per network
2021-02-01 14:26:45 +02:00
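
Per the kaspaminer diff further down, the default block rate is now derived from each network's target block time rather than being one hard-coded number. A small illustration of the arithmetic (the durations here are examples, not values taken from dagconfig):

package main

import (
	"fmt"
	"time"
)

// defaultTargetBlockRateRatio mirrors the constant added in this change.
const defaultTargetBlockRateRatio = 2.0

// defaultTargetBlocksPerSecond is the value used when the
// --target-blocks-per-second flag is omitted.
func defaultTargetBlocksPerSecond(targetTimePerBlock time.Duration) float64 {
	return defaultTargetBlockRateRatio / targetTimePerBlock.Seconds()
}

func main() {
	// A network with a 1-second target block time defaults to 2 blocks/s,
	// while a 2-second target block time defaults to 1 block/s.
	fmt.Println(defaultTargetBlocksPerSecond(1 * time.Second)) // 2
	fmt.Println(defaultTargetBlocksPerSecond(2 * time.Second)) // 1
}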
Ori Newman
669a9ab4c3 Ban by IP (#1471)
* Ban by IP

* Fix panic

* Fix error format

* Remove failed addresses

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-02-01 10:51:18 +02:00
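
The address-manager diff below keys bans by the raw 16-byte IP rather than by IP-and-port, so a misbehaving host stays banned no matter which port it reconnects from. A small sketch of that keying, using net.TCPAddr as a stand-in for appmessage.NetAddress:

package main

import (
	"fmt"
	"net"
)

// ipv6 mirrors the ban-key type in the diff: a raw 16-byte IP.
type ipv6 [net.IPv6len]byte

func ipKey(address *net.TCPAddr) ipv6 {
	var key ipv6
	// Every IPv4 address can be represented as IPv6.
	copy(key[:], address.IP.To16())
	return key
}

func main() {
	bannedAddresses := map[ipv6]bool{}

	offender := &net.TCPAddr{IP: net.ParseIP("203.0.113.7"), Port: 16111}
	bannedAddresses[ipKey(offender)] = true

	// The same host reconnecting from a different port is still banned.
	reconnect := &net.TCPAddr{IP: net.ParseIP("203.0.113.7"), Port: 43210}
	fmt.Println(bannedAddresses[ipKey(reconnect)]) // true
}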
stasatdaglabs
65e149b2bb In kaspaminer, don't crash on submitBlock timeout (#1462)
* In kaspaminer, don't crash on submitBlock timeout.

* Make timeout messages have a log level of Warn.

* Wait for a second after receiving a reject for IBD.

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-29 09:10:21 +02:00
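
The miner change below treats a submitBlock timeout as a warning rather than a fatal error, and backs off for a second when the node rejects the block because it is in IBD. A rough sketch of that control flow (errTimeout stands in for router.ErrTimeout, and the reject-reason check is reduced to a boolean):

package main

import (
	"errors"
	"fmt"
	"time"
)

// errTimeout stands in for router.ErrTimeout.
var errTimeout = errors.New("timeout")

// handleSubmitResult keeps the miner alive on a timeout and waits briefly
// when the node reports that it is still in IBD.
func handleSubmitResult(err error, rejectedBecauseIBD bool) error {
	if errors.Is(err, errTimeout) {
		fmt.Println("warn: submitBlock timed out, continuing to mine")
		return nil
	}
	if err != nil {
		return err
	}
	if rejectedBecauseIBD {
		const waitTime = 1 * time.Second
		fmt.Printf("warn: block rejected because the node is in IBD, waiting %s\n", waitTime)
		time.Sleep(waitTime)
	}
	return nil
}

func main() {
	_ = handleSubmitResult(fmt.Errorf("submitBlock: %w", errTimeout), false)
	_ = handleSubmitResult(nil, true)
}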
stasatdaglabs
7c1495ba65 Force stop gRPC servers after a short timeout (#1463)
* Force stop gRPC servers after a short timeout.

* Use spawn instead of go.
2021-01-28 19:43:04 +02:00
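
The pattern behind this change: ask the gRPC server to drain gracefully, but fall back to a hard Stop if draining exceeds a short deadline, so shutdown can never hang on a stuck stream. A sketch assuming a 2-second deadline (the real value and the spawn wrapper kaspad uses are not shown in this excerpt):

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc"
)

// gracefulStopTimeout is an assumed value for illustration.
const gracefulStopTimeout = 2 * time.Second

// stopServer drains the server politely, then force-stops it if draining
// takes too long.
func stopServer(server *grpc.Server) {
	done := make(chan struct{})
	go func() {
		server.GracefulStop()
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(gracefulStopTimeout):
		fmt.Println("graceful stop timed out, forcing stop")
		server.Stop()
	}
}

func main() {
	stopServer(grpc.NewServer())
}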
Ori Newman
13ffa5093c Increase the waiting for error timeout (#1465) 2021-01-28 13:33:37 +02:00
Ori Newman
a9a810a2b2 Add block type to MineJSON (#1464) 2021-01-28 13:22:20 +02:00
36 changed files with 549 additions and 516 deletions

View File

@@ -34,7 +34,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.14
go-version: 1.15
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
@@ -60,7 +60,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.14
go-version: 1.15
- name: Create coverage file
# Because of https://github.com/golang/go/issues/27333 this seem to "fail" even though nothing is wrong, so ignore the failure

View File

@@ -18,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
## Requirements
Go 1.14 or later.
Go 1.15 or later.
## Installation

View File

@@ -22,7 +22,7 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
panic(err)
}
log.Errorf("error from %s: %+v", flowName, err)
log.Errorf("error from %s: %s", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {

View File

@@ -60,7 +60,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
}
if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
return nil, protocolerrors.New(true, "connected to self")
return nil, protocolerrors.New(false, "connected to self")
}
// Disconnect and ban peers from a different network

View File

@@ -18,6 +18,7 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"sync"
"testing"
"time"
)
@@ -105,6 +106,7 @@ type fakeRelayInvsContext struct {
validateAndInsertImportedPruningPointResponse error
getBlockInfoResponse *externalapi.BlockInfo
validateAndInsertBlockResponse error
rwLock sync.RWMutex
}
func (f *fakeRelayInvsContext) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {
@@ -128,6 +130,8 @@ func (f *fakeRelayInvsContext) GetBlockHeader(blockHash *externalapi.DomainHash)
}
func (f *fakeRelayInvsContext) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
if f.getBlockInfoResponse != nil {
return f.getBlockInfoResponse, nil
}
@@ -167,6 +171,8 @@ func (f *fakeRelayInvsContext) AppendImportedPruningPointUTXOs(outpointAndUTXOEn
}
func (f *fakeRelayInvsContext) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.validateAndInsertImportedPruningPointResponse
}
@@ -179,12 +185,16 @@ func (f *fakeRelayInvsContext) CreateBlockLocator(lowHash, highHash *externalapi
}
func (f *fakeRelayInvsContext) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return externalapi.BlockLocator{
f.params.GenesisHash,
}, nil
}
func (f *fakeRelayInvsContext) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return externalapi.BlockLocator{
f.params.GenesisHash,
}, nil
@@ -203,6 +213,8 @@ func (f *fakeRelayInvsContext) GetVirtualInfo() (*externalapi.VirtualInfo, error
}
func (f *fakeRelayInvsContext) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.isValidPruningPointResponse, nil
}
@@ -231,6 +243,8 @@ func (f *fakeRelayInvsContext) Domain() domain.Domain {
}
func (f *fakeRelayInvsContext) Config() *config.Config {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
@@ -269,13 +283,59 @@ func (f *fakeRelayInvsContext) IsIBDRunning() bool {
}
func (f *fakeRelayInvsContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.trySetIBDRunningResponse
}
func (f *fakeRelayInvsContext) UnsetIBDRunning() {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
close(f.finishedIBD)
}
func (f *fakeRelayInvsContext) SetValidateAndInsertBlockResponse(err error) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.validateAndInsertBlockResponse = err
}
func (f *fakeRelayInvsContext) SetValidateAndInsertImportedPruningPointResponse(err error) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.validateAndInsertImportedPruningPointResponse = err
}
func (f *fakeRelayInvsContext) SetGetBlockInfoResponse(info externalapi.BlockInfo) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.getBlockInfoResponse = &info
}
func (f *fakeRelayInvsContext) SetTrySetIBDRunningResponse(b bool) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.trySetIBDRunningResponse = b
}
func (f *fakeRelayInvsContext) SetIsValidPruningPointResponse(b bool) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.isValidPruningPointResponse = b
}
func (f *fakeRelayInvsContext) GetGenesisHeader() externalapi.BlockHeader {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.params.GenesisBlock.Header
}
func (f *fakeRelayInvsContext) GetFinishedIBDChan() chan struct{} {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.finishedIBD
}
func TestHandleRelayInvs(t *testing.T) {
triggerIBD := func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
@@ -289,10 +349,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestRelayBlocks)
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
defer func() {
context.validateAndInsertBlockResponse = nil
}()
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
if err != nil {
@@ -342,10 +399,10 @@ func TestHandleRelayInvs(t *testing.T) {
name: "sending a known invalid inv",
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusInvalid,
}
})
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(knownInvalidBlockHash))
if err != nil {
@@ -402,7 +459,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestRelayBlocks)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(invalidBlock))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -426,7 +483,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestRelayBlocks)
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -452,7 +509,7 @@ func TestHandleRelayInvs(t *testing.T) {
{
name: "starting IBD when peer is already in IBD",
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
context.trySetIBDRunningResponse = false
context.SetTrySetIBDRunningResponse(false)
triggerIBD(t, incomingRoute, outgoingRoute, context)
checkNoActivity(t, outgoingRoute)
@@ -558,15 +615,15 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestHeaders)
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(
appmessage.NewBlockHeadersMessage(
[]*appmessage.MsgBlockHeader{
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
),
)
if err != nil {
@@ -581,10 +638,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
@@ -598,7 +655,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
context.isValidPruningPointResponse = false
context.SetIsValidPruningPointResponse(false)
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -630,11 +687,11 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestHeaders)
context.validateAndInsertBlockResponse = ruleerrors.ErrDuplicateBlock
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrDuplicateBlock)
err = incomingRoute.Enqueue(
appmessage.NewBlockHeadersMessage(
[]*appmessage.MsgBlockHeader{
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
),
)
if err != nil {
@@ -649,10 +706,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
@@ -666,7 +723,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
context.isValidPruningPointResponse = false
context.SetIsValidPruningPointResponse(false)
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -698,7 +755,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestHeaders)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
err = incomingRoute.Enqueue(
appmessage.NewBlockHeadersMessage(
[]*appmessage.MsgBlockHeader{
@@ -738,10 +795,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -790,10 +847,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -806,7 +863,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
context.isValidPruningPointResponse = false
context.SetIsValidPruningPointResponse(false)
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -840,10 +897,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -905,10 +962,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -968,10 +1025,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1037,10 +1094,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1064,7 +1121,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrBadMerkleRoot
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1104,10 +1161,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1131,7 +1188,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrSuggestedPruningViolatesFinality
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrSuggestedPruningViolatesFinality)
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1168,10 +1225,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1247,10 +1304,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1324,10 +1381,10 @@ func TestHandleRelayInvs(t *testing.T) {
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.getBlockInfoResponse = &externalapi.BlockInfo{
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
}
})
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1367,7 +1424,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestIBDBlocks)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1411,17 +1468,17 @@ func TestHandleRelayInvs(t *testing.T) {
select {
case err := <-errChan:
checkFlowError(t, err, test.expectsProtocolError, test.expectsBan, test.expectsErrToContain)
case <-time.After(time.Second):
t.Fatalf("waiting for error timed out after %s", time.Second)
case <-time.After(10 * time.Second):
t.Fatalf("waiting for error timed out after %s", 10*time.Second)
}
}
select {
case <-context.finishedIBD:
case <-context.GetFinishedIBDChan():
if !test.expectsIBDToFinish {
t.Fatalf("IBD unexpecetedly finished")
}
case <-time.After(time.Second):
case <-time.After(10 * time.Second):
if test.expectsIBDToFinish {
t.Fatalf("IBD didn't finished after %d", time.Second)
}
@@ -1436,7 +1493,7 @@ func TestHandleRelayInvs(t *testing.T) {
if !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("unexpected error %+v", err)
}
case <-time.After(time.Second):
case <-time.After(10 * time.Second):
t.Fatalf("waiting for flow to finish timed out after %s", time.Second)
}
}

View File

@@ -78,11 +78,7 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
err := m.context.ConnectionManager().Ban(netConnection)
if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
panic(err)
}
m.context.ConnectionManager().Ban(netConnection)
err = outgoingRoute.Enqueue(appmessage.NewMsgReject(protocolErr.Error()))
if err != nil && !errors.Is(err, routerpkg.ErrRouteClosed) {
panic(err)

View File

@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad
## Requirements
Go 1.14 or later.
Go 1.15 or later.
## Installation

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.14-alpine AS build
FROM golang:1.15-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad

View File

@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad
## Requirements
Go 1.14 or later.
Go 1.15 or later.
## Installation

View File

@@ -17,8 +17,9 @@ import (
)
const (
defaultLogFilename = "kaspaminer.log"
defaultErrLogFilename = "kaspaminer_err.log"
defaultLogFilename = "kaspaminer.log"
defaultErrLogFilename = "kaspaminer_err.log"
defaultTargetBlockRateRatio = 2.0
)
var (
@@ -30,13 +31,13 @@ var (
)
type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
TargetBlocksPerSecond float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. This flag is for debugging purposes."`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
TargetBlocksPerSecond *float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. 0 means no limit (The default one is 2 * target network block rate)"`
config.NetworkFlags
}
@@ -64,6 +65,11 @@ func parseConfig() (*configFlags, error) {
return nil, err
}
if cfg.TargetBlocksPerSecond == nil {
targetBlocksPerSecond := defaultTargetBlockRateRatio / cfg.NetParams().TargetTimePerBlock.Seconds()
cfg.TargetBlocksPerSecond = &targetBlocksPerSecond
}
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.14-alpine AS build
FROM golang:1.15-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad

View File

@@ -48,7 +48,7 @@ func main() {
doneChan := make(chan struct{})
spawn("mineLoop", func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
err = mineLoop(client, cfg.NumberOfBlocks, *cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
if err != nil {
panic(errors.Wrap(err, "error in mine loop"))
}

View File

@@ -117,8 +117,14 @@ func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error
rejectReason, err := client.SubmitBlock(block)
if err != nil {
if nativeerrors.Is(err, router.ErrTimeout) {
log.Warnf("Got timeout while submitting block %s to %s: %s", blockHash, client.Address(), err)
return nil
}
if rejectReason == appmessage.RejectReasonIsInIBD {
log.Warnf("Block %s was rejected because the node is in IBD", blockHash)
const waitTime = 1 * time.Second
log.Warnf("Block %s was rejected because the node is in IBD. Waiting for %s", blockHash, waitTime)
time.Sleep(waitTime)
return nil
}
return errors.Errorf("Error submitting block %s to %s: %s", blockHash, client.Address(), err)
@@ -152,7 +158,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
getBlockTemplate := func() {
template, err := client.GetBlockTemplate(miningAddr.String())
if nativeerrors.Is(err, router.ErrTimeout) {
log.Infof("Got timeout while requesting block template from %s", client.Address())
log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
return
} else if err != nil {
errChan <- errors.Errorf("Error getting block template from %s: %s", client.Address(), err)
@@ -161,6 +167,8 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
newTemplateChan <- template
}
getBlockTemplate()
const tickerTime = 500 * time.Millisecond
ticker := time.NewTicker(tickerTime)
for {
select {
case <-stopChan:
@@ -168,7 +176,8 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
return
case <-client.blockAddedNotificationChan:
getBlockTemplate()
case <-time.Tick(500 * time.Millisecond):
ticker.Reset(tickerTime)
case <-ticker.C:
getBlockTemplate()
}
}

View File

@@ -10,7 +10,7 @@ It is capable of generating wallet key-pairs, printing a wallet's current balanc
## Requirements
Go 1.14 or later.
Go 1.15 or later.
## Installation

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.14-alpine AS build
FROM golang:1.15-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad

View File

@@ -1,52 +0,0 @@
package binaryserialization
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/util/binaryserializer"
"io"
)
// SerializeUTXOCollection serializes the given utxoCollection into the given writer
func SerializeUTXOCollection(writer io.Writer, utxoCollection model.UTXOCollection) error {
length := uint64(utxoCollection.Len())
err := binaryserializer.PutUint64(writer, length)
if err != nil {
return err
}
utxoIterator := utxoCollection.Iterator()
for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() {
outpoint, utxoEntry, err := utxoIterator.Get()
if err != nil {
return err
}
err = utxo.SerializeUTXOIntoWriter(writer, utxoEntry, outpoint)
if err != nil {
return err
}
}
return nil
}
// DeserializeUTXOCollection deserializes a utxoCollection out of the given reader
func DeserializeUTXOCollection(reader io.Reader) (model.UTXOCollection, error) {
length, err := binaryserializer.Uint64(reader)
if err != nil {
return nil, err
}
utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, length)
for i := uint64(0); i < length; i++ {
utxoEntry, outpoint, err := utxo.DeserializeUTXOOutOfReader(reader)
if err != nil {
return nil, err
}
utxoMap[*outpoint] = utxoEntry
}
utxoCollection := utxo.NewUTXOCollection(utxoMap)
return utxoCollection, nil
}

View File

@@ -1,29 +0,0 @@
package binaryserialization
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"io"
)
// SerializeUTXODiff serializes the given utxoDiff into the given writer
func SerializeUTXODiff(writer io.Writer, utxoDiff model.UTXODiff) error {
err := SerializeUTXOCollection(writer, utxoDiff.ToAdd())
if err != nil {
return err
}
return SerializeUTXOCollection(writer, utxoDiff.ToRemove())
}
// DeserializeUTXODiff deserializes a utxoDiff out of the given reader
func DeserializeUTXODiff(reader io.Reader) (model.UTXODiff, error) {
toAdd, err := DeserializeUTXOCollection(reader)
if err != nil {
return nil, err
}
toRemove, err := DeserializeUTXOCollection(reader)
if err != nil {
return nil, err
}
return utxo.NewUTXODiffFromCollections(toAdd, toRemove)
}

View File

@@ -1,10 +1,8 @@
package utxodiffstore
import (
"bytes"
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -190,17 +188,27 @@ func (uds *utxoDiffStore) utxoDiffChildHashAsKey(hash *externalapi.DomainHash) m
}
func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff model.UTXODiff) ([]byte, error) {
writer := &bytes.Buffer{}
err := binaryserialization.SerializeUTXODiff(writer, utxoDiff)
dbUtxoDiff, err := serialization.UTXODiffToDBUTXODiff(utxoDiff)
if err != nil {
return nil, err
}
return writer.Bytes(), nil
bytes, err := proto.Marshal(dbUtxoDiff)
if err != nil {
return nil, errors.WithStack(err)
}
return bytes, nil
}
func (uds *utxoDiffStore) deserializeUTXODiff(utxoDiffBytes []byte) (model.UTXODiff, error) {
reader := bytes.NewReader(utxoDiffBytes)
return binaryserialization.DeserializeUTXODiff(reader)
dbUTXODiff := &serialization.DbUtxoDiff{}
err := proto.Unmarshal(utxoDiffBytes, dbUTXODiff)
if err != nil {
return nil, errors.WithStack(err)
}
return serialization.DBUTXODiffToUTXODiff(dbUTXODiff)
}
func (uds *utxoDiffStore) serializeUTXODiffChild(utxoDiffChild *externalapi.DomainHash) ([]byte, error) {

View File

@@ -1,168 +0,0 @@
package utxodiffstore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"math/rand"
"testing"
)
func TestUTXODiffSerializationAndDeserialization(t *testing.T) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
t.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
serializedUTXODiff, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
t.Fatalf("Could not serialize UTXO diff: %s", err)
}
deserializedUTXODiff, err := utxoDiffStore.deserializeUTXODiff(serializedUTXODiff)
if err != nil {
t.Fatalf("Could not deserialize UTXO diff: %s", err)
}
if testUTXODiff.ToAdd().Len() != deserializedUTXODiff.ToAdd().Len() {
t.Fatalf("Unexpected toAdd length in deserialized utxoDiff. Want: %d, got: %d",
testUTXODiff.ToAdd().Len(), deserializedUTXODiff.ToAdd().Len())
}
if testUTXODiff.ToRemove().Len() != deserializedUTXODiff.ToRemove().Len() {
t.Fatalf("Unexpected toRemove length in deserialized utxoDiff. Want: %d, got: %d",
testUTXODiff.ToRemove().Len(), deserializedUTXODiff.ToRemove().Len())
}
testToAddIterator := testUTXODiff.ToAdd().Iterator()
for ok := testToAddIterator.First(); ok; ok = testToAddIterator.Next() {
testOutpoint, testUTXOEntry, err := testToAddIterator.Get()
if err != nil {
t.Fatalf("Could not get an outpoint-utxoEntry pair out of the toAdd iterator: %s", err)
}
deserializedUTXOEntry, ok := deserializedUTXODiff.ToAdd().Get(testOutpoint)
if !ok {
t.Fatalf("Outpoint %s:%d not found in the deserialized toAdd collection",
testOutpoint.TransactionID, testOutpoint.Index)
}
if !testUTXOEntry.Equal(deserializedUTXOEntry) {
t.Fatalf("Deserialized UTXO entry is not equal to the original UTXO entry for outpoint %s:%d "+
"in the toAdd collection", testOutpoint.TransactionID, testOutpoint.Index)
}
}
testToRemoveIterator := testUTXODiff.ToRemove().Iterator()
for ok := testToRemoveIterator.First(); ok; ok = testToRemoveIterator.Next() {
testOutpoint, testUTXOEntry, err := testToRemoveIterator.Get()
if err != nil {
t.Fatalf("Could not get an outpoint-utxoEntry pair out of the toRemove iterator: %s", err)
}
deserializedUTXOEntry, ok := deserializedUTXODiff.ToRemove().Get(testOutpoint)
if !ok {
t.Fatalf("Outpoint %s:%d not found in the deserialized toRemove collection",
testOutpoint.TransactionID, testOutpoint.Index)
}
if !testUTXOEntry.Equal(deserializedUTXOEntry) {
t.Fatalf("Deserialized UTXO entry is not equal to the original UTXO entry for outpoint %s:%d "+
"in the toRemove collection", testOutpoint.TransactionID, testOutpoint.Index)
}
}
}
func BenchmarkUTXODiffSerialization(b *testing.B) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
b.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
b.Fatalf("Could not serialize UTXO diff: %s", err)
}
}
}
func BenchmarkUTXODiffDeserialization(b *testing.B) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
b.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
serializedUTXODiff, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
b.Fatalf("Could not serialize UTXO diff: %s", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = utxoDiffStore.deserializeUTXODiff(serializedUTXODiff)
if err != nil {
b.Fatalf("Could not deserialize UTXO diff: %s", err)
}
}
}
func BenchmarkUTXODiffSerializationAndDeserialization(b *testing.B) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
b.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
serializedUTXODiff, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
b.Fatalf("Could not serialize UTXO diff: %s", err)
}
_, err = utxoDiffStore.deserializeUTXODiff(serializedUTXODiff)
if err != nil {
b.Fatalf("Could not deserialize UTXO diff: %s", err)
}
}
}
func buildTestUTXODiff() (model.UTXODiff, error) {
toAdd := buildTestUTXOCollection()
toRemove := buildTestUTXOCollection()
utxoDiff, err := utxo.NewUTXODiffFromCollections(toAdd, toRemove)
if err != nil {
return nil, err
}
return utxoDiff, nil
}
func buildTestUTXOCollection() model.UTXOCollection {
utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
for i := 0; i < 100_000; i++ {
var outpointTransactionIDBytes [32]byte
rand.Read(outpointTransactionIDBytes[:])
outpointTransactionID := externalapi.NewDomainTransactionIDFromByteArray(&outpointTransactionIDBytes)
outpointIndex := rand.Uint32()
outpoint := externalapi.NewDomainOutpoint(outpointTransactionID, outpointIndex)
utxoEntryAmount := rand.Uint64()
var utxoEntryScriptPublicKeyScript [256]byte
rand.Read(utxoEntryScriptPublicKeyScript[:])
utxoEntryScriptPublicKeyVersion := uint16(rand.Uint32())
utxoEntryScriptPublicKey := &externalapi.ScriptPublicKey{
Script: utxoEntryScriptPublicKeyScript[:],
Version: utxoEntryScriptPublicKeyVersion,
}
utxoEntryIsCoinbase := rand.Float32() > 0.5
utxoEntryBlockBlueScore := rand.Uint64()
utxoEntry := utxo.NewUTXOEntry(utxoEntryAmount, utxoEntryScriptPublicKey, utxoEntryIsCoinbase, utxoEntryBlockBlueScore)
utxoMap[*outpoint] = utxoEntry
}
return utxo.NewUTXOCollection(utxoMap)
}

View File

@@ -232,12 +232,12 @@ func TestBoundedMergeDepth(t *testing.T) {
}
factory := NewFactory()
consensusBuild, teardownFunc1, err := factory.NewTestConsensus(params, false, "BoundedMergeTestBuild")
consensusBuild, teardownFunc1, err := factory.NewTestConsensus(params, false, "TestBoundedMergeTestBuild")
if err != nil {
t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
}
consensusReal, teardownFunc2, err := factory.NewTestConsensus(params, false, "BoundedMergeTestReal")
consensusReal, teardownFunc2, err := factory.NewTestConsensus(params, false, "TestBoundedMergeTestReal")
if err != nil {
t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
}

View File

@@ -8,6 +8,20 @@ import (
"io"
)
// MineJSONBlockType indicates which type of blocks MineJSON mines
type MineJSONBlockType int
const (
// MineJSONBlockTypeUTXOValidBlock indicates for MineJSON to mine valid blocks.
MineJSONBlockTypeUTXOValidBlock MineJSONBlockType = iota
// MineJSONBlockTypeUTXOInvalidBlock indicates for MineJSON to mine UTXO invalid blocks.
MineJSONBlockTypeUTXOInvalidBlock
// MineJSONBlockTypeUTXOInvalidHeader indicates for MineJSON to mine UTXO invalid headers.
MineJSONBlockTypeUTXOInvalidHeader
)
// TestConsensus wraps the Consensus interface with some methods that are needed by tests only
type TestConsensus interface {
externalapi.Consensus
@@ -33,7 +47,7 @@ type TestConsensus interface {
AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash,
*externalapi.BlockInsertionResult, error)
MineJSON(r io.Reader) (tips []*externalapi.DomainHash, err error)
MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error)
DiscardAllStores()
AcceptanceDataStore() model.AcceptanceDataStore

View File

@@ -1,6 +1,8 @@
package blockvalidator
import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@@ -29,6 +31,21 @@ func (v *blockValidator) ValidateHeaderInContext(blockHash *externalapi.DomainHa
if err != nil {
return err
}
var logErr error
log.Debug(logger.NewLogClosure(func() string {
var ghostdagData *model.BlockGHOSTDAGData
ghostdagData, logErr = v.ghostdagDataStore.Get(v.databaseContext, blockHash)
if err != nil {
return ""
}
return fmt.Sprintf("block %s blue score is %d", blockHash, ghostdagData.BlueScore())
}))
if logErr != nil {
return logErr
}
}
err = v.validateMedianTime(header)

View File

@@ -54,7 +54,7 @@ func buildJsonDAG(t *testing.T, tc testapi.TestConsensus, attackJson bool) (tips
}
defer gzipReader.Close()
tips, err = tc.MineJSON(gzipReader)
tips, err = tc.MineJSON(gzipReader, testapi.MineJSONBlockTypeUTXOInvalidHeader)
if err != nil {
t.Fatal(err)
}

View File

@@ -106,7 +106,7 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
return consensushashing.BlockHash(block), blockInsertionResult, nil
}
func (tc *testConsensus) MineJSON(r io.Reader) (tips []*externalapi.DomainHash, err error) {
func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) {
// jsonBlock is a json representation of a block in mine format
type jsonBlock struct {
ID string `json:"id"`
@@ -145,10 +145,28 @@ func (tc *testConsensus) MineJSON(r io.Reader) (tips []*externalapi.DomainHash,
}
delete(tipSet, *parentHashes[i])
}
blockHash, _, err := tc.AddUTXOInvalidHeader(parentHashes)
if err != nil {
return nil, err
var blockHash *externalapi.DomainHash
switch blockType {
case testapi.MineJSONBlockTypeUTXOValidBlock:
blockHash, _, err = tc.AddBlock(parentHashes, nil, nil)
if err != nil {
return nil, err
}
case testapi.MineJSONBlockTypeUTXOInvalidBlock:
blockHash, _, err = tc.AddUTXOInvalidBlock(parentHashes)
if err != nil {
return nil, err
}
case testapi.MineJSONBlockTypeUTXOInvalidHeader:
blockHash, _, err = tc.AddUTXOInvalidHeader(parentHashes)
if err != nil {
return nil, err
}
default:
return nil, errors.Errorf("unknwon block type %v", blockType)
}
parentsMap[block.ID] = blockHash
tipSet[*blockHash] = blockHash
}

View File

@@ -14,7 +14,12 @@ import (
func SerializeUTXO(entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint) ([]byte, error) {
w := &bytes.Buffer{}
err := SerializeUTXOIntoWriter(w, entry, outpoint)
err := serializeOutpoint(w, outpoint)
if err != nil {
return nil, err
}
err = serializeUTXOEntry(w, entry)
if err != nil {
return nil, err
}
@@ -22,30 +27,15 @@ func SerializeUTXO(entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutp
return w.Bytes(), nil
}
// SerializeUTXOIntoWriter serializes the byte-slice representation for given UTXOEntry-outpoint pair into the given writer
func SerializeUTXOIntoWriter(writer io.Writer, entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint) error {
err := serializeOutpoint(writer, outpoint)
if err != nil {
return err
}
return serializeUTXOEntry(writer, entry)
}
// DeserializeUTXO deserializes the given byte slice to UTXOEntry-outpoint pair
func DeserializeUTXO(utxoBytes []byte) (entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint, err error) {
r := bytes.NewReader(utxoBytes)
return DeserializeUTXOOutOfReader(r)
}
// DeserializeUTXOOutOfReader deserializes a UTXOEntry-outpoint pair out of the given reader
func DeserializeUTXOOutOfReader(reader io.Reader) (entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint, err error) {
outpoint, err = deserializeOutpoint(reader)
outpoint, err = deserializeOutpoint(r)
if err != nil {
return nil, nil, err
}
entry, err = deserializeUTXOEntry(reader)
entry, err = deserializeUTXOEntry(r)
if err != nil {
return nil, nil, err
}

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/kaspanet/kaspad
go 1.14
go 1.15
require (
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd

View File

@@ -5,9 +5,7 @@
package config
import (
"bufio"
"fmt"
"io"
"net"
"os"
"path/filepath"
@@ -16,18 +14,15 @@ import (
"strings"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/pkg/errors"
"github.com/btcsuite/go-socks/socks"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/network"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
)
const (
@@ -244,9 +239,7 @@ func LoadConfig() (*Config, error) {
cfg := &Config{
Flags: cfgFlags,
}
if !preCfg.Simnet || preCfg.ConfigFile !=
defaultConfigFile {
if !preCfg.Simnet || preCfg.ConfigFile != defaultConfigFile {
if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {
err := createDefaultConfigFile(preCfg.ConfigFile)
if err != nil {
@@ -593,13 +586,6 @@ func createDefaultConfigFile(destinationPath string) error {
return err
}
// We assume sample config file path is same as binary
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return err
}
sampleConfigPath := filepath.Join(path, sampleConfigFilename)
dest, err := os.OpenFile(destinationPath,
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
@@ -607,25 +593,7 @@ func createDefaultConfigFile(destinationPath string) error {
}
defer dest.Close()
src, err := os.Open(sampleConfigPath)
if err != nil {
return err
}
defer src.Close()
_, err = dest.WriteString(sampleConfig)
// We copy every line from the sample config file to the destination
reader := bufio.NewReader(src)
for err != io.EOF {
var line string
line, err = reader.ReadString('\n')
if err != nil && err != io.EOF {
return err
}
if _, err := dest.WriteString(line); err != nil {
return err
}
}
return nil
return err
}

View File

@@ -18,7 +18,7 @@ func TestCreateDefaultConfigFile(t *testing.T) {
if !ok {
t.Fatalf("Failed finding config file path")
}
sampleConfigFile := filepath.Join(filepath.Dir(path), "..", "..", "sample-kaspad.conf")
sampleConfigFile := filepath.Join(filepath.Dir(path), "sample-kaspad.conf")
// Setup a temporary directory
tmpDir, err := ioutil.TempDir("", "kaspad")

View File

@@ -30,7 +30,7 @@
; Use Universal Plug and Play (UPnP) to automatically open the listen port
; and obtain the external IP address from supported devices. NOTE: This option
; will have no effect if exernal IP addresses are specified.
; will have no effect if external IP addresses are specified.
; upnp=1
; Specify the external IP addresses your node is listening on. One address per
@@ -83,7 +83,7 @@
; connect=[fe80::2]:16111
; Maximum number of inbound and outbound peers.
; maxpeers=125
; maxinpeers=125
; Disable banning of misbehaving peers.
; nobanning=1
@@ -142,9 +142,6 @@
; Disable peer bloom filtering. See BIP0111.
; nopeerbloomfilters=1
; Add additional checkpoints. Format: '<height>:<hash>'
; addcheckpoint=<height>:<hash>
; Add comments to the user agent that is advertised to peers.
; Must not include characters '/', ':', '(' and ')'.
; uacomment=
@@ -198,13 +195,6 @@
; Set the minimum transaction fee to be considered a non-zero fee,
; minrelaytxfee=0.00001
; Rate-limit free transactions to the value 15 * 1000 bytes per
; minute.
; limitfreerelay=15
; Require high priority for relaying free or low-fee transactions.
; norelaypriority=0
; Limit orphan transaction pool to 100 transactions.
; maxorphantx=100
@@ -218,22 +208,6 @@
; rejectnonstd=1
; ------------------------------------------------------------------------------
; Optional Indexes
; ------------------------------------------------------------------------------
; Build and maintain a full hash-based transaction index which makes all
; transactions available via the getrawtransaction RPC.
; txindex=1
; Build and maintain a full address-based transaction index which makes the
; searchrawtransactions RPC available.
; addrindex=1
; Delete the entire address index on start up, then exit.
; dropaddrindex=0
; ------------------------------------------------------------------------------
; Signature Verification Cache
; ------------------------------------------------------------------------------
@@ -242,38 +216,6 @@
; sigcachemaxsize=50000
; ------------------------------------------------------------------------------
; Coin Generation (Mining) Settings - The following options control the
; generation of block templates used by external mining applications through RPC
; calls.
; ------------------------------------------------------------------------------
; Add addresses to pay mined blocks to in the block templates generated
; for the getblocktemplate RPC. One address per line.
; miningaddr=kaspa:yourkaspaaddress
; miningaddr=kaspa:yourkaspaaddress2
; miningaddr=kaspa:yourkaspaaddress3
; Specify the minimum block size in bytes to create. By default, only
; transactions which have enough fees or a high enough priority will be included
; in generated block templates. Specifying a minimum block size will instead
; attempt to fill generated block templates up with transactions until it is at
; least the specified number of bytes.
; blockminsize=0
; Specify the maximum block size in bytes to create. This value will be limited
; to the consensus limit if it is larger than that value.
; blockmaxsize=750000
; Specify the size in bytes of the high-priority/low-fee area when creating a
; block. Transactions which consist of large amounts, old inputs, and small
; sizes have the highest priority. One consequence of this is that as low-fee
; or free transactions age, they raise in priority thereby making them more
; likely to be included in this section of a new block. This value is limited
; by the blackmaxsize option and will be limited as needed.
; blockprioritysize=50000
; ------------------------------------------------------------------------------
; Debug
; ------------------------------------------------------------------------------
@@ -290,11 +232,3 @@
; accessed at http://localhost:<profileport>/debug/pprof once running.
; profile=6061
; ------------------------------------------------------------------------------
; Subnetworks
; ------------------------------------------------------------------------------
; If subnetwork > 0, than node will request and process only payloads from
; specified subnetwork. And if subnetwork is 0, than payloads of all subnetworks
; are processed.
; subnetwork=0

View File

@@ -0,0 +1,238 @@
package config
// This should be identical to the content of sample-kaspad.conf
// TODO: Replace with go:embed once go1.16 lands
var sampleConfig = `[Application Options]
; ------------------------------------------------------------------------------
; Data settings
; ------------------------------------------------------------------------------
; The directory to store data such as the block DAG and peer addresses. The
; block DAG takes several GB, so this location must have a lot of free space.
; The default is ~/.kaspad/data on POSIX OSes, $LOCALAPPDATA/Kaspad/data on Windows,
; ~/Library/Application Support/Kaspad/data on Mac OS, and $home/kaspad/data on
; Plan9. Environment variables are expanded so they may be used. NOTE: Windows
; environment variables are typically %VARIABLE%, but they must be accessed with
; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows.
; datadir=~/.kaspad/data
; ------------------------------------------------------------------------------
; Network settings
; ------------------------------------------------------------------------------
; Use testnet.
; testnet=1
; Connect via a SOCKS5 proxy. NOTE: Specifying a proxy will disable listening
; for incoming connections unless listen addresses are provided via the 'listen'
; option.
; proxy=127.0.0.1:9050
; proxyuser=
; proxypass=
; Use Universal Plug and Play (UPnP) to automatically open the listen port
; and obtain the external IP address from supported devices. NOTE: This option
; will have no effect if external IP addresses are specified.
; upnp=1
; Specify the external IP addresses your node is listening on. One address per
; line. kaspad will not contact 3rd-party sites to obtain external ip addresses.
; This means if you are behind NAT, your node will not be able to advertise a
; reachable address unless you specify it here or enable the 'upnp' option (and
; have a supported device).
; externalip=1.2.3.4
; externalip=2002::1234
; ******************************************************************************
; Summary of 'addpeer' versus 'connect'.
;
; Only one of the following two options, 'addpeer' and 'connect', may be
; specified. Both allow you to specify peers that you want to stay connected
; with, but the behavior is slightly different. By default, kaspad will query DNS
; to find peers to connect to, so unless you have a specific reason such as
; those described below, you probably won't need to modify anything here.
;
; 'addpeer' does not prevent connections to other peers discovered from
; the peers you are connected to and also lets the remote peers know you are
; available so they can notify other peers they can to connect to you. This
; option might be useful if you are having problems finding a node for some
; reason (perhaps due to a firewall).
;
; 'connect', on the other hand, will ONLY connect to the specified peers and
; no others. It also disables listening (unless you explicitly set listen
; addresses via the 'listen' option) and DNS seeding, so you will not be
; advertised as an available peer to the peers you connect to and won't accept
; connections from any other peers. So, the 'connect' option effectively allows
; you to only connect to "trusted" peers.
; ******************************************************************************
; Add persistent peers to connect to as desired. One peer per line.
; You may specify each IP address with or without a port. The default port will
; be added automatically if one is not specified here.
; addpeer=192.168.1.1
; addpeer=10.0.0.2:16111
; addpeer=fe80::1
; addpeer=[fe80::2]:16111
; Add persistent peers that you ONLY want to connect to as desired. One peer
; per line. You may specify each IP address with or without a port. The
; default port will be added automatically if one is not specified here.
; NOTE: Specifying this option has other side effects as described above in
; the 'addpeer' versus 'connect' summary section.
; connect=192.168.1.1
; connect=10.0.0.2:16111
; connect=fe80::1
; connect=[fe80::2]:16111
; Maximum number of inbound and outbound peers.
; maxinpeers=125
; Disable banning of misbehaving peers.
; nobanning=1
; Maximum allowed ban score before disconnecting and banning misbehaving peers.
; banthreshold=100
; How long to ban misbehaving peers. Valid time units are {s, m, h}.
; Minimum 1s.
; banduration=24h
; banduration=11h30m15s
; Add whitelisted IP networks and IPs. Connected peers whose IP matches a
; whitelist will not have their ban score increased.
; whitelist=127.0.0.1
; whitelist=::1
; whitelist=192.168.0.0/24
; whitelist=fd00::/16
; Disable DNS seeding for peers. By default, when kaspad starts, it will use
; DNS to query for available peers to connect with.
; nodnsseed=1
; Specify the interfaces to listen on. One listen address per line.
; NOTE: The default port is modified by some options such as 'testnet', so it is
; recommended to not specify a port and allow a proper default to be chosen
; unless you have a specific reason to do otherwise.
; All interfaces on default port (this is the default):
; listen=
; All ipv4 interfaces on default port:
; listen=0.0.0.0
; All ipv6 interfaces on default port:
; listen=::
; All interfaces on port 16111:
; listen=:16111
; All ipv4 interfaces on port 16111:
; listen=0.0.0.0:16111
; All ipv6 interfaces on port 16111:
; listen=[::]:16111
; Only ipv4 localhost on port 8333:
; listen=127.0.0.1:8333
; Only ipv6 localhost on port 8333:
; listen=[::1]:8333
; Only ipv4 localhost on non-standard port 8336:
; listen=127.0.0.1:8336
; All interfaces on non-standard port 8336:
; listen=:8336
; All ipv4 interfaces on non-standard port 8336:
; listen=0.0.0.0:8336
; All ipv6 interfaces on non-standard port 8336:
; listen=[::]:8336
; Disable listening for incoming connections. This will override all listeners.
; nolisten=1
; Disable peer bloom filtering. See BIP0111.
; nopeerbloomfilters=1
; Add comments to the user agent that is advertised to peers.
; Must not include characters '/', ':', '(' and ')'.
; uacomment=
; ------------------------------------------------------------------------------
; RPC server options - The following options control the built-in RPC server
; which is used to control and query information from a running kaspad process.
; ------------------------------------------------------------------------------
; Specify the interfaces for the RPC server listen on. One listen address per
; line. NOTE: The default port is modified by some options such as 'testnet',
; so it is recommended to not specify a port and allow a proper default to be
; chosen unless you have a specific reason to do otherwise. By default, the
; RPC server will only listen on localhost for IPv4 and IPv6.
; All interfaces on default port:
; rpclisten=
; All ipv4 interfaces on default port:
; rpclisten=0.0.0.0
; All ipv6 interfaces on default port:
; rpclisten=::
; All interfaces on port 16110:
; rpclisten=:16110
; All ipv4 interfaces on port 16110:
; rpclisten=0.0.0.0:16110
; All ipv6 interfaces on port 16110:
; rpclisten=[::]:16110
; Only ipv4 localhost on port 16110:
; rpclisten=127.0.0.1:16110
; Only ipv6 localhost on port 16110:
; rpclisten=[::1]:16110
; Only ipv4 localhost on non-standard port 8337:
; rpclisten=127.0.0.1:8337
; All interfaces on non-standard port 8337:
; rpclisten=:8337
; All ipv4 interfaces on non-standard port 8337:
; rpclisten=0.0.0.0:8337
; All ipv6 interfaces on non-standard port 8337:
; rpclisten=[::]:8337
; Specify the maximum number of concurrent RPC clients for standard connections.
; rpcmaxclients=10
; Use the following setting to disable the RPC server.
; norpc=1
; ------------------------------------------------------------------------------
; Mempool Settings - The following options
; ------------------------------------------------------------------------------
; Set the minimum transaction fee to be considered a non-zero fee,
; minrelaytxfee=0.00001
; Limit orphan transaction pool to 100 transactions.
; maxorphantx=100
; Do not accept transactions from remote peers.
; blocksonly=1
; Relay non-standard transactions regardless of default network settings.
; relaynonstd=1
; Reject non-standard transactions regardless of default network settings.
; rejectnonstd=1
; ------------------------------------------------------------------------------
; Signature Verification Cache
; ------------------------------------------------------------------------------
; Limit the signature cache to a max of 50000 entries.
; sigcachemaxsize=50000
; ------------------------------------------------------------------------------
; Debug
; ------------------------------------------------------------------------------
; Debug logging level.
; Valid levels are {trace, debug, info, warn, error, critical}
; You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set
; log level for individual subsystems. Use kaspad --debuglevel=show to list
; available subsystems.
; debuglevel=info
; The port used to listen for HTTP profile requests. The profile server will
; be disabled if this option is not specified. The profile information can be
; accessed at http://localhost:<profileport>/debug/pprof once running.
; profile=6061
`
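For readers who want to try the options documented in the sample above, a minimal kaspad.conf could look like the sketch below. The values are purely illustrative (they are not taken from this diff) and should be adapted to your own setup:

    ; illustrative kaspad.conf - example values only
    rpclisten=127.0.0.1:16110
    rpcmaxclients=10
    debuglevel=info
    profile=6061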

View File

@@ -12,16 +12,22 @@ import (
"github.com/pkg/errors"
)
// AddressRandomizer is the interface for the randomizer needed for the AddressManager.
type AddressRandomizer interface {
// addressRandomizer is the interface for the randomizer needed for the AddressManager.
type addressRandomizer interface {
RandomAddress(addresses []*appmessage.NetAddress) *appmessage.NetAddress
RandomAddresses(addresses []*appmessage.NetAddress, count int) []*appmessage.NetAddress
}
// AddressKey represents a pair of IP and port, the IP is always in V6 representation
type AddressKey struct {
// addressKey represents a pair of IP and port, the IP is always in V6 representation
type addressKey struct {
port uint16
address [net.IPv6len]byte
address ipv6
}
type ipv6 [net.IPv6len]byte
func (i ipv6) equal(other ipv6) bool {
return i == other
}
// ErrAddressNotFound is an error returned from some functions when a
@@ -29,16 +35,16 @@ type AddressKey struct {
var ErrAddressNotFound = errors.New("address not found")
// NetAddressKey returns a key of the ip address to use it in maps.
func netAddressKey(netAddress *appmessage.NetAddress) AddressKey {
key := AddressKey{port: netAddress.Port}
func netAddressKey(netAddress *appmessage.NetAddress) addressKey {
key := addressKey{port: netAddress.Port}
// all IPv4 can be represented as IPv6.
copy(key.address[:], netAddress.IP.To16())
return key
}
// netAddressKeys returns a key of the ip address to use it in maps.
func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[AddressKey]bool {
result := make(map[AddressKey]bool, len(netAddresses))
func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[addressKey]bool {
result := make(map[addressKey]bool, len(netAddresses))
for _, netAddress := range netAddresses {
key := netAddressKey(netAddress)
result[key] = true
@@ -50,12 +56,12 @@ func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[AddressKey]bool
// AddressManager provides a concurrency safe address manager for caching potential
// peers on the Kaspa network.
type AddressManager struct {
addresses map[AddressKey]*appmessage.NetAddress
bannedAddresses map[AddressKey]*appmessage.NetAddress
addresses map[addressKey]*appmessage.NetAddress
bannedAddresses map[ipv6]*appmessage.NetAddress
localAddresses *localAddressManager
mutex sync.Mutex
cfg *Config
random AddressRandomizer
random addressRandomizer
}
// New returns a new Kaspa address manager.
@@ -66,8 +72,8 @@ func New(cfg *Config) (*AddressManager, error) {
}
return &AddressManager{
addresses: map[AddressKey]*appmessage.NetAddress{},
bannedAddresses: map[AddressKey]*appmessage.NetAddress{},
addresses: map[addressKey]*appmessage.NetAddress{},
bannedAddresses: map[ipv6]*appmessage.NetAddress{},
localAddresses: localAddresses,
random: NewAddressRandomize(),
cfg: cfg,
@@ -111,7 +117,6 @@ func (am *AddressManager) RemoveAddress(address *appmessage.NetAddress) {
key := netAddressKey(address)
delete(am.addresses, key)
delete(am.bannedAddresses, key)
}
// Addresses returns all addresses
@@ -175,21 +180,23 @@ func (am *AddressManager) BestLocalAddress(remoteAddress *appmessage.NetAddress)
}
// Ban marks the given address as banned
func (am *AddressManager) Ban(address *appmessage.NetAddress) error {
func (am *AddressManager) Ban(addressToBan *appmessage.NetAddress) {
am.mutex.Lock()
defer am.mutex.Unlock()
key := netAddressKey(address)
addressToBan, ok := am.addresses[key]
if !ok {
return errors.Wrapf(ErrAddressNotFound, "address %s "+
"is not registered with the address manager", address.TCPAddress())
keyToBan := netAddressKey(addressToBan)
keysToDelete := make([]addressKey, 0)
for _, address := range am.addresses {
key := netAddressKey(address)
if key.address.equal(keyToBan.address) {
keysToDelete = append(keysToDelete, key)
}
}
for _, key := range keysToDelete {
delete(am.addresses, key)
}
delete(am.addresses, key)
am.bannedAddresses[key] = addressToBan
return nil
am.bannedAddresses[keyToBan.address] = addressToBan
}
// Unban unmarks the given address as banned
@@ -198,13 +205,13 @@ func (am *AddressManager) Unban(address *appmessage.NetAddress) error {
defer am.mutex.Unlock()
key := netAddressKey(address)
bannedAddress, ok := am.bannedAddresses[key]
bannedAddress, ok := am.bannedAddresses[key.address]
if !ok {
return errors.Wrapf(ErrAddressNotFound, "address %s "+
"is not registered with the address manager as banned", address.TCPAddress())
}
delete(am.bannedAddresses, key)
delete(am.bannedAddresses, key.address)
am.addresses[key] = bannedAddress
return nil
}
@@ -215,7 +222,7 @@ func (am *AddressManager) IsBanned(address *appmessage.NetAddress) (bool, error)
defer am.mutex.Unlock()
key := netAddressKey(address)
if _, ok := am.bannedAddresses[key]; !ok {
if _, ok := am.bannedAddresses[key.address]; !ok {
if _, ok = am.addresses[key]; !ok {
return false, errors.Wrapf(ErrAddressNotFound, "address %s "+
"is not registered with the address manager", address.TCPAddress())

View File

@@ -7,7 +7,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
)
// AddressRandomize implement AddressRandomizer interface
// AddressRandomize implement addressRandomizer interface
type AddressRandomize struct {
random *rand.Rand
}

View File

@@ -38,7 +38,7 @@ type localAddress struct {
}
type localAddressManager struct {
localAddresses map[AddressKey]*localAddress
localAddresses map[addressKey]*localAddress
lookupFunc func(string) ([]net.IP, error)
cfg *Config
mutex sync.Mutex
@@ -46,7 +46,7 @@ type localAddressManager struct {
func newLocalAddressManager(cfg *Config) (*localAddressManager, error) {
localAddressManager := localAddressManager{
localAddresses: map[AddressKey]*localAddress{},
localAddresses: map[addressKey]*localAddress{},
cfg: cfg,
lookupFunc: cfg.Lookup,
}

View File

@@ -126,35 +126,41 @@ func (c *ConnectionManager) ConnectionCount() int {
}
// Ban marks the given netConnection as banned
func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) error {
return c.addressManager.Ban(netConnection.NetAddress())
func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) {
if c.isPermanent(netConnection.Address()) {
log.Infof("Cannot ban %s because it's a permanent connection", netConnection.Address())
return
}
c.addressManager.Ban(netConnection.NetAddress())
}
// IsBanned returns whether the given netConnection is banned
func (c *ConnectionManager) IsBanned(netConnection *netadapter.NetConnection) (bool, error) {
if c.isPermanent(netConnection.Address()) {
return false, nil
}
return c.addressManager.IsBanned(netConnection.NetAddress())
}
func (c *ConnectionManager) waitTillNextIteration() {
select {
case <-c.resetLoopChan:
c.loopTicker.Stop()
c.loopTicker = time.NewTicker(connectionsLoopInterval)
c.loopTicker.Reset(connectionsLoopInterval)
case <-c.loopTicker.C:
}
}
func (c *ConnectionManager) connectionExists(addressString string) bool {
if _, ok := c.activeRequested[addressString]; ok {
return true
func (c *ConnectionManager) isPermanent(addressString string) bool {
c.connectionRequestsLock.Lock()
defer c.connectionRequestsLock.Unlock()
if conn, ok := c.activeRequested[addressString]; ok {
return conn.isPermanent
}
if _, ok := c.activeOutgoing[addressString]; ok {
return true
}
if _, ok := c.activeIncoming[addressString]; ok {
return true
if conn, ok := c.pendingRequested[addressString]; ok {
return conn.isPermanent
}
return false
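The waitTillNextIteration change in this file reuses the same ticker via Reset instead of stopping it and allocating a replacement; time.Ticker.Reset only exists since Go 1.15, which fits the Go version bump elsewhere in this range. Here is a rough standalone sketch of the pattern, with illustrative names (resetChan, interval) rather than the actual ConnectionManager fields:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const interval = 200 * time.Millisecond
    	ticker := time.NewTicker(interval)
    	defer ticker.Stop()

    	resetChan := make(chan struct{}, 1)
    	// Simulate something asking for an early iteration shortly after start.
    	go func() {
    		time.Sleep(50 * time.Millisecond)
    		resetChan <- struct{}{}
    	}()

    	for i := 0; i < 3; i++ {
    		select {
    		case <-resetChan:
    			// Restart the countdown on the same ticker instead of
    			// stopping it and allocating a new one.
    			ticker.Reset(interval)
    			fmt.Println("iteration", i, "woken by reset")
    		case <-ticker.C:
    			fmt.Println("iteration", i, "woken by ticker")
    		}
    	}
    }

Reusing one ticker avoids an allocation on every reset and keeps a single deferred Stop responsible for cleanup.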

View File

@@ -9,6 +9,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
"net"
"time"
)
type gRPCServer struct {
@@ -61,7 +62,20 @@ func (s *gRPCServer) listenOn(listenAddr string) error {
}
func (s *gRPCServer) Stop() error {
s.server.GracefulStop()
const stopTimeout = 2 * time.Second
stopChan := make(chan interface{})
spawn("gRPCServer.Stop", func() {
s.server.GracefulStop()
close(stopChan)
})
select {
case <-stopChan:
case <-time.After(stopTimeout):
log.Warnf("Could not gracefully stop %s: timed out after %s", s.name, stopTimeout)
s.server.Stop()
}
return nil
}
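The Stop change in this file bounds the graceful shutdown: GracefulStop runs in a separate goroutine, and if it has not finished within a short timeout the server is stopped forcefully. A minimal sketch of that pattern, using plain go and the standard log package instead of kaspad's spawn helper and logger (it assumes the google.golang.org/grpc module is available):

    package main

    import (
    	"log"
    	"time"

    	"google.golang.org/grpc"
    )

    func stopWithTimeout(server *grpc.Server, timeout time.Duration) {
    	stopped := make(chan struct{})
    	go func() {
    		server.GracefulStop()
    		close(stopped)
    	}()

    	select {
    	case <-stopped:
    		log.Println("gRPC server stopped gracefully")
    	case <-time.After(timeout):
    		log.Printf("graceful stop timed out after %s, forcing stop", timeout)
    		server.Stop()
    	}
    }

    func main() {
    	server := grpc.NewServer()
    	stopWithTimeout(server, 2*time.Second)
    }

The trade-off is that RPCs still in flight after the timeout are cut off, which the change accepts in exchange for a bounded shutdown time.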

View File

@@ -10,8 +10,8 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 9
appPatch uint = 0
appMinor uint = 8
appPatch uint = 7
)
// appBuild is defined as a variable so it can be overridden during the build