Mirror of https://github.com/kaspanet/kaspad.git

Compare commits: optimize-u... → v0.8.7-dev (11 commits)
| SHA1 |
|---|
| 2871a6a527 |
| d5a3a96bde |
| 12c438d389 |
| 280fa3de46 |
| d281dabdb4 |
| 331042edf1 |
| 669a9ab4c3 |
| 65e149b2bb |
| 7c1495ba65 |
| 13ffa5093c |
| a9a810a2b2 |
.github/workflows/go.yml (vendored, 4 changed lines)
@@ -34,7 +34,7 @@ jobs:
       - name: Set up Go 1.x
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.15

       # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
@@ -60,7 +60,7 @@ jobs:
       - name: Set up Go 1.x
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.15

       - name: Create coverage file
         # Because of https://github.com/golang/go/issues/27333 this seem to "fail" even though nothing is wrong, so ignore the failure
@@ -18,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations

 ## Requirements

-Go 1.14 or later.
+Go 1.15 or later.

 ## Installation
@@ -22,7 +22,7 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
 			panic(err)
 		}

-		log.Errorf("error from %s: %+v", flowName, err)
+		log.Errorf("error from %s: %s", flowName, err)
 	}

 	if atomic.AddUint32(isStopping, 1) == 1 {
@@ -60,7 +60,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
 	}

 	if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
-		return nil, protocolerrors.New(true, "connected to self")
+		return nil, protocolerrors.New(false, "connected to self")
 	}

 	// Disconnect and ban peers from a different network
@@ -18,6 +18,7 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 	"github.com/kaspanet/kaspad/util/mstime"
 	"github.com/pkg/errors"
+	"sync"
 	"testing"
 	"time"
 )
@@ -105,6 +106,7 @@ type fakeRelayInvsContext struct {
 	validateAndInsertImportedPruningPointResponse error
 	getBlockInfoResponse                          *externalapi.BlockInfo
 	validateAndInsertBlockResponse                error
+	rwLock                                        sync.RWMutex
 }

 func (f *fakeRelayInvsContext) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {
@@ -128,6 +130,8 @@ func (f *fakeRelayInvsContext) GetBlockHeader(blockHash *externalapi.DomainHash)
 }

 func (f *fakeRelayInvsContext) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	if f.getBlockInfoResponse != nil {
 		return f.getBlockInfoResponse, nil
 	}
@@ -167,6 +171,8 @@ func (f *fakeRelayInvsContext) AppendImportedPruningPointUTXOs(outpointAndUTXOEn
 }

 func (f *fakeRelayInvsContext) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	return f.validateAndInsertImportedPruningPointResponse
 }
@@ -179,12 +185,16 @@ func (f *fakeRelayInvsContext) CreateBlockLocator(lowHash, highHash *externalapi
 }

 func (f *fakeRelayInvsContext) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	return externalapi.BlockLocator{
 		f.params.GenesisHash,
 	}, nil
 }

 func (f *fakeRelayInvsContext) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	return externalapi.BlockLocator{
 		f.params.GenesisHash,
 	}, nil
@@ -203,6 +213,8 @@ func (f *fakeRelayInvsContext) GetVirtualInfo() (*externalapi.VirtualInfo, error
 }

 func (f *fakeRelayInvsContext) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	return f.isValidPruningPointResponse, nil
 }
@@ -231,6 +243,8 @@ func (f *fakeRelayInvsContext) Domain() domain.Domain {
 }

 func (f *fakeRelayInvsContext) Config() *config.Config {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	return &config.Config{
 		Flags: &config.Flags{
 			NetworkFlags: config.NetworkFlags{
@@ -269,13 +283,59 @@ func (f *fakeRelayInvsContext) IsIBDRunning() bool {
 }

 func (f *fakeRelayInvsContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	return f.trySetIBDRunningResponse
 }

 func (f *fakeRelayInvsContext) UnsetIBDRunning() {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
 	close(f.finishedIBD)
 }

+func (f *fakeRelayInvsContext) SetValidateAndInsertBlockResponse(err error) {
+	f.rwLock.Lock()
+	defer f.rwLock.Unlock()
+	f.validateAndInsertBlockResponse = err
+}
+
+func (f *fakeRelayInvsContext) SetValidateAndInsertImportedPruningPointResponse(err error) {
+	f.rwLock.Lock()
+	defer f.rwLock.Unlock()
+	f.validateAndInsertImportedPruningPointResponse = err
+}
+
+func (f *fakeRelayInvsContext) SetGetBlockInfoResponse(info externalapi.BlockInfo) {
+	f.rwLock.Lock()
+	defer f.rwLock.Unlock()
+	f.getBlockInfoResponse = &info
+}
+
+func (f *fakeRelayInvsContext) SetTrySetIBDRunningResponse(b bool) {
+	f.rwLock.Lock()
+	defer f.rwLock.Unlock()
+	f.trySetIBDRunningResponse = b
+}
+
+func (f *fakeRelayInvsContext) SetIsValidPruningPointResponse(b bool) {
+	f.rwLock.Lock()
+	defer f.rwLock.Unlock()
+	f.isValidPruningPointResponse = b
+}
+
+func (f *fakeRelayInvsContext) GetGenesisHeader() externalapi.BlockHeader {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
+	return f.params.GenesisBlock.Header
+}
+
+func (f *fakeRelayInvsContext) GetFinishedIBDChan() chan struct{} {
+	f.rwLock.RLock()
+	defer f.rwLock.RUnlock()
+	return f.finishedIBD
+}
+
 func TestHandleRelayInvs(t *testing.T) {
 	triggerIBD := func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
 		err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
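The racy part of the old test was that the test goroutine assigned the fake's response fields directly while the flow under test read them from another goroutine. The new rwLock plus the Set*/Get* methods above serialize those accesses. A stripped-down sketch of the same pattern (type and field names shortened for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

// fakeContext mimics the test double: every response field is read and
// written under rwLock, so the flow goroutine and the test goroutine can
// touch it concurrently without tripping the race detector.
type fakeContext struct {
	rwLock                         sync.RWMutex
	validateAndInsertBlockResponse error
}

func (f *fakeContext) SetValidateAndInsertBlockResponse(err error) {
	f.rwLock.Lock()
	defer f.rwLock.Unlock()
	f.validateAndInsertBlockResponse = err
}

func (f *fakeContext) ValidateAndInsertBlock() error {
	f.rwLock.RLock()
	defer f.rwLock.RUnlock()
	return f.validateAndInsertBlockResponse
}

func main() {
	f := &fakeContext{}

	done := make(chan struct{})
	go func() { // stands in for the flow goroutine reading the fake
		defer close(done)
		_ = f.ValidateAndInsertBlock()
	}()

	// stands in for the test goroutine configuring the next response
	f.SetValidateAndInsertBlockResponse(nil)
	<-done
	fmt.Println("clean under `go run -race`")
}
```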
@@ -289,10 +349,7 @@ func TestHandleRelayInvs(t *testing.T) {
 				}
 				_ = msg.(*appmessage.MsgRequestRelayBlocks)

-				context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
-				defer func() {
-					context.validateAndInsertBlockResponse = nil
-				}()
+				context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))

 				err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
 				if err != nil {
@@ -342,10 +399,10 @@ func TestHandleRelayInvs(t *testing.T) {
 			name: "sending a known invalid inv",
 			funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
-				context.getBlockInfoResponse = &externalapi.BlockInfo{
+				context.SetGetBlockInfoResponse(externalapi.BlockInfo{
 					Exists:      true,
 					BlockStatus: externalapi.StatusInvalid,
-				}
+				})

 				err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(knownInvalidBlockHash))
 				if err != nil {
@@ -402,7 +459,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(invalidBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -426,7 +483,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -452,7 +509,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
{
|
||||
name: "starting IBD when peer is already in IBD",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
context.trySetIBDRunningResponse = false
|
||||
context.SetTrySetIBDRunningResponse(false)
|
||||
triggerIBD(t, incomingRoute, outgoingRoute, context)
|
||||
|
||||
checkNoActivity(t, outgoingRoute)
|
||||
@@ -558,15 +615,15 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -581,10 +638,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
@@ -598,7 +655,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.isValidPruningPointResponse = false
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -630,11 +687,11 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrDuplicateBlock
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrDuplicateBlock)
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -649,10 +706,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
@@ -666,7 +723,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.isValidPruningPointResponse = false
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -698,7 +755,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
@@ -738,10 +795,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -790,10 +847,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -806,7 +863,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.isValidPruningPointResponse = false
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -840,10 +897,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -905,10 +962,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -968,10 +1025,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1037,10 +1094,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1064,7 +1121,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
|
||||
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrBadMerkleRoot
|
||||
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidPruningPointBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -1104,10 +1161,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1131,7 +1188,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
|
||||
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrSuggestedPruningViolatesFinality
|
||||
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrSuggestedPruningViolatesFinality)
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -1168,10 +1225,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1247,10 +1304,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1324,10 +1381,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
}
|
||||
})
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1367,7 +1424,7 @@ func TestHandleRelayInvs(t *testing.T) {
 				}
 				_ = msg.(*appmessage.MsgRequestIBDBlocks)

-				context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
+				context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
 				err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidBlock)))
 				if err != nil {
 					t.Fatalf("Enqueue: %+v", err)
@@ -1411,17 +1468,17 @@ func TestHandleRelayInvs(t *testing.T) {
 			select {
 			case err := <-errChan:
 				checkFlowError(t, err, test.expectsProtocolError, test.expectsBan, test.expectsErrToContain)
-			case <-time.After(time.Second):
-				t.Fatalf("waiting for error timed out after %s", time.Second)
+			case <-time.After(10 * time.Second):
+				t.Fatalf("waiting for error timed out after %s", 10*time.Second)
 			}
 		}

 		select {
-		case <-context.finishedIBD:
+		case <-context.GetFinishedIBDChan():
 			if !test.expectsIBDToFinish {
 				t.Fatalf("IBD unexpecetedly finished")
 			}
-		case <-time.After(time.Second):
+		case <-time.After(10 * time.Second):
 			if test.expectsIBDToFinish {
 				t.Fatalf("IBD didn't finished after %d", time.Second)
 			}
@@ -1436,7 +1493,7 @@ func TestHandleRelayInvs(t *testing.T) {
 			if !errors.Is(err, router.ErrRouteClosed) {
 				t.Fatalf("unexpected error %+v", err)
 			}
-		case <-time.After(time.Second):
+		case <-time.After(10 * time.Second):
 			t.Fatalf("waiting for flow to finish timed out after %s", time.Second)
 		}
 	}
@@ -78,11 +78,7 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
 	if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
 		log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)

-		err := m.context.ConnectionManager().Ban(netConnection)
-		if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
-			panic(err)
-		}
-
+		m.context.ConnectionManager().Ban(netConnection)
 		err = outgoingRoute.Enqueue(appmessage.NewMsgReject(protocolErr.Error()))
 		if err != nil && !errors.Is(err, routerpkg.ErrRouteClosed) {
 			panic(err)
@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad

 ## Requirements

-Go 1.14 or later.
+Go 1.15 or later.

 ## Installation
@@ -1,5 +1,5 @@
 # -- multistage docker build: stage #1: build stage
-FROM golang:1.14-alpine AS build
+FROM golang:1.15-alpine AS build

 RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad

 ## Requirements

-Go 1.14 or later.
+Go 1.15 or later.

 ## Installation
@@ -17,8 +17,9 @@ import (
 )

 const (
-	defaultLogFilename    = "kaspaminer.log"
-	defaultErrLogFilename = "kaspaminer_err.log"
+	defaultLogFilename          = "kaspaminer.log"
+	defaultErrLogFilename       = "kaspaminer_err.log"
+	defaultTargetBlockRateRatio = 2.0
 )

 var (
@@ -30,13 +31,13 @@ var (
 )

 type configFlags struct {
-	ShowVersion           bool    `short:"V" long:"version" description:"Display version information and exit"`
-	RPCServer             string  `short:"s" long:"rpcserver" description:"RPC server to connect to"`
-	MiningAddr            string  `long:"miningaddr" description:"Address to mine to"`
-	NumberOfBlocks        uint64  `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
-	MineWhenNotSynced     bool    `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
-	Profile               string  `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
-	TargetBlocksPerSecond float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. This flag is for debugging purposes."`
+	ShowVersion           bool     `short:"V" long:"version" description:"Display version information and exit"`
+	RPCServer             string   `short:"s" long:"rpcserver" description:"RPC server to connect to"`
+	MiningAddr            string   `long:"miningaddr" description:"Address to mine to"`
+	NumberOfBlocks        uint64   `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
+	MineWhenNotSynced     bool     `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
+	Profile               string   `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
+	TargetBlocksPerSecond *float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. 0 means no limit (The default one is 2 * target network block rate)"`
 	config.NetworkFlags
 }
@@ -64,6 +65,11 @@ func parseConfig() (*configFlags, error) {
 		return nil, err
 	}

+	if cfg.TargetBlocksPerSecond == nil {
+		targetBlocksPerSecond := defaultTargetBlockRateRatio / cfg.NetParams().TargetTimePerBlock.Seconds()
+		cfg.TargetBlocksPerSecond = &targetBlocksPerSecond
+	}
+
 	if cfg.Profile != "" {
 		profilePort, err := strconv.Atoi(cfg.Profile)
 		if err != nil || profilePort < 1024 || profilePort > 65535 {
@@ -1,5 +1,5 @@
 # -- multistage docker build: stage #1: build stage
-FROM golang:1.14-alpine AS build
+FROM golang:1.15-alpine AS build

 RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -48,7 +48,7 @@ func main() {

 	doneChan := make(chan struct{})
 	spawn("mineLoop", func() {
-		err = mineLoop(client, cfg.NumberOfBlocks, cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
+		err = mineLoop(client, cfg.NumberOfBlocks, *cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
 		if err != nil {
 			panic(errors.Wrap(err, "error in mine loop"))
 		}
@@ -117,8 +117,14 @@ func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error

 	rejectReason, err := client.SubmitBlock(block)
 	if err != nil {
+		if nativeerrors.Is(err, router.ErrTimeout) {
+			log.Warnf("Got timeout while submitting block %s to %s: %s", blockHash, client.Address(), err)
+			return nil
+		}
 		if rejectReason == appmessage.RejectReasonIsInIBD {
-			log.Warnf("Block %s was rejected because the node is in IBD", blockHash)
+			const waitTime = 1 * time.Second
+			log.Warnf("Block %s was rejected because the node is in IBD. Waiting for %s", blockHash, waitTime)
+			time.Sleep(waitTime)
 			return nil
 		}
 		return errors.Errorf("Error submitting block %s to %s: %s", blockHash, client.Address(), err)
@@ -152,7 +158,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
 	getBlockTemplate := func() {
 		template, err := client.GetBlockTemplate(miningAddr.String())
 		if nativeerrors.Is(err, router.ErrTimeout) {
-			log.Infof("Got timeout while requesting block template from %s", client.Address())
+			log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
 			return
 		} else if err != nil {
 			errChan <- errors.Errorf("Error getting block template from %s: %s", client.Address(), err)
@@ -161,6 +167,8 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
 		newTemplateChan <- template
 	}
 	getBlockTemplate()
+	const tickerTime = 500 * time.Millisecond
+	ticker := time.NewTicker(tickerTime)
 	for {
 		select {
 		case <-stopChan:
@@ -168,7 +176,8 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
 			return
 		case <-client.blockAddedNotificationChan:
 			getBlockTemplate()
-		case <-time.Tick(500 * time.Millisecond):
+			ticker.Reset(tickerTime)
+		case <-ticker.C:
 			getBlockTemplate()
 		}
 	}
@@ -10,7 +10,7 @@ It is capable of generating wallet key-pairs, printing a wallet's current balanc

 ## Requirements

-Go 1.14 or later.
+Go 1.15 or later.

 ## Installation
@@ -1,5 +1,5 @@
 # -- multistage docker build: stage #1: build stage
-FROM golang:1.14-alpine AS build
+FROM golang:1.15-alpine AS build

 RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -232,12 +232,12 @@ func TestBoundedMergeDepth(t *testing.T) {
 	}

 	factory := NewFactory()
-	consensusBuild, teardownFunc1, err := factory.NewTestConsensus(params, false, "BoundedMergeTestBuild")
+	consensusBuild, teardownFunc1, err := factory.NewTestConsensus(params, false, "TestBoundedMergeTestBuild")
 	if err != nil {
 		t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
 	}

-	consensusReal, teardownFunc2, err := factory.NewTestConsensus(params, false, "BoundedMergeTestReal")
+	consensusReal, teardownFunc2, err := factory.NewTestConsensus(params, false, "TestBoundedMergeTestReal")
 	if err != nil {
 		t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
 	}
@@ -8,6 +8,20 @@ import (
 	"io"
 )

+// MineJSONBlockType indicates which type of blocks MineJSON mines
+type MineJSONBlockType int
+
+const (
+	// MineJSONBlockTypeUTXOValidBlock indicates for MineJSON to mine valid blocks.
+	MineJSONBlockTypeUTXOValidBlock MineJSONBlockType = iota
+
+	// MineJSONBlockTypeUTXOInvalidBlock indicates for MineJSON to mine UTXO invalid blocks.
+	MineJSONBlockTypeUTXOInvalidBlock
+
+	// MineJSONBlockTypeUTXOInvalidHeader indicates for MineJSON to mine UTXO invalid headers.
+	MineJSONBlockTypeUTXOInvalidHeader
+)
+
 // TestConsensus wraps the Consensus interface with some methods that are needed by tests only
 type TestConsensus interface {
 	externalapi.Consensus
@@ -33,7 +47,7 @@ type TestConsensus interface {
 	AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash,
 		*externalapi.BlockInsertionResult, error)

-	MineJSON(r io.Reader) (tips []*externalapi.DomainHash, err error)
+	MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error)
 	DiscardAllStores()

 	AcceptanceDataStore() model.AcceptanceDataStore
@@ -1,6 +1,8 @@
 package blockvalidator

 import (
+	"fmt"
+	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@@ -29,6 +31,21 @@ func (v *blockValidator) ValidateHeaderInContext(blockHash *externalapi.DomainHa
 	if err != nil {
 		return err
 	}

+	var logErr error
+	log.Debug(logger.NewLogClosure(func() string {
+		var ghostdagData *model.BlockGHOSTDAGData
+		ghostdagData, logErr = v.ghostdagDataStore.Get(v.databaseContext, blockHash)
+		if err != nil {
+			return ""
+		}
+
+		return fmt.Sprintf("block %s blue score is %d", blockHash, ghostdagData.BlueScore())
+	}))
+
+	if logErr != nil {
+		return logErr
+	}
 	}

 	err = v.validateMedianTime(header)
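The added logging wraps the ghostdagDataStore lookup in a closure passed to logger.NewLogClosure, so the store access and the fmt.Sprintf only run if debug logging is actually enabled. A self-contained sketch of that lazy-formatting idea, using a hypothetical logClosure type in place of kaspad's logger package:

```go
package main

import "fmt"

// logClosure defers building a log line until (and unless) it is printed;
// fmt only calls String() when the value is actually formatted.
type logClosure func() string

func (c logClosure) String() string { return c() }

// debugEnabled stands in for the logger's level check.
var debugEnabled = false

func debug(args ...interface{}) {
	if !debugEnabled {
		return // closure never invoked, expensive lookup skipped
	}
	fmt.Println(args...)
}

func expensiveLookup() string {
	fmt.Println("hitting the ghostdag store") // visible only when debug is on
	return "blue score is 42"
}

func main() {
	debug(logClosure(func() string {
		return "block deadbeef: " + expensiveLookup()
	}))
	fmt.Println("done")
}
```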
@@ -54,7 +54,7 @@ func buildJsonDAG(t *testing.T, tc testapi.TestConsensus, attackJson bool) (tips
 	}
 	defer gzipReader.Close()

-	tips, err = tc.MineJSON(gzipReader)
+	tips, err = tc.MineJSON(gzipReader, testapi.MineJSONBlockTypeUTXOInvalidHeader)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -106,7 +106,7 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
 	return consensushashing.BlockHash(block), blockInsertionResult, nil
 }

-func (tc *testConsensus) MineJSON(r io.Reader) (tips []*externalapi.DomainHash, err error) {
+func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) {
 	// jsonBlock is a json representation of a block in mine format
 	type jsonBlock struct {
 		ID string `json:"id"`
@@ -145,10 +145,28 @@ func (tc *testConsensus) MineJSON(r io.Reader) (tips []*externalapi.DomainHash,
 			}
 			delete(tipSet, *parentHashes[i])
 		}
-		blockHash, _, err := tc.AddUTXOInvalidHeader(parentHashes)
-		if err != nil {
-			return nil, err
+
+		var blockHash *externalapi.DomainHash
+		switch blockType {
+		case testapi.MineJSONBlockTypeUTXOValidBlock:
+			blockHash, _, err = tc.AddBlock(parentHashes, nil, nil)
+			if err != nil {
+				return nil, err
+			}
+		case testapi.MineJSONBlockTypeUTXOInvalidBlock:
+			blockHash, _, err = tc.AddUTXOInvalidBlock(parentHashes)
+			if err != nil {
+				return nil, err
+			}
+		case testapi.MineJSONBlockTypeUTXOInvalidHeader:
+			blockHash, _, err = tc.AddUTXOInvalidHeader(parentHashes)
+			if err != nil {
+				return nil, err
+			}
+		default:
+			return nil, errors.Errorf("unknwon block type %v", blockType)
 		}

 		parentsMap[block.ID] = blockHash
 		tipSet[*blockHash] = blockHash
 	}
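MineJSON now dispatches on the new MineJSONBlockType instead of always calling AddUTXOInvalidHeader, so JSON-driven tests can mine valid blocks, UTXO-invalid blocks, or header-only blocks from the same input. A simplified, self-contained sketch of the dispatch (the addBlock helper and its return values are placeholders for the real tc.Add* methods):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// MineJSONBlockType mirrors the new enum: it selects which Add* helper a
// JSON-driven miner uses for every block it reads.
type MineJSONBlockType int

const (
	MineJSONBlockTypeUTXOValidBlock MineJSONBlockType = iota
	MineJSONBlockTypeUTXOInvalidBlock
	MineJSONBlockTypeUTXOInvalidHeader
)

// addBlock stands in for tc.AddBlock / tc.AddUTXOInvalidBlock / tc.AddUTXOInvalidHeader.
func addBlock(blockType MineJSONBlockType) (string, error) {
	switch blockType {
	case MineJSONBlockTypeUTXOValidBlock:
		return "valid block", nil
	case MineJSONBlockTypeUTXOInvalidBlock:
		return "UTXO-invalid block", nil
	case MineJSONBlockTypeUTXOInvalidHeader:
		return "UTXO-invalid header", nil
	default:
		return "", errors.Errorf("unknown block type %v", blockType)
	}
}

func main() {
	kind, err := addBlock(MineJSONBlockTypeUTXOInvalidHeader)
	fmt.Println(kind, err)
}
```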
go.mod (2 changed lines)

@@ -1,6 +1,6 @@
 module github.com/kaspanet/kaspad

-go 1.14
+go 1.15

 require (
 	github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd
@@ -5,9 +5,7 @@
 package config

 import (
-	"bufio"
 	"fmt"
-	"io"
 	"net"
 	"os"
 	"path/filepath"
@@ -16,18 +14,15 @@ import (
 	"strings"
 	"time"

-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-
-	"github.com/kaspanet/kaspad/domain/dagconfig"
-
-	"github.com/pkg/errors"
-
 	"github.com/btcsuite/go-socks/socks"
 	"github.com/jessevdk/go-flags"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/dagconfig"
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/util/network"
 	"github.com/kaspanet/kaspad/version"
+	"github.com/pkg/errors"
 )

 const (
@@ -244,9 +239,7 @@ func LoadConfig() (*Config, error) {
 	cfg := &Config{
 		Flags: cfgFlags,
 	}
-	if !preCfg.Simnet || preCfg.ConfigFile !=
-		defaultConfigFile {
-
+	if !preCfg.Simnet || preCfg.ConfigFile != defaultConfigFile {
 		if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {
 			err := createDefaultConfigFile(preCfg.ConfigFile)
 			if err != nil {
@@ -593,13 +586,6 @@ func createDefaultConfigFile(destinationPath string) error {
 		return err
 	}

-	// We assume sample config file path is same as binary
-	path, err := filepath.Abs(filepath.Dir(os.Args[0]))
-	if err != nil {
-		return err
-	}
-	sampleConfigPath := filepath.Join(path, sampleConfigFilename)
-
 	dest, err := os.OpenFile(destinationPath,
 		os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
 	if err != nil {
@@ -607,25 +593,7 @@ func createDefaultConfigFile(destinationPath string) error {
 	}
 	defer dest.Close()

-	src, err := os.Open(sampleConfigPath)
-	if err != nil {
-		return err
-	}
-	defer src.Close()
-
-	// We copy every line from the sample config file to the destination
-	reader := bufio.NewReader(src)
-	for err != io.EOF {
-		var line string
-		line, err = reader.ReadString('\n')
-		if err != nil && err != io.EOF {
-			return err
-		}
-
-		if _, err := dest.WriteString(line); err != nil {
-			return err
-		}
-	}
-
-	return nil
+	_, err = dest.WriteString(sampleConfig)
+	return err
 }
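createDefaultConfigFile no longer searches for sample-kaspad.conf next to the binary and copies it line by line; it writes the sampleConfig string that is now compiled into the package (with go:embed noted in the new file as a future replacement once Go 1.16 is available). A minimal runnable sketch of the new shape, with a shortened placeholder for the embedded text:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// sampleConfig stands in for the new infrastructure/config/sample_config.go;
// the real string mirrors sample-kaspad.conf in full.
var sampleConfig = "[Application Options]\n; datadir=~/.kaspad/data\n"

func createDefaultConfigFile(destinationPath string) error {
	dest, err := os.OpenFile(destinationPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer dest.Close()

	// Write the compiled-in sample instead of copying a file that may not
	// exist next to an installed binary.
	_, err = dest.WriteString(sampleConfig)
	return err
}

func main() {
	dir, err := ioutil.TempDir("", "kaspad-sample")
	if err != nil {
		log.Fatal(err)
	}
	if err := createDefaultConfigFile(filepath.Join(dir, "kaspad.conf")); err != nil {
		log.Fatal(err)
	}
	log.Println("wrote default config under", dir)
}
```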
@@ -18,7 +18,7 @@ func TestCreateDefaultConfigFile(t *testing.T) {
 	if !ok {
 		t.Fatalf("Failed finding config file path")
 	}
-	sampleConfigFile := filepath.Join(filepath.Dir(path), "..", "..", "sample-kaspad.conf")
+	sampleConfigFile := filepath.Join(filepath.Dir(path), "sample-kaspad.conf")

 	// Setup a temporary directory
 	tmpDir, err := ioutil.TempDir("", "kaspad")
@@ -30,7 +30,7 @@

 ; Use Universal Plug and Play (UPnP) to automatically open the listen port
 ; and obtain the external IP address from supported devices. NOTE: This option
-; will have no effect if exernal IP addresses are specified.
+; will have no effect if external IP addresses are specified.
 ; upnp=1

 ; Specify the external IP addresses your node is listening on. One address per
@@ -83,7 +83,7 @@
|
||||
; connect=[fe80::2]:16111
|
||||
|
||||
; Maximum number of inbound and outbound peers.
|
||||
; maxpeers=125
|
||||
; maxinpeers=125
|
||||
|
||||
; Disable banning of misbehaving peers.
|
||||
; nobanning=1
|
||||
@@ -142,9 +142,6 @@
|
||||
; Disable peer bloom filtering. See BIP0111.
|
||||
; nopeerbloomfilters=1
|
||||
|
||||
; Add additional checkpoints. Format: '<height>:<hash>'
|
||||
; addcheckpoint=<height>:<hash>
|
||||
|
||||
; Add comments to the user agent that is advertised to peers.
|
||||
; Must not include characters '/', ':', '(' and ')'.
|
||||
; uacomment=
|
||||
@@ -198,13 +195,6 @@
|
||||
; Set the minimum transaction fee to be considered a non-zero fee,
|
||||
; minrelaytxfee=0.00001
|
||||
|
||||
; Rate-limit free transactions to the value 15 * 1000 bytes per
|
||||
; minute.
|
||||
; limitfreerelay=15
|
||||
|
||||
; Require high priority for relaying free or low-fee transactions.
|
||||
; norelaypriority=0
|
||||
|
||||
; Limit orphan transaction pool to 100 transactions.
|
||||
; maxorphantx=100
|
||||
|
||||
@@ -218,22 +208,6 @@
|
||||
; rejectnonstd=1
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Optional Indexes
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Build and maintain a full hash-based transaction index which makes all
|
||||
; transactions available via the getrawtransaction RPC.
|
||||
; txindex=1
|
||||
|
||||
; Build and maintain a full address-based transaction index which makes the
|
||||
; searchrawtransactions RPC available.
|
||||
; addrindex=1
|
||||
|
||||
; Delete the entire address index on start up, then exit.
|
||||
; dropaddrindex=0
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Signature Verification Cache
|
||||
; ------------------------------------------------------------------------------
|
||||
@@ -242,38 +216,6 @@
|
||||
; sigcachemaxsize=50000
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Coin Generation (Mining) Settings - The following options control the
|
||||
; generation of block templates used by external mining applications through RPC
|
||||
; calls.
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Add addresses to pay mined blocks to in the block templates generated
|
||||
; for the getblocktemplate RPC. One address per line.
|
||||
; miningaddr=kaspa:yourkaspaaddress
|
||||
; miningaddr=kaspa:yourkaspaaddress2
|
||||
; miningaddr=kaspa:yourkaspaaddress3
|
||||
|
||||
; Specify the minimum block size in bytes to create. By default, only
|
||||
; transactions which have enough fees or a high enough priority will be included
|
||||
; in generated block templates. Specifying a minimum block size will instead
|
||||
; attempt to fill generated block templates up with transactions until it is at
|
||||
; least the specified number of bytes.
|
||||
; blockminsize=0
|
||||
|
||||
; Specify the maximum block size in bytes to create. This value will be limited
|
||||
; to the consensus limit if it is larger than that value.
|
||||
; blockmaxsize=750000
|
||||
|
||||
; Specify the size in bytes of the high-priority/low-fee area when creating a
|
||||
; block. Transactions which consist of large amounts, old inputs, and small
|
||||
; sizes have the highest priority. One consequence of this is that as low-fee
|
||||
; or free transactions age, they raise in priority thereby making them more
|
||||
; likely to be included in this section of a new block. This value is limited
|
||||
; by the blackmaxsize option and will be limited as needed.
|
||||
; blockprioritysize=50000
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Debug
|
||||
; ------------------------------------------------------------------------------
|
||||
@@ -290,11 +232,3 @@
|
||||
; accessed at http://localhost:<profileport>/debug/pprof once running.
|
||||
; profile=6061
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Subnetworks
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; If subnetwork > 0, than node will request and process only payloads from
|
||||
; specified subnetwork. And if subnetwork is 0, than payloads of all subnetworks
|
||||
; are processed.
|
||||
; subnetwork=0
|
||||
infrastructure/config/sample_config.go (new file, 238 lines)
@@ -0,0 +1,238 @@
|
||||
package config
|
||||
|
||||
// This should be identical to the content of sample-kaspad.conf
|
||||
// TODO: Replace with go:embed once go1.16 lands
|
||||
var sampleConfig = `[Application Options]
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Data settings
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; The directory to store data such as the block DAG and peer addresses. The
|
||||
; block DAG takes several GB, so this location must have a lot of free space.
|
||||
; The default is ~/.kaspad/data on POSIX OSes, $LOCALAPPDATA/Kaspad/data on Windows,
|
||||
; ~/Library/Application Support/Kaspad/data on Mac OS, and $home/kaspad/data on
|
||||
; Plan9. Environment variables are expanded so they may be used. NOTE: Windows
|
||||
; environment variables are typically %VARIABLE%, but they must be accessed with
|
||||
; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows.
|
||||
; datadir=~/.kaspad/data
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Network settings
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Use testnet.
|
||||
; testnet=1
|
||||
|
||||
; Connect via a SOCKS5 proxy. NOTE: Specifying a proxy will disable listening
|
||||
; for incoming connections unless listen addresses are provided via the 'listen'
|
||||
; option.
|
||||
; proxy=127.0.0.1:9050
|
||||
; proxyuser=
|
||||
; proxypass=
|
||||
|
||||
; Use Universal Plug and Play (UPnP) to automatically open the listen port
|
||||
; and obtain the external IP address from supported devices. NOTE: This option
|
||||
; will have no effect if external IP addresses are specified.
|
||||
; upnp=1
|
||||
|
||||
; Specify the external IP addresses your node is listening on. One address per
|
||||
; line. kaspad will not contact 3rd-party sites to obtain external ip addresses.
|
||||
; This means if you are behind NAT, your node will not be able to advertise a
|
||||
; reachable address unless you specify it here or enable the 'upnp' option (and
|
||||
; have a supported device).
|
||||
; externalip=1.2.3.4
|
||||
; externalip=2002::1234
|
||||
|
||||
; ******************************************************************************
|
||||
; Summary of 'addpeer' versus 'connect'.
|
||||
;
|
||||
; Only one of the following two options, 'addpeer' and 'connect', may be
|
||||
; specified. Both allow you to specify peers that you want to stay connected
|
||||
; with, but the behavior is slightly different. By default, kaspad will query DNS
|
||||
; to find peers to connect to, so unless you have a specific reason such as
|
||||
; those described below, you probably won't need to modify anything here.
|
||||
;
|
||||
; 'addpeer' does not prevent connections to other peers discovered from
|
||||
; the peers you are connected to and also lets the remote peers know you are
|
||||
; available so they can notify other peers they can to connect to you. This
|
||||
; option might be useful if you are having problems finding a node for some
|
||||
; reason (perhaps due to a firewall).
|
||||
;
|
||||
; 'connect', on the other hand, will ONLY connect to the specified peers and
|
||||
; no others. It also disables listening (unless you explicitly set listen
|
||||
; addresses via the 'listen' option) and DNS seeding, so you will not be
|
||||
; advertised as an available peer to the peers you connect to and won't accept
|
||||
; connections from any other peers. So, the 'connect' option effectively allows
|
||||
; you to only connect to "trusted" peers.
|
||||
; ******************************************************************************
|
||||
|
||||
; Add persistent peers to connect to as desired. One peer per line.
|
||||
; You may specify each IP address with or without a port. The default port will
|
||||
; be added automatically if one is not specified here.
|
||||
; addpeer=192.168.1.1
|
||||
; addpeer=10.0.0.2:16111
|
||||
; addpeer=fe80::1
|
||||
; addpeer=[fe80::2]:16111
|
||||
|
||||
; Add persistent peers that you ONLY want to connect to as desired. One peer
|
||||
; per line. You may specify each IP address with or without a port. The
|
||||
; default port will be added automatically if one is not specified here.
|
||||
; NOTE: Specifying this option has other side effects as described above in
|
||||
; the 'addpeer' versus 'connect' summary section.
|
||||
; connect=192.168.1.1
|
||||
; connect=10.0.0.2:16111
|
||||
; connect=fe80::1
|
||||
; connect=[fe80::2]:16111
|
||||
|
||||
; Maximum number of inbound and outbound peers.
|
||||
; maxinpeers=125
|
||||
|
||||
; Disable banning of misbehaving peers.
|
||||
; nobanning=1
|
||||
|
||||
; Maximum allowed ban score before disconnecting and banning misbehaving peers.
|
||||
; banthreshold=100
|
||||
|
||||
; How long to ban misbehaving peers. Valid time units are {s, m, h}.
|
||||
; Minimum 1s.
|
||||
; banduration=24h
|
||||
; banduration=11h30m15s
|
||||
|
||||
; Add whitelisted IP networks and IPs. Connected peers whose IP matches a
|
||||
; whitelist will not have their ban score increased.
|
||||
; whitelist=127.0.0.1
|
||||
; whitelist=::1
|
||||
; whitelist=192.168.0.0/24
|
||||
; whitelist=fd00::/16
|
||||
|
||||
; Disable DNS seeding for peers. By default, when kaspad starts, it will use
|
||||
; DNS to query for available peers to connect with.
|
||||
; nodnsseed=1
|
||||
|
||||
; Specify the interfaces to listen on. One listen address per line.
|
||||
; NOTE: The default port is modified by some options such as 'testnet', so it is
|
||||
; recommended to not specify a port and allow a proper default to be chosen
|
||||
; unless you have a specific reason to do otherwise.
|
||||
; All interfaces on default port (this is the default):
|
||||
; listen=
|
||||
; All ipv4 interfaces on default port:
|
||||
; listen=0.0.0.0
|
||||
; All ipv6 interfaces on default port:
|
||||
; listen=::
|
||||
; All interfaces on port 16111:
|
||||
; listen=:16111
|
||||
; All ipv4 interfaces on port 16111:
|
||||
; listen=0.0.0.0:16111
|
||||
; All ipv6 interfaces on port 16111:
|
||||
; listen=[::]:16111
|
||||
; Only ipv4 localhost on port 8333:
|
||||
; listen=127.0.0.1:8333
|
||||
; Only ipv6 localhost on port 8333:
|
||||
; listen=[::1]:8333
|
||||
; Only ipv4 localhost on non-standard port 8336:
|
||||
; listen=127.0.0.1:8336
|
||||
; All interfaces on non-standard port 8336:
|
||||
; listen=:8336
|
||||
; All ipv4 interfaces on non-standard port 8336:
|
||||
; listen=0.0.0.0:8336
|
||||
; All ipv6 interfaces on non-standard port 8336:
|
||||
; listen=[::]:8336
|
||||
|
||||
; Disable listening for incoming connections. This will override all listeners.
|
||||
; nolisten=1
|
||||
|
||||
; Disable peer bloom filtering. See BIP0111.
|
||||
; nopeerbloomfilters=1
|
||||
|
||||
; Add comments to the user agent that is advertised to peers.
|
||||
; Must not include characters '/', ':', '(' and ')'.
|
||||
; uacomment=
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; RPC server options - The following options control the built-in RPC server
|
||||
; which is used to control and query information from a running kaspad process.
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Specify the interfaces for the RPC server listen on. One listen address per
|
||||
; line. NOTE: The default port is modified by some options such as 'testnet',
|
||||
; so it is recommended to not specify a port and allow a proper default to be
|
||||
; chosen unless you have a specific reason to do otherwise. By default, the
|
||||
; RPC server will only listen on localhost for IPv4 and IPv6.
|
||||
; All interfaces on default port:
|
||||
; rpclisten=
|
||||
; All ipv4 interfaces on default port:
|
||||
; rpclisten=0.0.0.0
|
||||
; All ipv6 interfaces on default port:
|
||||
; rpclisten=::
|
||||
; All interfaces on port 16110:
|
||||
; rpclisten=:16110
|
||||
; All ipv4 interfaces on port 16110:
|
||||
; rpclisten=0.0.0.0:16110
|
||||
; All ipv6 interfaces on port 16110:
|
||||
; rpclisten=[::]:16110
|
||||
; Only ipv4 localhost on port 16110:
|
||||
; rpclisten=127.0.0.1:16110
|
||||
; Only ipv6 localhost on port 16110:
|
||||
; rpclisten=[::1]:16110
|
||||
; Only ipv4 localhost on non-standard port 8337:
|
||||
; rpclisten=127.0.0.1:8337
|
||||
; All interfaces on non-standard port 8337:
|
||||
; rpclisten=:8337
|
||||
; All ipv4 interfaces on non-standard port 8337:
|
||||
; rpclisten=0.0.0.0:8337
|
||||
; All ipv6 interfaces on non-standard port 8337:
|
||||
; rpclisten=[::]:8337
|
||||
|
||||
; Specify the maximum number of concurrent RPC clients for standard connections.
|
||||
; rpcmaxclients=10
|
||||
|
||||
; Use the following setting to disable the RPC server.
|
||||
; norpc=1
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Mempool Settings - The following options
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Set the minimum transaction fee to be considered a non-zero fee,
|
||||
; minrelaytxfee=0.00001
|
||||
|
||||
; Limit orphan transaction pool to 100 transactions.
|
||||
; maxorphantx=100
|
||||
|
||||
; Do not accept transactions from remote peers.
|
||||
; blocksonly=1
|
||||
|
||||
; Relay non-standard transactions regardless of default network settings.
|
||||
; relaynonstd=1
|
||||
|
||||
; Reject non-standard transactions regardless of default network settings.
|
||||
; rejectnonstd=1
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Signature Verification Cache
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Limit the signature cache to a max of 50000 entries.
|
||||
; sigcachemaxsize=50000
|
||||
|
||||
|
||||
; ------------------------------------------------------------------------------
|
||||
; Debug
|
||||
; ------------------------------------------------------------------------------
|
||||
|
||||
; Debug logging level.
|
||||
; Valid levels are {trace, debug, info, warn, error, critical}
|
||||
; You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set
|
||||
; log level for individual subsystems. Use kaspad --debuglevel=show to list
|
||||
; available subsystems.
|
||||
; debuglevel=info
|
||||
|
||||
; The port used to listen for HTTP profile requests. The profile server will
|
||||
; be disabled if this option is not specified. The profile information can be
|
||||
; accessed at http://localhost:<profileport>/debug/pprof once running.
|
||||
; profile=6061
|
||||
`
|
||||
@@ -12,16 +12,22 @@ import (
 	"github.com/pkg/errors"
 )

-// AddressRandomizer is the interface for the randomizer needed for the AddressManager.
-type AddressRandomizer interface {
+// addressRandomizer is the interface for the randomizer needed for the AddressManager.
+type addressRandomizer interface {
 	RandomAddress(addresses []*appmessage.NetAddress) *appmessage.NetAddress
 	RandomAddresses(addresses []*appmessage.NetAddress, count int) []*appmessage.NetAddress
 }

-// AddressKey represents a pair of IP and port, the IP is always in V6 representation
-type AddressKey struct {
+// addressKey represents a pair of IP and port, the IP is always in V6 representation
+type addressKey struct {
 	port    uint16
-	address [net.IPv6len]byte
+	address ipv6
 }

+type ipv6 [net.IPv6len]byte
+
+func (i ipv6) equal(other ipv6) bool {
+	return i == other
+}
+
 // ErrAddressNotFound is an error returned from some functions when a
@@ -29,16 +35,16 @@ type AddressKey struct {
|
||||
var ErrAddressNotFound = errors.New("address not found")
|
||||
|
||||
// NetAddressKey returns a key of the ip address to use it in maps.
|
||||
func netAddressKey(netAddress *appmessage.NetAddress) AddressKey {
|
||||
key := AddressKey{port: netAddress.Port}
|
||||
func netAddressKey(netAddress *appmessage.NetAddress) addressKey {
|
||||
key := addressKey{port: netAddress.Port}
|
||||
// all IPv4 can be represented as IPv6.
|
||||
copy(key.address[:], netAddress.IP.To16())
|
||||
return key
|
||||
}
|
||||
|
||||
// netAddressKeys returns a key of the ip address to use it in maps.
|
||||
func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[AddressKey]bool {
|
||||
result := make(map[AddressKey]bool, len(netAddresses))
|
||||
func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[addressKey]bool {
|
||||
result := make(map[addressKey]bool, len(netAddresses))
|
||||
for _, netAddress := range netAddresses {
|
||||
key := netAddressKey(netAddress)
|
||||
result[key] = true
|
||||
@@ -50,12 +56,12 @@ func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[AddressKey]bool
|
||||
// AddressManager provides a concurrency safe address manager for caching potential
|
||||
// peers on the Kaspa network.
|
||||
type AddressManager struct {
|
||||
addresses map[AddressKey]*appmessage.NetAddress
|
||||
bannedAddresses map[AddressKey]*appmessage.NetAddress
|
||||
addresses map[addressKey]*appmessage.NetAddress
|
||||
bannedAddresses map[ipv6]*appmessage.NetAddress
|
||||
localAddresses *localAddressManager
|
||||
mutex sync.Mutex
|
||||
cfg *Config
|
||||
random AddressRandomizer
|
||||
random addressRandomizer
|
||||
}
|
||||
|
||||
// New returns a new Kaspa address manager.
|
||||
@@ -66,8 +72,8 @@ func New(cfg *Config) (*AddressManager, error) {
|
||||
}
|
||||
|
||||
return &AddressManager{
|
||||
addresses: map[AddressKey]*appmessage.NetAddress{},
|
||||
bannedAddresses: map[AddressKey]*appmessage.NetAddress{},
|
||||
addresses: map[addressKey]*appmessage.NetAddress{},
|
||||
bannedAddresses: map[ipv6]*appmessage.NetAddress{},
|
||||
localAddresses: localAddresses,
|
||||
random: NewAddressRandomize(),
|
||||
cfg: cfg,
|
||||
@@ -111,7 +117,6 @@ func (am *AddressManager) RemoveAddress(address *appmessage.NetAddress) {
|
||||
|
||||
key := netAddressKey(address)
|
||||
delete(am.addresses, key)
|
||||
delete(am.bannedAddresses, key)
|
||||
}
|
||||
|
||||
// Addresses returns all addresses
|
||||
@@ -175,21 +180,23 @@ func (am *AddressManager) BestLocalAddress(remoteAddress *appmessage.NetAddress)
 }

 // Ban marks the given address as banned
-func (am *AddressManager) Ban(address *appmessage.NetAddress) error {
+func (am *AddressManager) Ban(addressToBan *appmessage.NetAddress) {
 	am.mutex.Lock()
 	defer am.mutex.Unlock()

-	key := netAddressKey(address)
-	addressToBan, ok := am.addresses[key]
-	if !ok {
-		return errors.Wrapf(ErrAddressNotFound, "address %s "+
-			"is not registered with the address manager", address.TCPAddress())
+	keyToBan := netAddressKey(addressToBan)
+	keysToDelete := make([]addressKey, 0)
+	for _, address := range am.addresses {
+		key := netAddressKey(address)
+		if key.address.equal(keyToBan.address) {
+			keysToDelete = append(keysToDelete, key)
+		}
+	}
+	for _, key := range keysToDelete {
+		delete(am.addresses, key)
 	}

-	delete(am.addresses, key)
-	am.bannedAddresses[key] = addressToBan
-	return nil
+	am.bannedAddresses[keyToBan.address] = addressToBan
 }

 // Unban unmarks the given address as banned
@@ -198,13 +205,13 @@ func (am *AddressManager) Unban(address *appmessage.NetAddress) error {
 	defer am.mutex.Unlock()

 	key := netAddressKey(address)
-	bannedAddress, ok := am.bannedAddresses[key]
+	bannedAddress, ok := am.bannedAddresses[key.address]
 	if !ok {
 		return errors.Wrapf(ErrAddressNotFound, "address %s "+
 			"is not registered with the address manager as banned", address.TCPAddress())
 	}

-	delete(am.bannedAddresses, key)
+	delete(am.bannedAddresses, key.address)
 	am.addresses[key] = bannedAddress
 	return nil
 }
@@ -215,7 +222,7 @@ func (am *AddressManager) IsBanned(address *appmessage.NetAddress) (bool, error)
 	defer am.mutex.Unlock()

 	key := netAddressKey(address)
-	if _, ok := am.bannedAddresses[key]; !ok {
+	if _, ok := am.bannedAddresses[key.address]; !ok {
 		if _, ok = am.addresses[key]; !ok {
 			return false, errors.Wrapf(ErrAddressNotFound, "address %s "+
 				"is not registered with the address manager", address.TCPAddress())
@@ -7,7 +7,7 @@ import (
 	"github.com/kaspanet/kaspad/app/appmessage"
 )

-// AddressRandomize implement AddressRandomizer interface
+// AddressRandomize implement addressRandomizer interface
 type AddressRandomize struct {
 	random *rand.Rand
 }
@@ -38,7 +38,7 @@ type localAddress struct {
 }

 type localAddressManager struct {
-	localAddresses map[AddressKey]*localAddress
+	localAddresses map[addressKey]*localAddress
 	lookupFunc     func(string) ([]net.IP, error)
 	cfg            *Config
 	mutex          sync.Mutex
@@ -46,7 +46,7 @@ type localAddressManager struct {

 func newLocalAddressManager(cfg *Config) (*localAddressManager, error) {
 	localAddressManager := localAddressManager{
-		localAddresses: map[AddressKey]*localAddress{},
+		localAddresses: map[addressKey]*localAddress{},
 		cfg:            cfg,
 		lookupFunc:     cfg.Lookup,
 	}
@@ -126,35 +126,41 @@ func (c *ConnectionManager) ConnectionCount() int {
 }

 // Ban marks the given netConnection as banned
-func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) error {
-	return c.addressManager.Ban(netConnection.NetAddress())
+func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) {
+	if c.isPermanent(netConnection.Address()) {
+		log.Infof("Cannot ban %s because it's a permanent connection", netConnection.Address())
+		return
+	}
+
+	c.addressManager.Ban(netConnection.NetAddress())
 }

 // IsBanned returns whether the given netConnection is banned
 func (c *ConnectionManager) IsBanned(netConnection *netadapter.NetConnection) (bool, error) {
+	if c.isPermanent(netConnection.Address()) {
+		return false, nil
+	}
+
 	return c.addressManager.IsBanned(netConnection.NetAddress())
 }

 func (c *ConnectionManager) waitTillNextIteration() {
 	select {
 	case <-c.resetLoopChan:
-		c.loopTicker.Stop()
-		c.loopTicker = time.NewTicker(connectionsLoopInterval)
+		c.loopTicker.Reset(connectionsLoopInterval)
 	case <-c.loopTicker.C:
 	}
 }

-func (c *ConnectionManager) connectionExists(addressString string) bool {
-	if _, ok := c.activeRequested[addressString]; ok {
-		return true
+func (c *ConnectionManager) isPermanent(addressString string) bool {
+	c.connectionRequestsLock.Lock()
+	defer c.connectionRequestsLock.Unlock()
+	if conn, ok := c.activeRequested[addressString]; ok {
+		return conn.isPermanent
 	}

-	if _, ok := c.activeOutgoing[addressString]; ok {
-		return true
-	}
-
-	if _, ok := c.activeIncoming[addressString]; ok {
-		return true
+	if conn, ok := c.pendingRequested[addressString]; ok {
+		return conn.isPermanent
 	}

 	return false
@@ -9,6 +9,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/peer"
 	"net"
+	"time"
 )

 type gRPCServer struct {
@@ -61,7 +62,20 @@ func (s *gRPCServer) listenOn(listenAddr string) error {
 }

 func (s *gRPCServer) Stop() error {
-	s.server.GracefulStop()
+	const stopTimeout = 2 * time.Second
+
+	stopChan := make(chan interface{})
+	spawn("gRPCServer.Stop", func() {
+		s.server.GracefulStop()
+		close(stopChan)
+	})
+
+	select {
+	case <-stopChan:
+	case <-time.After(stopTimeout):
+		log.Warnf("Could not gracefully stop %s: timed out after %s", s.name, stopTimeout)
+		s.server.Stop()
+	}
 	return nil
 }
@@ -10,8 +10,8 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs

 const (
 	appMajor uint = 0
-	appMinor uint = 9
-	appPatch uint = 0
+	appMinor uint = 8
+	appPatch uint = 7
 )

 // appBuild is defined as a variable so it can be overridden during the build