mirror of https://github.com/kaspanet/kaspad.git
synced 2026-02-21 03:03:08 +00:00

Compare commits
2 commits: patch9...v0.11-with

| Author | SHA1 | Date |
|---|---|---|
|  | c46503ea7e |  |
|  | bbdf7b246d |  |
@@ -1,6 +1,8 @@
 package blockrelay
 
 import (
+	"time"
+
 	"github.com/kaspanet/kaspad/app/appmessage"
 	"github.com/kaspanet/kaspad/app/protocol/common"
 	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
@@ -11,6 +13,7 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/infrastructure/config"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
+	"github.com/kaspanet/kaspad/util/mstime"
 	"github.com/pkg/errors"
 )
 
@@ -109,6 +112,8 @@ func (flow *handleRelayInvsFlow) start() error {
 		if err != nil {
 			return err
 		}
+		msTime := mstime.UnixMilliseconds(block.Header.TimeInMilliseconds())
+		time.Sleep(time.Until(msTime.ToNativeTime().Add(config.DelayDuration)))
 
 		log.Debugf("Processing block %s", inv.Hash)
 		missingParents, blockInsertionResult, err := flow.processBlock(block)
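
The two lines added to the relay flow hold each relayed block back until config.DelayDuration has elapsed past the block's own header timestamp, which is stored as milliseconds since the Unix epoch. A minimal standalone sketch of that arithmetic, with a hypothetical timestamp standing in for block.Header.TimeInMilliseconds() and a fixed value standing in for config.DelayDuration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical header timestamp: milliseconds since the Unix epoch,
	// standing in for block.Header.TimeInMilliseconds().
	blockTimeMillis := time.Now().Add(-200 * time.Millisecond).UnixMilli()

	// Stand-in for config.DelayDuration.
	delay := 500 * time.Millisecond

	// Same arithmetic as the patch: sleep until blockTime + delay.
	blockTime := time.UnixMilli(blockTimeMillis)
	wakeAt := blockTime.Add(delay)
	fmt.Println("sleeping for", time.Until(wakeAt).Round(time.Millisecond))
	time.Sleep(time.Until(wakeAt)) // returns immediately if wakeAt already passed
}
```

Because time.Until yields a negative duration for a timestamp already in the past, a block older than the delay is processed without any sleep.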
@@ -2,7 +2,9 @@ package main
 
 import (
 	nativeerrors "errors"
+	"fmt"
 	"math/rand"
+	"runtime"
 	"sync/atomic"
 	"time"
 
@@ -38,37 +40,40 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond
 		templatesLoop(client, miningAddr, errChan)
 	})
 
-	spawn("blocksLoop", func() {
-		const windowSize = 10
-		hasBlockRateTarget := targetBlocksPerSecond != 0
-		var windowTicker, blockTicker *time.Ticker
-		// We use tickers to limit the block rate:
-		// 1. windowTicker -> makes sure that the last windowSize blocks take at least windowSize*targetBlocksPerSecond.
-		// 2. blockTicker -> makes sure that each block takes at least targetBlocksPerSecond/windowSize.
-		// that way we both allow for fluctuation in block rate but also make sure they're not too big (by an order of magnitude)
-		if hasBlockRateTarget {
-			windowRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond / windowSize))
-			blockRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond * windowSize))
-			log.Infof("Minimum average time per %d blocks: %s, smaller minimum time per block: %s", windowSize, windowRate, blockRate)
-			windowTicker = time.NewTicker(windowRate)
-			blockTicker = time.NewTicker(blockRate)
-			defer windowTicker.Stop()
-			defer blockTicker.Stop()
-		}
-		windowStart := time.Now()
-		for blockIndex := 1; ; blockIndex++ {
-			foundBlockChan <- mineNextBlock(mineWhenNotSynced)
-			if hasBlockRateTarget {
-				<-blockTicker.C
-				if (blockIndex % windowSize) == 0 {
-					tickerStart := time.Now()
-					<-windowTicker.C
-					log.Infof("Finished mining %d blocks in: %s. slept for: %s", windowSize, time.Since(windowStart), time.Since(tickerStart))
-					windowStart = time.Now()
-				}
-			}
-		}
-	})
+	for c := 0; c < (runtime.NumCPU()/2)+1; c++ {
+		c := c
+		spawn(fmt.Sprintf("blocksLoop %d", c), func() {
+			const windowSize = 10
+			hasBlockRateTarget := targetBlocksPerSecond != 0
+			var windowTicker, blockTicker *time.Ticker
+			// We use tickers to limit the block rate:
+			// 1. windowTicker -> makes sure that the last windowSize blocks take at least windowSize*targetBlocksPerSecond.
+			// 2. blockTicker -> makes sure that each block takes at least targetBlocksPerSecond/windowSize.
+			// that way we both allow for fluctuation in block rate but also make sure they're not too big (by an order of magnitude)
+			if hasBlockRateTarget {
+				windowRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond / windowSize))
+				blockRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond * windowSize))
+				log.Infof("Minimum average time per %d blocks: %s, smaller minimum time per block: %s", windowSize, windowRate, blockRate)
+				windowTicker = time.NewTicker(windowRate)
+				blockTicker = time.NewTicker(blockRate)
+				defer windowTicker.Stop()
+				defer blockTicker.Stop()
+			}
+			windowStart := time.Now()
+			for blockIndex := 1; ; blockIndex++ {
+				foundBlockChan <- mineNextBlock(mineWhenNotSynced)
+				if hasBlockRateTarget {
+					<-blockTicker.C
+					if (blockIndex % windowSize) == 0 {
+						tickerStart := time.Now()
+						<-windowTicker.C
+						log.Infof("Finished mining %d blocks in: %s. slept for: %s", windowSize, time.Since(windowStart), time.Since(tickerStart))
+						windowStart = time.Now()
+					}
+				}
+			}
+		})
+	}
 
 	spawn("handleFoundBlock", func() {
 		for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
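
This change replaces the single blocksLoop goroutine with (runtime.NumCPU()/2)+1 of them; the `c := c` rebind gives each goroutine its own copy of the loop variable (needed with pre-1.22 Go loop semantics). The two-ticker comment describes a rate limiter that bounds both the long-run average and short bursts. A self-contained sketch of that scheme, with a hypothetical target rate and the mining step elided:

```go
package main

import (
	"log"
	"time"
)

func main() {
	const windowSize = 10
	targetPerSecond := 5.0 // hypothetical block-rate target

	// Coarse floor: the last windowSize blocks must take at least
	// windowSize/targetPerSecond seconds in total.
	windowRate := time.Duration(float64(time.Second) / (targetPerSecond / windowSize))
	// Fine floor: each block must take at least 1/(targetPerSecond*windowSize)
	// seconds, so a burst can exceed the target by at most a factor of windowSize.
	blockRate := time.Duration(float64(time.Second) / (targetPerSecond * windowSize))

	windowTicker := time.NewTicker(windowRate)
	blockTicker := time.NewTicker(blockRate)
	defer windowTicker.Stop()
	defer blockTicker.Stop()

	windowStart := time.Now()
	for i := 1; i <= 2*windowSize; i++ {
		// ... do one unit of work (mine one block) here ...
		<-blockTicker.C // fine-grained floor on per-block time
		if i%windowSize == 0 {
			<-windowTicker.C // coarse floor on the whole window
			log.Printf("window of %d done in %s", windowSize, time.Since(windowStart))
			windowStart = time.Now()
		}
	}
}
```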
@@ -27,6 +27,9 @@ import (
 	"github.com/pkg/errors"
 )
 
+// DelayDuration a duration for the delay, global for testing
+var DelayDuration time.Duration
+
 const (
 	defaultConfigFilename = "kaspad.conf"
 	defaultDataDirname    = "data"
@@ -122,6 +125,7 @@ type Flags struct {
 	UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"`
 	IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)'"`
 	EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"`
+	Delay float32 `long:"delay" description:"Provide a delay in seconds as a floating point"`
 	NetworkFlags
 	ServiceOptions *ServiceOptions
 }
@@ -577,6 +581,8 @@ func LoadConfig() (*Config, error) {
 	if configFileError != nil {
 		log.Warnf("%s", configFileError)
 	}
 
+	DelayDuration = time.Duration(cfg.Delay * float32(time.Second))
+
 	return cfg, nil
 }
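
LoadConfig turns the float32 seconds from the new --delay flag into a time.Duration by scaling with time.Second (one billion nanoseconds). A quick standalone check of that conversion, as if the user had passed --delay=0.5:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// As if the user passed --delay=0.5 on the command line.
	var delay float32 = 0.5

	// The same conversion LoadConfig performs: scale seconds by time.Second.
	d := time.Duration(delay * float32(time.Second))
	fmt.Println(d) // 500ms
}
```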
@@ -3,6 +3,7 @@ package netadapter
 import (
 	"sync"
 	"sync/atomic"
+	"time"
 
 	"github.com/kaspanet/kaspad/app/appmessage"
 	"github.com/kaspanet/kaspad/infrastructure/config"
@@ -175,18 +176,20 @@ func (na *NetAdapter) ID() *id.ID {
 // P2PBroadcast sends the given `message` to every peer corresponding
 // to each NetConnection in the given netConnections
 func (na *NetAdapter) P2PBroadcast(netConnections []*NetConnection, message appmessage.Message) error {
-	na.p2pConnectionsLock.RLock()
-	defer na.p2pConnectionsLock.RUnlock()
-
-	for _, netConnection := range netConnections {
-		err := netConnection.router.OutgoingRoute().Enqueue(message)
-		if err != nil {
-			if errors.Is(err, routerpkg.ErrRouteClosed) {
-				log.Debugf("Cannot enqueue message to %s: router is closed", netConnection)
-				continue
-			}
-			return err
-		}
-	}
+	go func() {
+		time.Sleep(config.DelayDuration)
+		na.p2pConnectionsLock.RLock()
+		defer na.p2pConnectionsLock.RUnlock()
+		for _, netConnection := range netConnections {
+			err := netConnection.router.OutgoingRoute().Enqueue(message)
+			if err != nil {
+				if errors.Is(err, routerpkg.ErrRouteClosed) {
+					log.Debugf("Cannot enqueue message to %s: router is closed", netConnection)
+					continue
+				}
+				log.Error(err)
+			}
+		}
+	}()
 	return nil
 }
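
With this change P2PBroadcast returns nil immediately and performs the actual enqueueing on a goroutine after config.DelayDuration, so a failed Enqueue can only be logged (the former `return err` becomes log.Error), and the goroutine takes the read lock itself rather than relying on the caller's deferred unlock. A minimal sketch of that fire-and-forget shape, with a hypothetical enqueue function and peer names standing in for NetConnection and its router:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errRouteClosed is a stand-in for routerpkg.ErrRouteClosed.
var errRouteClosed = errors.New("route closed")

// enqueue is a hypothetical stand-in for Enqueue on a connection's outgoing route.
func enqueue(conn, msg string) error {
	if conn == "closed-peer" {
		return errRouteClosed
	}
	fmt.Printf("sent %q to %s\n", msg, conn)
	return nil
}

// broadcast mirrors the patched shape: return nil immediately,
// do the actual sends on a goroutine after the delay.
func broadcast(conns []string, msg string, delay time.Duration) error {
	go func() {
		time.Sleep(delay)
		for _, conn := range conns {
			if err := enqueue(conn, msg); err != nil {
				if errors.Is(err, errRouteClosed) {
					continue // a closed route is expected; skip that peer
				}
				fmt.Println("broadcast error:", err) // caller is gone; log only
			}
		}
	}()
	return nil
}

func main() {
	_ = broadcast([]string{"peer-1", "closed-peer", "peer-2"}, "inv", 100*time.Millisecond)
	time.Sleep(200 * time.Millisecond) // crude wait so the goroutine can finish
}
```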