Compare commits

...

20 Commits

Author SHA1 Message Date
Elichai Turkel
bb96719698 Better benchmark 2021-02-09 16:03:41 +02:00
stasatdaglabs
1564972908 Add more logs. 2021-01-31 12:29:35 +02:00
stasatdaglabs
f0b772f4d6 Add more logs, In TestPickVirtualParents, only print relevant logs. 2021-01-31 09:56:22 +02:00
stasatdaglabs
8e09bc9cb6 Merge remote-tracking branch 'origin/large-reorg-logs' into multiple-chain-slowdown 2021-01-29 12:08:29 +02:00
stasatdaglabs
5cf1663108 Add logging. 2021-01-29 12:07:30 +02:00
stasatdaglabs
cda9d5f27e Fix an error string. 2021-01-29 11:50:53 +02:00
stasatdaglabs
ceb7cda983 Implement TestPickVirtualParents. 2021-01-29 11:50:12 +02:00
stasatdaglabs
5dfc630980 Merge branch 'v0.9.0-dev' into large-reorg-logs 2021-01-29 10:48:24 +02:00
stasatdaglabs
e6da05679f Add logs to help debug long virtual parent selection. 2021-01-29 10:46:44 +02:00
stasatdaglabs
65e149b2bb In kaspaminer, don't crash on submitBlock timeout (#1462)
* In kaspaminer, don't crash on submitBlock timeout.

* Make timeout messages have a log level of Warn.

* Wait for a second after receiving a reject for IBD.

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-29 09:10:21 +02:00
stasatdaglabs
7c1495ba65 Force stop gRPC servers after a short timeout (#1463)
* Force stop gRPC servers after a short timeout.

* Use spawn instead of go.
2021-01-28 19:43:04 +02:00
Ori Newman
13ffa5093c Increase the waiting for error timeout (#1465) 2021-01-28 13:33:37 +02:00
Ori Newman
a9a810a2b2 Add block type to MineJSON (#1464) 2021-01-28 13:22:20 +02:00
Michael Sutton
c9b591f2d3 Final reindex algorithm (#1430)
* Mine JSON

* [Reindex tests] add test_params and validate_mining flag to test_consensus

* Rename file and extend tests

* Ignore local test datasets

* Use spaces over tabs

* Reindex algorithm - full algorithm, initial commit, some tests fail

* Reindex algorithm - a few critical fixes

* Reindex algorithm - move reindex struct and all related operations to new file

* Reindex algorithm - added a validateIntervals method and modified tests to use it (instead of exact comparisons)

* Reindex algorithm - modified reindexIntervals to receive the new child as argument and fixed an important related bug

* Reindex attack tests - move logic to helper function and add stretch test

* Reindex algorithm - variable names and some comments

* Reindex algorithm - minor changes

* Reindex algorithm - minor changes 2

* Reindex algorithm - extended stretch test

* Reindex algorithm - small fix to validate function

* Reindex tests - move tests and add DAG files

* go format fixes

* TestParams doc comment

* Reindex tests - exact comparisons are not needed

* Update to version 0.8.6

* Remove TestParams and use AddUTXOInvalidHeader instead

* Use gzipeed test files

* This unintended change somehow slipped in through branch merges

* Rename test

* Move interval increase/decrease methods to reachability interval file

* Addressing a bunch of minor review comments

* Addressed a few more minor review comments

* Make code of offsetSiblingsBefore and offsetSiblingsAfter symmetric

* Optimize reindex logic in cases where reorg occurs + reorg test

* Do not change reindex root too fast (on reorg)

* Some comments

* A few more comments

* Addressing review comments

* Remove TestNoAttackAlternateReorg and assert chain attack

* Minor

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-27 17:09:20 +02:00
Ori Newman
8d6e71d490 Add IBD test cases and check for MsgUnexpectedPruningPoint on receivePruningPointBlock as well (#1459)
* Check for MsgUnexpectedPruningPoint on receivePruningPointBlock as well

* Add IBD test cases

* Revert "Check for MsgUnexpectedPruningPoint on receivePruningPointBlock as well"

This reverts commit 6a6d1ea180.

* Change log level for two logs

* Remove "testing a situation where the pruning point moved during IBD (before sending the pruning point block)"
2021-01-27 16:42:42 +02:00
Elichai Turkel
2823461fe2 Change AddressKey from string to port+ipv6 address (#1458) 2021-01-27 16:09:32 +02:00
Elichai Turkel
2075c585da Fix race condition in kaspaminer (#1455)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-27 13:08:35 +02:00
Elichai Turkel
01aee62cb0 Add log and measure to pruning points (#1457) 2021-01-27 11:40:51 +02:00
Ori Newman
a6ee871f7e Increase maxSelectedParentTimeDiffToAllowMiningInMilliSeconds to one hour (#1456) 2021-01-27 11:04:58 +02:00
Mike Zak
6393a8186a Update to version 0.9.0 2021-01-27 09:22:56 +02:00
35 changed files with 2398 additions and 1043 deletions

View File

@@ -3,7 +3,7 @@ package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 300_000
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)
// ShouldMine returns whether it's ok to use block template from this node

View File

@@ -446,11 +446,11 @@ func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
log.Debugf("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
log.Debugf("Could not receive the next UTXO chunk because the pruning point %s "+
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
return false, nil

File diff suppressed because it is too large Load Diff

View File

@@ -20,13 +20,13 @@ import (
"github.com/pkg/errors"
)
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
var hashesTried uint64
const logHashRateInterval = 10 * time.Second
func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond float64, mineWhenNotSynced bool,
miningAddr util.Address) error {
rand.Seed(time.Now().UnixNano()) // Seed the global concurrent-safe random source.
errChan := make(chan error)
@@ -87,7 +87,7 @@ func logHashRate() {
spawn("logHashRate", func() {
lastCheck := time.Now()
for range time.Tick(logHashRateInterval) {
currentHashesTried := hashesTried
currentHashesTried := atomic.LoadUint64(&hashesTried)
currentTime := time.Now()
kiloHashesTried := float64(currentHashesTried) / 1000.0
hashRate := kiloHashesTried / currentTime.Sub(lastCheck).Seconds()
@@ -117,8 +117,14 @@ func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error
rejectReason, err := client.SubmitBlock(block)
if err != nil {
if nativeerrors.Is(err, router.ErrTimeout) {
log.Warnf("Got timeout while submitting block %s to %s: %s", blockHash, client.Address(), err)
return nil
}
if rejectReason == appmessage.RejectReasonIsInIBD {
log.Warnf("Block %s was rejected because the node is in IBD", blockHash)
const waitTime = 1 * time.Second
log.Warnf("Block %s was rejected because the node is in IBD. Waiting for %s", blockHash, waitTime)
time.Sleep(waitTime)
return nil
}
return errors.Errorf("Error submitting block %s to %s: %s", blockHash, client.Address(), err)
@@ -129,7 +135,7 @@ func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error
func solveBlock(block *externalapi.DomainBlock, stopChan chan struct{}, foundBlock chan *externalapi.DomainBlock) {
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
initialNonce := random.Uint64()
initialNonce := rand.Uint64() // Use the global concurrent-safe random source.
for i := initialNonce; i != initialNonce-1; i++ {
select {
case <-stopChan:
@@ -152,7 +158,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
getBlockTemplate := func() {
template, err := client.GetBlockTemplate(miningAddr.String())
if nativeerrors.Is(err, router.ErrTimeout) {
log.Infof("Got timeout while requesting block template from %s", client.Address())
log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
return
} else if err != nil {
errChan <- errors.Errorf("Error getting block template from %s: %s", client.Address(), err)
@@ -191,8 +197,9 @@ func solveLoop(newTemplateChan chan *appmessage.GetBlockTemplateResponseMessage,
stopOldTemplateSolving = make(chan struct{})
block := appmessage.MsgBlockToDomainBlock(template.MsgBlock)
stopOldTemplateSolvingCopy := stopOldTemplateSolving
spawn("solveBlock", func() {
solveBlock(block, stopOldTemplateSolving, foundBlock)
solveBlock(block, stopOldTemplateSolvingCopy, foundBlock)
})
}
if stopOldTemplateSolving != nil {

View File

@@ -9,5 +9,5 @@ type ReachabilityManager interface {
IsReachabilityTreeAncestorOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
IsDAGAncestorOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
UpdateReindexRoot(selectedTip *externalapi.DomainHash) error
FindAncestorOfThisAmongChildrenOfOther(this, other *externalapi.DomainHash) (*externalapi.DomainHash, error)
FindNextAncestor(descendant, ancestor *externalapi.DomainHash) (*externalapi.DomainHash, error)
}

View File

@@ -5,6 +5,21 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"io"
)
// MineJSONBlockType indicates which type of blocks MineJSON mines
type MineJSONBlockType int
const (
// MineJSONBlockTypeUTXOValidBlock indicates for MineJSON to mine valid blocks.
MineJSONBlockTypeUTXOValidBlock MineJSONBlockType = iota
// MineJSONBlockTypeUTXOInvalidBlock indicates for MineJSON to mine UTXO invalid blocks.
MineJSONBlockTypeUTXOInvalidBlock
// MineJSONBlockTypeUTXOInvalidHeader indicates for MineJSON to mine UTXO invalid headers.
MineJSONBlockTypeUTXOInvalidHeader
)
// TestConsensus wraps the Consensus interface with some methods that are needed by tests only
@@ -32,6 +47,7 @@ type TestConsensus interface {
AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash,
*externalapi.BlockInsertionResult, error)
MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error)
DiscardAllStores()
AcceptanceDataStore() model.AcceptanceDataStore

View File

@@ -1,6 +1,9 @@
package testapi
import "github.com/kaspanet/kaspad/domain/consensus/model"
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestReachabilityManager adds to the main ReachabilityManager methods required by tests
type TestReachabilityManager interface {
@@ -8,4 +11,6 @@ type TestReachabilityManager interface {
SetReachabilityReindexWindow(reindexWindow uint64)
SetReachabilityReindexSlack(reindexSlack uint64)
ReachabilityReindexSlack() uint64
ValidateIntervals(root *externalapi.DomainHash) error
GetAllNodes(root *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -15,8 +16,10 @@ import (
func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *externalapi.DomainHash) (
model.UTXODiff, externalapi.AcceptanceData, model.Multiset, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "CalculatePastUTXOAndAcceptanceData")
defer onEnd()
log.Debugf("CalculatePastUTXOAndAcceptanceData start for block %s", blockHash)
defer log.Debugf("CalculatePastUTXOAndAcceptanceData end for block %s", blockHash)
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Block %s is the genesis. By definition, "+
@@ -35,6 +38,9 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *
if err != nil {
return nil, nil, nil, err
}
log.Debugf("Restored the past UTXO of block %s with selectedParent %s. "+
"Diff toAdd length: %d, toRemove length: %d", blockHash, blockGHOSTDAGData.SelectedParent(),
selectedParentPastUTXO.ToAdd().Len(), selectedParentPastUTXO.ToRemove().Len())
log.Debugf("Applying blue blocks to the selected parent past UTXO of block %s", blockHash)
acceptanceData, utxoDiff, err := csm.applyMergeSetBlocks(blockHash, selectedParentPastUTXO, blockGHOSTDAGData)
@@ -53,8 +59,10 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *
}
func (csm *consensusStateManager) restorePastUTXO(blockHash *externalapi.DomainHash) (model.MutableUTXODiff, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "restorePastUTXO")
defer onEnd()
log.Debugf("restorePastUTXO start for block %s", blockHash)
defer log.Debugf("restorePastUTXO end for block %s", blockHash)
var err error
@@ -110,8 +118,10 @@ func (csm *consensusStateManager) applyMergeSetBlocks(blockHash *externalapi.Dom
selectedParentPastUTXODiff model.MutableUTXODiff, ghostdagData *model.BlockGHOSTDAGData) (
externalapi.AcceptanceData, model.MutableUTXODiff, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "applyMergeSetBlocks")
defer onEnd()
log.Debugf("applyMergeSetBlocks start for block %s", blockHash)
defer log.Tracef("applyMergeSetBlocks end for block %s", blockHash)
mergeSetHashes := ghostdagData.MergeSet()
log.Debugf("Merge set for block %s is %v", blockHash, mergeSetHashes)

View File

@@ -1,6 +1,7 @@
package consensusstatemanager
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -9,8 +10,10 @@ import (
)
func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "pickVirtualParents")
defer onEnd()
log.Debugf("pickVirtualParents start for tips len: %d", len(tips))
defer log.Debugf("pickVirtualParents end for tips len: %d", len(tips))
log.Debugf("Pushing all tips into a DownHeap")
candidatesHeap := csm.dagTraversalManager.NewDownHeap()
@@ -84,8 +87,8 @@ func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainH
func (csm *consensusStateManager) selectVirtualSelectedParent(
candidatesHeap model.BlockHeap) (*externalapi.DomainHash, error) {
log.Tracef("selectVirtualSelectedParent start")
defer log.Tracef("selectVirtualSelectedParent end")
onEnd := logger.LogAndMeasureExecutionTime(log, "selectVirtualSelectedParent")
defer onEnd()
disqualifiedCandidates := hashset.New()
@@ -153,8 +156,8 @@ func (csm *consensusStateManager) selectVirtualSelectedParent(
func (csm *consensusStateManager) mergeSetIncrease(
candidate *externalapi.DomainHash, selectedVirtualParents hashset.HashSet) (uint64, error) {
log.Tracef("mergeSetIncrease start")
defer log.Tracef("mergeSetIncrease end")
onEnd := logger.LogAndMeasureExecutionTime(log, "mergeSetIncrease")
defer onEnd()
visited := hashset.New()
queue := csm.dagTraversalManager.NewDownHeap()
@@ -204,8 +207,10 @@ func (csm *consensusStateManager) mergeSetIncrease(
func (csm *consensusStateManager) boundedMergeBreakingParents(
parents []*externalapi.DomainHash) (hashset.HashSet, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "boundedMergeBreakingParents")
defer onEnd()
log.Tracef("boundedMergeBreakingParents start for parents: %s", parents)
defer log.Tracef("boundedMergeBreakingParents end for parents: %s", parents)
log.Debug("Temporarily setting virtual to all parents, so that we can run ghostdag on it")
err := csm.dagTopologyManager.SetParents(model.VirtualBlockHash, parents)

View File

@@ -0,0 +1,117 @@
package consensusstatemanager_test
import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"io/ioutil"
"os/user"
"path"
"runtime/pprof"
"testing"
"time"
)
var log, _ = logger.Get(logger.SubsystemTags.CMGR)
func TestPickVirtualParents(t *testing.T) {
usr, err := user.Current()
if err != nil {
t.Fatal(err)
}
const chainSize = 97
params := dagconfig.DevnetParams
params.SkipProofOfWork = true
factory := consensus.NewFactory()
var chains [10][]*externalapi.DomainBlock
// Build three chains over the genesis
for chainIndex := range chains {
func() {
tipHash := params.GenesisHash
builder, teardown, err := factory.NewTestConsensus(&params, false, fmt.Sprintf("TestPickVirtualParents: %d", chainIndex))
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for blockIndex := 0; blockIndex < chainSize; blockIndex++ {
scriptPubKey, _ := testutils.OpTrueScript()
extraData := []byte{byte(chainIndex)}
block, _, err := builder.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, &externalapi.DomainCoinbaseData{scriptPubKey, extraData}, nil)
if err != nil {
t.Fatalf("Could not build block: %s", err)
}
_, err = builder.ValidateAndInsertBlock(block)
if err != nil {
t.Fatalf("Could not build block: %s", err)
}
chains[chainIndex] = append(chains[chainIndex], block)
tipHash = consensushashing.BlockHash(block)
}
fmt.Printf("Finished Building chain: %d\n", chainIndex)
}()
}
testConsensus, teardown, err := factory.NewTestConsensus(&params, false, "TestPickVirtualParents")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
var maxTime time.Duration
var maxString string
var profName string
maxProf := make([]byte, 0, 1024)
// Build three chains over the genesis
buf := bytes.NewBuffer(make([]byte, 0, 1024))
for chainIndex, chain := range chains {
accumulatedValidationTime := time.Duration(0)
for blockIndex, block := range chain {
if chainIndex == 9 && blockIndex > 90 {
logger.InitLog(path.Join(usr.HomeDir, "TestPickVirtualParents.log"), path.Join(usr.HomeDir, "TestPickVirtualParents_err.log"))
logger.SetLogLevels("debug")
}
log.Debugf("Starting chain:#%d, block: #%d", chainIndex, blockIndex)
blockHash := consensushashing.BlockHash(block)
buf.Reset()
err = pprof.StartCPUProfile(buf)
if err != nil {
t.Fatal(err)
}
start := time.Now()
_, err := testConsensus.ValidateAndInsertBlock(block)
validationTime := time.Since(start)
pprof.StopCPUProfile()
if err != nil {
t.Fatalf("Failed to validate block %s: %s", blockHash, err)
}
if validationTime > maxTime {
maxTime = validationTime
maxString = fmt.Sprintf("Chain: %d, Block: %d", chainIndex, blockIndex)
profName = fmt.Sprintf("TestPickVirtualParents-chain-%d-block-%d.pprof", chainIndex, blockIndex)
maxProf = append(maxProf[:0], buf.Bytes()...)
}
accumulatedValidationTime += validationTime
log.Debugf("Validated block #%d in chain #%d, took %s\n", blockIndex, chainIndex, validationTime)
}
averageValidationTime := accumulatedValidationTime / chainSize
fmt.Printf("Average validation time for chain #%d: %s\n", chainIndex, averageValidationTime)
}
err = ioutil.WriteFile(path.Join(usr.HomeDir, profName), maxProf, 0644)
if err != nil {
t.Fatal(err)
}
fmt.Printf("%s, took: %s\n", maxString, maxTime)
}

View File

@@ -3,13 +3,16 @@ package consensusstatemanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
)
func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.DomainHash,
tips []*externalapi.DomainHash) (*externalapi.SelectedChainPath, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "updateVirtual")
defer onEnd()
log.Debugf("updateVirtual start for block %s", newBlockHash)
defer log.Debugf("updateVirtual end for block %s", newBlockHash)
log.Debugf("Saving a reference to the GHOSTDAG data of the old virtual")
var oldVirtualSelectedParent *externalapi.DomainHash
@@ -44,6 +47,9 @@ func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.Domain
if err != nil {
return nil, err
}
log.Debugf("Calculated the past UTXO of the new virtual. "+
"Diff toAdd length: %d, toRemove length: %d",
virtualUTXODiff.ToAdd().Len(), virtualUTXODiff.ToRemove().Len())
log.Debugf("Staging new acceptance data for the virtual block")
csm.acceptanceDataStore.Stage(model.VirtualBlockHash, virtualAcceptanceData)
@@ -73,6 +79,8 @@ func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.Domain
if err != nil {
return nil, err
}
log.Debugf("Selected parent chain changes: %d blocks were removed and %d blocks were added",
len(selectedParentChainChanges.Removed), len(selectedParentChainChanges.Added))
}
return selectedParentChainChanges, nil

View File

@@ -195,5 +195,5 @@ func (dtm *dagTopologyManager) ChildInSelectedParentChainOf(
blockHash, specifiedHighHash)
}
return dtm.reachabilityManager.FindAncestorOfThisAmongChildrenOfOther(highHash, blockHash)
return dtm.reachabilityManager.FindNextAncestor(highHash, blockHash)
}

View File

@@ -3,9 +3,13 @@ package ghostdagmanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
)
func (gm *ghostdagManager) findSelectedParent(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "ghostdagManager.findSelectedParent")
defer onEnd()
var selectedParent *externalapi.DomainHash
for _, hash := range parentHashes {
if selectedParent == nil {

View File

@@ -3,6 +3,7 @@ package ghostdagmanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math/big"
@@ -40,6 +41,9 @@ func (bg *blockGHOSTDAGData) toModel() *model.BlockGHOSTDAGData {
//
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "GHOSTDAG")
defer onEnd()
newBlockData := &blockGHOSTDAGData{
blueWork: new(big.Int),
mergeSetBlues: make([]*externalapi.DomainHash, 0),
@@ -69,6 +73,7 @@ func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
return err
}
onMergeSetWithoutSelectedParentEnd := logger.LogAndMeasureExecutionTime(log, "GHOSTDAG.mergeSetWithoutSelectedParent")
for _, blueCandidate := range mergeSetWithoutSelectedParent {
isBlue, candidateAnticoneSize, candidateBluesAnticoneSizes, err := gm.checkBlueCandidate(newBlockData.toModel(), blueCandidate)
if err != nil {
@@ -86,6 +91,7 @@ func (gm *ghostdagManager) GHOSTDAG(blockHash *externalapi.DomainHash) error {
newBlockData.mergeSetReds = append(newBlockData.mergeSetReds, blueCandidate)
}
}
onMergeSetWithoutSelectedParentEnd()
if !isGenesis {
selectedParentGHOSTDAGData, err := gm.ghostdagDataStore.Get(gm.databaseContext, newBlockData.selectedParent)

View File

@@ -0,0 +1,7 @@
package ghostdagmanager
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

View File

@@ -85,6 +85,8 @@ func New(
// FindNextPruningPoint finds the next pruning point from the
// given blockHash
func (pm *pruningManager) UpdatePruningPointByVirtual() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.UpdatePruningPointByVirtual")
defer onEnd()
hasPruningPoint, err := pm.pruningStore.HasPruningPoint(pm.databaseContext)
if err != nil {
return err
@@ -413,6 +415,9 @@ func (pm *pruningManager) pruningPointCandidate() (*externalapi.DomainHash, erro
// validateUTXOSetFitsCommitment makes sure that the calculated UTXOSet of the new pruning point fits the commitment.
// This is a sanity test, to make sure that kaspad doesn't store, and subsequently sends syncing peers the wrong UTXOSet.
func (pm *pruningManager) validateUTXOSetFitsCommitment(pruningPointHash *externalapi.DomainHash) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.validateUTXOSetFitsCommitment")
defer onEnd()
utxoSetIterator, err := pm.consensusStateManager.RestorePastUTXOSetIterator(pruningPointHash)
if err != nil {
return err

View File

@@ -16,6 +16,54 @@ func intervalSize(ri *model.ReachabilityInterval) uint64 {
return ri.End - ri.Start + 1
}
// intervalIncrease returns a ReachabilityInterval with offset added to start and end
func intervalIncrease(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start + offset,
End: ri.End + offset,
}
}
// intervalDecrease returns a ReachabilityInterval with offset subtracted from start and end
func intervalDecrease(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start - offset,
End: ri.End - offset,
}
}
// intervalIncreaseStart returns a ReachabilityInterval with offset added to start
func intervalIncreaseStart(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start + offset,
End: ri.End,
}
}
// intervalDecreaseStart returns a ReachabilityInterval with offset reduced from start
func intervalDecreaseStart(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start - offset,
End: ri.End,
}
}
// intervalIncreaseEnd returns a ReachabilityInterval with offset added to end
func intervalIncreaseEnd(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start,
End: ri.End + offset,
}
}
// intervalDecreaseEnd returns a ReachabilityInterval with offset subtracted from end
func intervalDecreaseEnd(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval {
return &model.ReachabilityInterval{
Start: ri.Start,
End: ri.End - offset,
}
}
// intervalSplitInHalf splits this interval by a fraction of 0.5.
// See splitFraction for further details.
func intervalSplitInHalf(ri *model.ReachabilityInterval) (
@@ -111,6 +159,35 @@ func intervalSplitWithExponentialBias(ri *model.ReachabilityInterval, sizes []ui
return intervalSplitExact(ri, biasedSizes)
}
// exponentialFractions returns a fraction of each size in sizes
// as follows:
// fraction[i] = 2^size[i] / sum_j(2^size[j])
// In the code below the above equation is divided by 2^max(size)
// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i])
// we divide 1 by potentially a very large number, which will
// result in loss of float precision. This is not a problem - all
// numbers close to 0 bear effectively the same weight.
func exponentialFractions(sizes []uint64) []float64 {
maxSize := uint64(0)
for _, size := range sizes {
if size > maxSize {
maxSize = size
}
}
fractions := make([]float64, len(sizes))
for i, size := range sizes {
fractions[i] = 1 / math.Pow(2, float64(maxSize-size))
}
fractionsSum := float64(0)
for _, fraction := range fractions {
fractionsSum += fraction
}
for i, fraction := range fractions {
fractions[i] = fraction / fractionsSum
}
return fractions
}
// intervalContains returns true if ri contains other.
func intervalContains(ri *model.ReachabilityInterval, other *model.ReachabilityInterval) bool {
return ri.Start <= other.Start && other.End <= ri.End

View File

@@ -1,7 +1,6 @@
package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
@@ -58,20 +57,3 @@ func (rt *reachabilityManager) findAncestorIndexOfNode(tns orderedTreeNodeSet, n
}
return low - 1, true, nil
}
func (rt *reachabilityManager) propagateIntervals(tns orderedTreeNodeSet, intervals []*model.ReachabilityInterval,
subtreeSizeMaps []map[externalapi.DomainHash]uint64) error {
for i, node := range tns {
err := rt.stageInterval(node, intervals[i])
if err != nil {
return err
}
subtreeSizeMap := subtreeSizeMaps[i]
err = rt.propagateInterval(node, subtreeSizeMap)
if err != nil {
return err
}
}
return nil
}

View File

@@ -1,6 +1,7 @@
package reachabilitymanager_test
import (
"math"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -57,10 +58,20 @@ func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *t
t.Fatalf("reindex root is expected to change")
}
// Add another block over genesis
_, _, err = tc.AddBlock([]*externalapi.DomainHash{params.GenesisHash}, nil, nil)
// Add enough blocks over genesis to test also the case where the first
// level (genesis in this case) runs out of slack
slackSize := tc.ReachabilityManager().ReachabilityReindexSlack()
blocksToAdd := uint64(math.Log2(float64(slackSize))) + 2
for i := uint64(0); i < blocksToAdd; i++ {
_, _, err = tc.AddBlock([]*externalapi.DomainHash{params.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
t.Fatal(err)
}
})
}
@@ -152,6 +163,11 @@ func TestUpdateReindexRoot(t *testing.T) {
t.Fatalf("got unexpected chain1RootBlock interval. Want: %d, got: %d",
intervalSize(chain1RootBlock), expectedChain1RootIntervalSize)
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
})
}
@@ -224,27 +240,16 @@ func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
t.Fatalf("rightBlock interval not tight after reindex")
}
// Get the current interval for centerBlock. Its interval should be:
// genesisInterval - 1 - leftInterval - leftSlack - rightInterval - rightSlack
expectedCenterInterval := intervalSize(params.GenesisHash) - 1 -
intervalSize(leftBlock) - tc.ReachabilityManager().ReachabilityReindexSlack() -
intervalSize(rightBlock) - tc.ReachabilityManager().ReachabilityReindexSlack()
if intervalSize(centerBlock) != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, intervalSize(centerBlock))
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
// Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock.
// Each addition will trigger a low-than-reindex-root reindex. We
// expect the centerInterval to shrink by 1 each time, but its child
// to remain unaffected
centerData, err := tc.ReachabilityDataStore().ReachabilityData(tc.DatabaseContext(), centerBlock)
if err != nil {
t.Fatalf("ReachabilityData: %s", err)
}
treeChildOfCenterBlock := centerData.Children()[0]
treeChildOfCenterBlockOriginalIntervalSize := intervalSize(treeChildOfCenterBlock)
leftTipHash := leftBlock
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
var err error
@@ -253,14 +258,9 @@ func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
t.Fatalf("AddBlock: %+v", err)
}
expectedCenterInterval--
if intervalSize(centerBlock) != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, intervalSize(centerBlock))
}
if intervalSize(treeChildOfCenterBlock) != treeChildOfCenterBlockOriginalIntervalSize {
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
}
@@ -276,15 +276,15 @@ func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
t.Fatalf("AddBlock: %+v", err)
}
expectedCenterInterval--
if intervalSize(centerBlock) != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, intervalSize(centerBlock))
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
}
if intervalSize(treeChildOfCenterBlock) != treeChildOfCenterBlockOriginalIntervalSize {
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
})
}
@@ -324,5 +324,10 @@ func TestTipsAfterReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
err = tc.ReachabilityManager().ValidateIntervals(params.GenesisHash)
if err != nil {
t.Fatal(err)
}
})
}

View File

@@ -0,0 +1,258 @@
package reachabilitymanager_test
import (
"compress/gzip"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"math"
"math/rand"
"os"
"testing"
)
// Test configuration
const (
	// numBlocksExponent selects the JSON test-data file to load: the file name
	// embeds 2^numBlocksExponent as the block count (see buildJsonDAG).
	numBlocksExponent = 12
	// logLevel is the log level applied to all tests via logger.SetLogLevels.
	logLevel = "warn"
)
// initializeTest creates a test consensus instance for the given test name and
// returns it together with its teardown function. The test is marked parallel
// and log output is limited to the configured logLevel.
func initializeTest(t *testing.T, testName string) (tc testapi.TestConsensus, teardown func(keepDataDir bool)) {
	t.Parallel()
	logger.SetLogLevels(logLevel)

	// Use simnet parameters with proof-of-work skipped so that headers can be
	// added rapidly without mining.
	simnetParams := dagconfig.SimnetParams
	simnetParams.SkipProofOfWork = true

	var err error
	tc, teardown, err = consensus.NewFactory().NewTestConsensus(&simnetParams, false, testName)
	if err != nil {
		t.Fatalf("Error setting up consensus: %+v", err)
	}
	return tc, teardown
}
// buildJsonDAG loads a gzipped JSON DAG description from testdata, mines it as
// UTXO-invalid headers into the given test consensus, validates reachability
// intervals from genesis, and returns the resulting tips.
// When attackJson is true, the "attack" variant of the data file is used.
func buildJsonDAG(t *testing.T, tc testapi.TestConsensus, attackJson bool) (tips []*externalapi.DomainHash) {
	prefix := "noattack"
	if attackJson {
		prefix = "attack"
	}
	path := fmt.Sprintf(
		"../../testdata/reachability/%s-dag-blocks--2^%d-delay-factor--1-k--18.json.gz",
		prefix, numBlocksExponent)

	file, err := os.Open(path)
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()

	reader, err := gzip.NewReader(file)
	if err != nil {
		t.Fatal(err)
	}
	defer reader.Close()

	// Mine headers only (UTXO-invalid) — reachability is a header-level structure
	if tips, err = tc.MineJSON(reader, testapi.MineJSONBlockTypeUTXOInvalidHeader); err != nil {
		t.Fatal(err)
	}
	if err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash); err != nil {
		t.Fatal(err)
	}
	return tips
}
// addArbitraryBlocks stretches the reindex logic by adding many blocks (and
// occasional random-length chains) at random parents all over an
// already-built DAG, validating reachability intervals as it goes.
func addArbitraryBlocks(t *testing.T, tc testapi.TestConsensus) {
	// After loading json, add arbitrary blocks all over the DAG to stretch
	// reindex logic, and validate intervals post each addition
	blocks, err := tc.ReachabilityManager().GetAllNodes(tc.DAGParams().GenesisHash)
	if err != nil {
		t.Fatal(err)
	}

	numChainsToAdd := len(blocks) / 2 // Multiply the size of the DAG with arbitrary blocks
	maxBlocksInChain := 20
	// Validating the whole DAG is expensive, so do it for roughly 1% of additions
	validationFreq := int(math.Max(1, float64(numChainsToAdd/100)))

	// Fixed seed so that failures are reproducible
	randSource := rand.New(rand.NewSource(33233))

	for i := 0; i < numChainsToAdd; i++ {
		randomIndex := randSource.Intn(len(blocks))
		randomParent := blocks[randomIndex]
		newBlock, _, err := tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{randomParent})
		if err != nil {
			t.Fatal(err)
		}
		blocks = append(blocks, newBlock)
		// Add a random-length chain every few blocks
		if randSource.Intn(8) == 0 {
			numBlocksInChain := randSource.Intn(maxBlocksInChain)
			chainBlock := newBlock
			for j := 0; j < numBlocksInChain; j++ {
				chainBlock, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainBlock})
				if err != nil {
					t.Fatal(err)
				}
				blocks = append(blocks, chainBlock)
			}
		}
		// Normally, validate intervals for new chain only
		validationRoot := newBlock
		// However every 'validation frequency' blocks validate intervals for entire DAG
		if i%validationFreq == 0 || i == numChainsToAdd-1 {
			validationRoot = tc.DAGParams().GenesisHash
		}
		err = tc.ReachabilityManager().ValidateIntervals(validationRoot)
		if err != nil {
			t.Fatal(err)
		}
	}
}
// addAlternatingReorgBlocks creates alternating reorgs to test the cases where
// the reindex root is out of the current header-selected-tip chain.
//
// It locates two tips — one on the reindex root's tree chain (chainTip) and
// one disagreeing with it (reorgTip, expected to exist in json attack files) —
// brings their blue scores close, alternates extending each chain 200 times,
// and finally extends reorgTip past the reindex slack threshold to force the
// reindex root to switch chains. Reachability intervals are validated after
// each alternation.
func addAlternatingReorgBlocks(t *testing.T, tc testapi.TestConsensus, tips []*externalapi.DomainHash) {
	reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext())
	if err != nil {
		t.Fatal(err)
	}

	// Try finding two tips; one which has reindex root on it's chain (chainTip), and one which
	// does not (reorgTip). The latter is expected to exist in json attack files.
	var chainTip, reorgTip *externalapi.DomainHash
	for _, block := range tips {
		isRootAncestorOfTip, err := tc.ReachabilityManager().IsReachabilityTreeAncestorOf(reindexRoot, block)
		if err != nil {
			t.Fatal(err)
		}
		if isRootAncestorOfTip {
			chainTip = block
		} else {
			reorgTip = block
		}
	}

	if reorgTip == nil {
		// Fixed typo: "jsom" -> "json"
		t.Fatal(errors.Errorf("DAG from json file is expected to contain a tip " +
			"disagreeing with reindex root chain"))
	}

	if chainTip == nil {
		t.Fatal(errors.Errorf("reindex root is not on any header tip chain, this is unexpected behavior"))
	}

	chainTipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), chainTip)
	if err != nil {
		t.Fatal(err)
	}

	reorgTipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), reorgTip)
	if err != nil {
		t.Fatal(err)
	}

	// Get both chains close to each other (we care about blue score and not
	// blue work because we have SkipProofOfWork=true)
	if chainTipGHOSTDAGData.BlueScore() > reorgTipGHOSTDAGData.BlueScore() {
		blueScoreDiff := int(chainTipGHOSTDAGData.BlueScore() - reorgTipGHOSTDAGData.BlueScore())
		for i := 0; i < blueScoreDiff+5; i++ {
			reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip})
			if err != nil {
				t.Fatal(err)
			}
		}
	} else {
		blueScoreDiff := int(reorgTipGHOSTDAGData.BlueScore() - chainTipGHOSTDAGData.BlueScore())
		for i := 0; i < blueScoreDiff+5; i++ {
			chainTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainTip})
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
	if err != nil {
		t.Fatal(err)
	}

	// Alternate between the chains 200 times
	for i := 0; i < 200; i++ {
		if i%2 == 0 {
			for j := 0; j < 10; j++ {
				chainTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainTip})
				if err != nil {
					t.Fatal(err)
				}
			}
		} else {
			for j := 0; j < 10; j++ {
				reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip})
				if err != nil {
					t.Fatal(err)
				}
			}
		}
		err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Since current logic switches reindex root chain with reindex slack threshold - at last make the switch happen
	for i := 0; i < int(tc.ReachabilityManager().ReachabilityReindexSlack())+10; i++ {
		reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip})
		if err != nil {
			t.Fatal(err)
		}
	}

	err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash)
	if err != nil {
		t.Fatal(err)
	}
}
// TestNoAttack loads a regular (non-attack) JSON DAG and validates
// reachability intervals after mining it.
func TestNoAttack(t *testing.T) {
	tc, teardown := initializeTest(t, "TestNoAttack")
	defer teardown(false)
	buildJsonDAG(t, tc, false)
}

// TestAttack loads an attack-variant JSON DAG and validates reachability
// intervals after mining it.
func TestAttack(t *testing.T) {
	tc, teardown := initializeTest(t, "TestAttack")
	defer teardown(false)
	buildJsonDAG(t, tc, true)
}

// TestNoAttackFuzzy additionally adds arbitrary blocks all over the loaded
// DAG, using a small reindex slack (10) to make reindexes trigger frequently.
func TestNoAttackFuzzy(t *testing.T) {
	tc, teardown := initializeTest(t, "TestNoAttackFuzzy")
	defer teardown(false)
	tc.ReachabilityManager().SetReachabilityReindexSlack(10)
	buildJsonDAG(t, tc, false)
	addArbitraryBlocks(t, tc)
}

// TestAttackFuzzy is the attack-DAG variant of TestNoAttackFuzzy.
func TestAttackFuzzy(t *testing.T) {
	tc, teardown := initializeTest(t, "TestAttackFuzzy")
	defer teardown(false)
	tc.ReachabilityManager().SetReachabilityReindexSlack(10)
	buildJsonDAG(t, tc, true)
	addArbitraryBlocks(t, tc)
}

// TestAttackAlternateReorg builds alternating reorg chains over an attack DAG
// to exercise the cases where the reindex root is out of the current
// header-selected-tip chain.
func TestAttackAlternateReorg(t *testing.T) {
	tc, teardown := initializeTest(t, "TestAttackAlternateReorg")
	defer teardown(false)
	tc.ReachabilityManager().SetReachabilityReindexSlack(256)
	tips := buildJsonDAG(t, tc, true)
	addAlternatingReorgBlocks(t, tc, tips)
}

View File

@@ -246,6 +246,11 @@ func TestAddChild(t *testing.T) {
}
}
err = manager.validateIntervals(root)
if err != nil {
t.Fatal(err)
}
// Scenario 2: test addChild where all nodes are direct descendants of root
// root -> a, b, c...
// Create the root node of a new reachability tree
@@ -306,6 +311,11 @@ func TestAddChild(t *testing.T) {
t.Fatalf("TestAddChild: childNode is not a descendant of root")
}
}
err = manager.validateIntervals(root)
if err != nil {
t.Fatal(err)
}
}
func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
@@ -334,6 +344,11 @@ func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
if !helper.isReachabilityTreeAncestorOf(root, root) {
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root")
}
err := manager.validateIntervals(root)
if err != nil {
t.Fatal(err)
}
}
func TestIntervalContains(t *testing.T) {
@@ -978,19 +993,19 @@ func TestReachabilityTreeNodeString(t *testing.T) {
treeNodeB2 := helper.newNodeWithInterval(newReachabilityInterval(150, 199))
treeNodeC := helper.newNodeWithInterval(newReachabilityInterval(100, 149))
err := helper.addChildAndStage(treeNodeA, treeNodeB1)
err := helper.stageAddChild(treeNodeA, treeNodeB1)
if err != nil {
t.Fatalf("addChildAndStage: %s", err)
t.Fatalf("stageAddChild: %s", err)
}
err = helper.addChildAndStage(treeNodeA, treeNodeB2)
err = helper.stageAddChild(treeNodeA, treeNodeB2)
if err != nil {
t.Fatalf("addChildAndStage: %s", err)
t.Fatalf("stageAddChild: %s", err)
}
err = helper.addChildAndStage(treeNodeB2, treeNodeC)
err = helper.stageAddChild(treeNodeB2, treeNodeC)
if err != nil {
t.Fatalf("addChildAndStage: %s", err)
t.Fatalf("stageAddChild: %s", err)
}
str, err := manager.String(treeNodeA)

View File

@@ -0,0 +1,804 @@
package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
var (
	// defaultReindexWindow is the default target window size for reachability
	// reindexes. Note that this is a var and not a constant for testing purposes.
	defaultReindexWindow uint64 = 200

	// defaultReindexSlack is the default slack interval given to reachability
	// tree nodes not in the selected parent chain. Note that this is a var and
	// not a constant for testing purposes.
	defaultReindexSlack uint64 = 1 << 12
)
// reindexContext is a struct used during reindex operations. It represents a temporary context
// for caching subtree information during the *current* reindex operation only
type reindexContext struct {
	manager *reachabilityManager
	// subTreeSizesCache maps a tree node (by hash) to the size of the subtree
	// rooted at it, as computed by countSubtrees during this operation
	subTreeSizesCache map[externalapi.DomainHash]uint64
}

// newReindexContext creates a new empty reindex context for the given manager
func newReindexContext(rt *reachabilityManager) reindexContext {
	return reindexContext{
		manager:           rt,
		subTreeSizesCache: make(map[externalapi.DomainHash]uint64),
	}
}
/*

Core (BFS) algorithms used during reindexing

*/

// countSubtrees counts the size of each subtree under this node,
// and populates rc.subTreeSizesCache with the results.
// It is equivalent to the following recursive implementation:
//
//	func (rt *reachabilityManager) countSubtrees(node *model.ReachabilityTreeNode) uint64 {
//		subtreeSize := uint64(0)
//		for _, child := range node.children {
//			subtreeSize += child.countSubtrees()
//		}
//		return subtreeSize + 1
//	}
//
// However, we are expecting (linearly) deep trees, and so a
// recursive stack-based approach is inefficient and will hit
// recursion limits. Instead, the same logic was implemented
// using a (queue-based) BFS method. At a high level, the
// algorithm uses BFS for reaching all leaves and pushes
// intermediate updates from leaves via parent chains until all
// size information is gathered at the root of the operation
// (i.e. at node).
func (rc *reindexContext) countSubtrees(node *externalapi.DomainHash) error {
	// Already computed during this reindex operation — nothing to do
	if _, ok := rc.subTreeSizesCache[*node]; ok {
		return nil
	}

	queue := []*externalapi.DomainHash{node}
	// calculatedChildrenCount tracks, per visited node, how many of its
	// children already have a fully-computed subtree size
	calculatedChildrenCount := make(map[externalapi.DomainHash]uint64)
	for len(queue) > 0 {
		var current *externalapi.DomainHash
		current, queue = queue[0], queue[1:]
		children, err := rc.manager.children(current)
		if err != nil {
			return err
		}

		if len(children) == 0 {
			// We reached a leaf
			rc.subTreeSizesCache[*current] = 1
		} else if _, ok := rc.subTreeSizesCache[*current]; !ok {
			// We haven't yet calculated the subtree size of
			// the current node. Add all its children to the
			// queue
			queue = append(queue, children...)
			continue
		}

		// We reached a leaf or a pre-calculated subtree.
		// Push information up
		for !current.Equal(node) {
			current, err = rc.manager.parent(current)
			if err != nil {
				return err
			}

			// If the current is now nil, it means that the previous
			// `current` was the genesis block -- the only block that
			// does not have parents
			if current == nil {
				break
			}

			calculatedChildrenCount[*current]++

			children, err := rc.manager.children(current)
			if err != nil {
				return err
			}

			if calculatedChildrenCount[*current] != uint64(len(children)) {
				// Not all subtrees of the current node are ready
				break
			}

			// All children of `current` have calculated their subtree size.
			// Sum them all together and add 1 to get the sub tree size of
			// `current`.
			childSubtreeSizeSum := uint64(0)
			for _, child := range children {
				childSubtreeSizeSum += rc.subTreeSizesCache[*child]
			}
			rc.subTreeSizesCache[*current] = childSubtreeSizeSum + 1
		}
	}

	return nil
}
// propagateInterval propagates the interval of `node` down its entire subtree
// using a BFS traversal. Subtree intervals are allocated according to subtree
// sizes and the allocation rule in intervalSplitWithExponentialBias.
// Note: only descendants of `node` are staged here; `node`'s own interval is
// read (via intervalRangeForChildAllocation) but not modified.
func (rc *reindexContext) propagateInterval(node *externalapi.DomainHash) error {
	// Make sure subtrees are counted before propagating
	err := rc.countSubtrees(node)
	if err != nil {
		return err
	}

	queue := []*externalapi.DomainHash{node}
	for len(queue) > 0 {
		var current *externalapi.DomainHash
		current, queue = queue[0], queue[1:]

		children, err := rc.manager.children(current)
		if err != nil {
			return err
		}

		if len(children) > 0 {
			sizes := make([]uint64, len(children))
			for i, child := range children {
				sizes[i] = rc.subTreeSizesCache[*child]
			}

			// The usable range within current's interval for child allocation
			interval, err := rc.manager.intervalRangeForChildAllocation(current)
			if err != nil {
				return err
			}

			intervals, err := intervalSplitWithExponentialBias(interval, sizes)
			if err != nil {
				return err
			}
			for i, child := range children {
				childInterval := intervals[i]
				err = rc.manager.stageInterval(child, childInterval)
				if err != nil {
					return err
				}
				queue = append(queue, child)
			}
		}
	}

	return nil
}
/*

Functions for handling reindex triggered by adding child block

*/

// reindexIntervals reallocates reachability interval space in the subtree
// containing the new child node such that another reindexing is unlikely to
// occur shortly thereafter. It does this by walking *up* the reachability
// tree from newChild until it finds an ancestor whose interval size is at
// least its subtree size (i.e., one with sufficient space), then propagating
// that ancestor's interval down its subtree (see propagateInterval).
//
// If the walk exits the reindex root's subtree first, the required space is
// instead reclaimed from slack along the chain above, via
// reindexIntervalsEarlierThanRoot.
func (rc *reindexContext) reindexIntervals(newChild, reindexRoot *externalapi.DomainHash) error {
	current := newChild
	// Search for the first ancestor with sufficient interval space
	for {
		currentInterval, err := rc.manager.interval(current)
		if err != nil {
			return err
		}
		currentIntervalSize := intervalSize(currentInterval)
		err = rc.countSubtrees(current)
		if err != nil {
			return err
		}
		currentSubtreeSize := rc.subTreeSizesCache[*current]
		// Current has sufficient space, break and propagate
		if currentIntervalSize >= currentSubtreeSize {
			break
		}

		parent, err := rc.manager.parent(current)
		if err != nil {
			return err
		}

		if parent == nil {
			// If we ended up here it means that there are more
			// than 2^64 blocks, which shouldn't ever happen.
			return errors.Errorf("missing tree " +
				"parent during reindexing. Theoretically, this " +
				"should only ever happen if there are more " +
				"than 2^64 blocks in the DAG.")
		}

		if current.Equal(reindexRoot) {
			// Reindex root is expected to hold enough capacity as long as there are less
			// than ~2^52 blocks in the DAG, which should never happen in our lifetimes
			// even if block rate per second is above 100. The calculation follows from the allocation of
			// 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root.
			// Note: a space was missing between "capacity" and "during" in the
			// original concatenation — fixed here.
			return errors.Errorf("unexpected behavior: reindex root %s is out of capacity "+
				"during reindexing. Theoretically, this "+
				"should only ever happen if there are more "+
				"than ~2^52 blocks in the DAG.", reindexRoot.String())
		}

		isParentStrictAncestorOfRoot, err := rc.manager.isStrictAncestorOf(parent, reindexRoot)
		if err != nil {
			return err
		}

		if isParentStrictAncestorOfRoot {
			// In this case parent is guaranteed to have sufficient interval space,
			// however we avoid reindexing the entire subtree above parent
			// (which includes root and thus majority of blocks mined since)
			// and use slacks along the chain up from parent to reindex root.
			// Notes:
			// 1. we set requiredAllocation=currentSubtreeSize in order to double the
			//    current interval capacity
			// 2. it might be the case that current is the `newChild` itself
			return rc.reindexIntervalsEarlierThanRoot(current, reindexRoot, parent, currentSubtreeSize)
		}

		current = parent
	}

	// Propagate the interval to the subtree
	return rc.propagateInterval(current)
}
// reindexIntervalsEarlierThanRoot implements the reindex algorithm for the case where the
// new child node is not in reindex root's subtree. The function is expected to allocate
// `requiredAllocation` to be added to interval of `allocationNode`. `commonAncestor` is
// expected to be a direct parent of `allocationNode` and an ancestor of `reindexRoot`.
func (rc *reindexContext) reindexIntervalsEarlierThanRoot(
	allocationNode, reindexRoot, commonAncestor *externalapi.DomainHash, requiredAllocation uint64) error {

	// The chosen child is:
	// a. A reachability tree child of `commonAncestor`
	// b. A reachability tree ancestor of `reindexRoot` or `reindexRoot` itself
	chosen, err := rc.manager.FindNextAncestor(reindexRoot, commonAncestor)
	if err != nil {
		return err
	}

	allocationInterval, err := rc.manager.interval(allocationNode)
	if err != nil {
		return err
	}
	chosenInterval, err := rc.manager.interval(chosen)
	if err != nil {
		return err
	}

	// Decide on which side of the chosen child the allocation subtree lies,
	// and reclaim the required space from that side
	if allocationInterval.Start < chosenInterval.Start {
		// allocationNode is in the subtree before the chosen child
		return rc.reclaimIntervalBefore(allocationNode, commonAncestor, chosen, reindexRoot, requiredAllocation)
	}

	// allocationNode is in the subtree after the chosen child
	return rc.reclaimIntervalAfter(allocationNode, commonAncestor, chosen, reindexRoot, requiredAllocation)
}
// reclaimIntervalBefore reclaims `requiredAllocation` of interval space for
// `allocationNode`, which lies before (to the left of) the chain going from
// `commonAncestor` (its direct tree parent) up to `reindexRoot`.
//
// Phase 1 walks up that chain starting at `chosenChild`, accumulating the
// remaining slack before each chain block until the accumulation satisfies
// the required allocation — or the (unlimited-capacity) reindex root is
// reached, in which case fresh per-block slack is re-allocated as well.
// Phase 2 walks back down, shifting each chain block and its preceding
// siblings rightwards so the freed space reaches `allocationNode`.
func (rc *reindexContext) reclaimIntervalBefore(
	allocationNode, commonAncestor, chosenChild, reindexRoot *externalapi.DomainHash, requiredAllocation uint64) error {

	var slackSum uint64 = 0       // Slack accumulated so far during the walk up
	var pathLen uint64 = 0        // Number of chain blocks traversed on the way up
	var pathSlackAlloc uint64 = 0 // Slack to reserve per chain block during the walk down
	var err error

	current := chosenChild
	// Walk up the chain from common ancestor's chosen child towards reindex root
	for {
		if current.Equal(reindexRoot) {
			// Reached reindex root. In this case, since we reached (the unlimited) root,
			// we also re-allocate new slack for the chain we just traversed
			previousInterval, err := rc.manager.interval(current)
			if err != nil {
				return err
			}
			offset := requiredAllocation + rc.manager.reindexSlack*pathLen - slackSum
			err = rc.manager.stageInterval(current, intervalIncreaseStart(previousInterval, offset))
			if err != nil {
				return err
			}
			err = rc.propagateInterval(current)
			if err != nil {
				return err
			}

			err = rc.offsetSiblingsBefore(allocationNode, current, offset)
			if err != nil {
				return err
			}

			// Set the slack for each chain block to be reserved below during the chain walk-down
			pathSlackAlloc = rc.manager.reindexSlack
			break
		}

		slackBeforeCurrent, err := rc.manager.remainingSlackBefore(current)
		if err != nil {
			return err
		}
		slackSum += slackBeforeCurrent

		if slackSum >= requiredAllocation {
			previousInterval, err := rc.manager.interval(current)
			if err != nil {
				return err
			}

			// Set offset to be just enough to satisfy required allocation
			offset := slackBeforeCurrent - (slackSum - requiredAllocation)

			err = rc.manager.stageInterval(current, intervalIncreaseStart(previousInterval, offset))
			if err != nil {
				return err
			}

			err = rc.offsetSiblingsBefore(allocationNode, current, offset)
			if err != nil {
				return err
			}

			break
		}

		current, err = rc.manager.FindNextAncestor(reindexRoot, current)
		if err != nil {
			return err
		}
		pathLen++
	}

	// Go back down the reachability tree towards the common ancestor.
	// On every hop we reindex the reachability subtree before the
	// current node with an interval that is smaller.
	// This is to make room for the required allocation.
	for {
		current, err = rc.manager.parent(current)
		if err != nil {
			return err
		}
		if current.Equal(commonAncestor) {
			break
		}

		originalInterval, err := rc.manager.interval(current)
		if err != nil {
			return err
		}

		slackBeforeCurrent, err := rc.manager.remainingSlackBefore(current)
		if err != nil {
			return err
		}

		offset := slackBeforeCurrent - pathSlackAlloc
		err = rc.manager.stageInterval(current, intervalIncreaseStart(originalInterval, offset))
		if err != nil {
			return err
		}

		err = rc.offsetSiblingsBefore(allocationNode, current, offset)
		if err != nil {
			return err
		}
	}

	return nil
}
// offsetSiblingsBefore shifts forward by `offset` the intervals of the
// siblings that precede `current` (children of current's parent that come
// before it), propagating each updated interval down its subtree. When the
// reverse walk reaches `allocationNode`, the freed `offset` is granted to it
// by increasing its interval end, and the iteration stops.
func (rc *reindexContext) offsetSiblingsBefore(allocationNode, current *externalapi.DomainHash, offset uint64) error {
	parent, err := rc.manager.parent(current)
	if err != nil {
		return err
	}
	siblingsBefore, _, err := rc.manager.splitChildren(parent, current)
	if err != nil {
		return err
	}

	// Iterate over the slice in reverse order in order to break if reaching `allocationNode`
	for i := len(siblingsBefore) - 1; i >= 0; i-- {
		sibling := siblingsBefore[i]
		if sibling.Equal(allocationNode) {
			// We reached our final destination, allocate `offset` to `allocationNode` by increasing end and break
			previousInterval, err := rc.manager.interval(allocationNode)
			if err != nil {
				return err
			}
			err = rc.manager.stageInterval(allocationNode, intervalIncreaseEnd(previousInterval, offset))
			if err != nil {
				return err
			}
			err = rc.propagateInterval(allocationNode)
			if err != nil {
				return err
			}
			break
		}

		previousInterval, err := rc.manager.interval(sibling)
		if err != nil {
			return err
		}
		err = rc.manager.stageInterval(sibling, intervalIncrease(previousInterval, offset))
		if err != nil {
			return err
		}
		err = rc.propagateInterval(sibling)
		if err != nil {
			return err
		}
	}

	return nil
}
// reclaimIntervalAfter is the mirror image of reclaimIntervalBefore: it
// reclaims `requiredAllocation` of interval space for `allocationNode`, which
// lies after (to the right of) the chain going from `commonAncestor` (its
// direct tree parent) up to `reindexRoot`. Slack is accumulated from the
// right side of each chain block on the walk up, and intervals are shifted
// leftwards (interval ends decreased) on the walk down.
func (rc *reindexContext) reclaimIntervalAfter(
	allocationNode, commonAncestor, chosenChild, reindexRoot *externalapi.DomainHash, requiredAllocation uint64) error {

	var slackSum uint64 = 0       // Slack accumulated so far during the walk up
	var pathLen uint64 = 0        // Number of chain blocks traversed on the way up
	var pathSlackAlloc uint64 = 0 // Slack to reserve per chain block during the walk down
	var err error

	current := chosenChild
	// Walk up the chain from common ancestor's chosen child towards reindex root
	for {
		if current.Equal(reindexRoot) {
			// Reached reindex root. In this case, since we reached (the unlimited) root,
			// we also re-allocate new slack for the chain we just traversed
			previousInterval, err := rc.manager.interval(current)
			if err != nil {
				return err
			}
			offset := requiredAllocation + rc.manager.reindexSlack*pathLen - slackSum
			err = rc.manager.stageInterval(current, intervalDecreaseEnd(previousInterval, offset))
			if err != nil {
				return err
			}
			err = rc.propagateInterval(current)
			if err != nil {
				return err
			}

			err = rc.offsetSiblingsAfter(allocationNode, current, offset)
			if err != nil {
				return err
			}

			// Set the slack for each chain block to be reserved below during the chain walk-down
			pathSlackAlloc = rc.manager.reindexSlack
			break
		}

		slackAfterCurrent, err := rc.manager.remainingSlackAfter(current)
		if err != nil {
			return err
		}
		slackSum += slackAfterCurrent

		if slackSum >= requiredAllocation {
			previousInterval, err := rc.manager.interval(current)
			if err != nil {
				return err
			}

			// Set offset to be just enough to satisfy required allocation
			offset := slackAfterCurrent - (slackSum - requiredAllocation)

			err = rc.manager.stageInterval(current, intervalDecreaseEnd(previousInterval, offset))
			if err != nil {
				return err
			}

			err = rc.offsetSiblingsAfter(allocationNode, current, offset)
			if err != nil {
				return err
			}

			break
		}

		current, err = rc.manager.FindNextAncestor(reindexRoot, current)
		if err != nil {
			return err
		}
		pathLen++
	}

	// Go back down the reachability tree towards the common ancestor.
	// On every hop we reindex the reachability subtree after the
	// current node with an interval that is smaller.
	// This is to make room for the required allocation.
	for {
		current, err = rc.manager.parent(current)
		if err != nil {
			return err
		}
		if current.Equal(commonAncestor) {
			break
		}

		originalInterval, err := rc.manager.interval(current)
		if err != nil {
			return err
		}

		slackAfterCurrent, err := rc.manager.remainingSlackAfter(current)
		if err != nil {
			return err
		}

		offset := slackAfterCurrent - pathSlackAlloc
		err = rc.manager.stageInterval(current, intervalDecreaseEnd(originalInterval, offset))
		if err != nil {
			return err
		}

		err = rc.offsetSiblingsAfter(allocationNode, current, offset)
		if err != nil {
			return err
		}
	}

	return nil
}
// offsetSiblingsAfter shifts backward by `offset` the intervals of the
// siblings that follow `current` (children of current's parent that come
// after it), propagating each updated interval down its subtree. When the
// walk reaches `allocationNode`, the freed `offset` is granted to it by
// decreasing its interval start, and the iteration stops.
func (rc *reindexContext) offsetSiblingsAfter(allocationNode, current *externalapi.DomainHash, offset uint64) error {
	parent, err := rc.manager.parent(current)
	if err != nil {
		return err
	}
	_, siblingsAfter, err := rc.manager.splitChildren(parent, current)
	if err != nil {
		return err
	}

	// Forward iteration here (unlike offsetSiblingsBefore) since the nodes
	// closest to `current` come first in siblingsAfter
	for _, sibling := range siblingsAfter {
		if sibling.Equal(allocationNode) {
			// We reached our final destination, allocate `offset` to `allocationNode` by decreasing start and break
			previousInterval, err := rc.manager.interval(allocationNode)
			if err != nil {
				return err
			}
			err = rc.manager.stageInterval(allocationNode, intervalDecreaseStart(previousInterval, offset))
			if err != nil {
				return err
			}
			err = rc.propagateInterval(allocationNode)
			if err != nil {
				return err
			}
			break
		}

		previousInterval, err := rc.manager.interval(sibling)
		if err != nil {
			return err
		}
		err = rc.manager.stageInterval(sibling, intervalDecrease(previousInterval, offset))
		if err != nil {
			return err
		}
		err = rc.propagateInterval(sibling)
		if err != nil {
			return err
		}
	}

	return nil
}
/*

Functions for handling reindex triggered by moving reindex root

*/

// concentrateInterval concentrates the interval of `reindexRoot` on
// `chosenChild`: the siblings on each side of the chosen child are tightened
// to intervals exactly matching their subtree sizes, and the remaining space
// (minus slack) is handed to the chosen child.
func (rc *reindexContext) concentrateInterval(reindexRoot, chosenChild *externalapi.DomainHash, isFinalReindexRoot bool) error {
	siblingsBefore, siblingsAfter, err := rc.manager.splitChildren(reindexRoot, chosenChild)
	if err != nil {
		return err
	}

	sizesSumBefore, err := rc.tightenIntervalsBefore(reindexRoot, siblingsBefore)
	if err != nil {
		return err
	}
	sizesSumAfter, err := rc.tightenIntervalsAfter(reindexRoot, siblingsAfter)
	if err != nil {
		return err
	}

	return rc.expandIntervalToChosen(
		reindexRoot, chosenChild, sizesSumBefore, sizesSumAfter, isFinalReindexRoot)
}
// tightenIntervalsBefore allocates to the reindex-root children preceding the
// chosen child a tight interval (sized exactly by their subtree sizes),
// placed after a leading reindexSlack reserved at the root interval's start.
// Returns the sum of those subtree sizes.
func (rc *reindexContext) tightenIntervalsBefore(
	reindexRoot *externalapi.DomainHash, siblingsBeforeChosen []*externalapi.DomainHash) (sizesSum uint64, err error) {

	// NOTE(review): countChildrenSubtrees silently drops internal errors
	// (returning nil, 0) — confirm that is acceptable here
	siblingSubtreeSizes, sizesSum := rc.countChildrenSubtrees(siblingsBeforeChosen)

	rootInterval, err := rc.manager.interval(reindexRoot)
	if err != nil {
		return 0, err
	}

	intervalBeforeChosen := newReachabilityInterval(
		rootInterval.Start+rc.manager.reindexSlack,
		rootInterval.Start+rc.manager.reindexSlack+sizesSum-1,
	)

	err = rc.propagateChildrenIntervals(intervalBeforeChosen, siblingsBeforeChosen, siblingSubtreeSizes)
	if err != nil {
		return 0, err
	}

	return sizesSum, nil
}
// tightenIntervalsAfter allocates to the reindex-root children following the
// chosen child a tight interval (sized exactly by their subtree sizes),
// placed before a trailing reindexSlack reserved at the root interval's end.
// Returns the sum of those subtree sizes.
func (rc *reindexContext) tightenIntervalsAfter(
	reindexRoot *externalapi.DomainHash, siblingsAfterChosen []*externalapi.DomainHash) (sizesSum uint64, err error) {

	siblingSubtreeSizes, sizesSum := rc.countChildrenSubtrees(siblingsAfterChosen)

	rootInterval, err := rc.manager.interval(reindexRoot)
	if err != nil {
		return 0, err
	}

	intervalAfterChosen := newReachabilityInterval(
		rootInterval.End-rc.manager.reindexSlack-sizesSum,
		rootInterval.End-rc.manager.reindexSlack-1,
	)

	err = rc.propagateChildrenIntervals(intervalAfterChosen, siblingsAfterChosen, siblingSubtreeSizes)
	if err != nil {
		return 0, err
	}

	return sizesSum, nil
}
// expandIntervalToChosen assigns to `chosenChild` the whole reindex-root
// interval minus the (already tightened) sibling intervals and the slack
// reserved on both sides. If the chosen child is the final reindex root and
// its current interval does not fit inside the new one, a slack-padded
// version of the new interval is staged and propagated down its subtree
// first, so the final tight assignment needs no further propagation.
func (rc *reindexContext) expandIntervalToChosen(
	reindexRoot, chosenChild *externalapi.DomainHash, sizesSumBefore, sizesSumAfter uint64, isFinalReindexRoot bool) error {

	rootInterval, err := rc.manager.interval(reindexRoot)
	if err != nil {
		return err
	}

	newChosenInterval := newReachabilityInterval(
		rootInterval.Start+sizesSumBefore+rc.manager.reindexSlack,
		rootInterval.End-sizesSumAfter-rc.manager.reindexSlack-1,
	)

	currentChosenInterval, err := rc.manager.interval(chosenChild)
	if err != nil {
		return err
	}

	// Propagate interval only if chosenChild is the final reindex root
	if isFinalReindexRoot && !intervalContains(newChosenInterval, currentChosenInterval) {
		// New interval doesn't contain the previous one, propagation is required

		// We assign slack on both sides as an optimization. Were we to
		// assign a tight interval, the next time the reindex root moves we
		// would need to propagate intervals again. That is to say, when we
		// do allocate slack, next time
		// expandIntervalToChosen is called (next time the
		// reindex root moves), newChosenInterval is likely to
		// contain currentChosenInterval.
		err := rc.manager.stageInterval(chosenChild, newReachabilityInterval(
			newChosenInterval.Start+rc.manager.reindexSlack,
			newChosenInterval.End-rc.manager.reindexSlack,
		))
		if err != nil {
			return err
		}

		err = rc.propagateInterval(chosenChild)
		if err != nil {
			return err
		}
	}

	// Finally, stage the full (slack-inclusive) interval on the chosen child
	err = rc.manager.stageInterval(chosenChild, newChosenInterval)
	if err != nil {
		return err
	}

	return nil
}
// countChildrenSubtrees computes the subtree size of each node in `children`
// along with the sum of all those sizes.
//
// NOTE(review): if countSubtrees fails, the error is silently dropped and
// (nil, 0) is returned — callers cannot distinguish this from an empty
// children slice. Consider propagating the error in the signature.
func (rc *reindexContext) countChildrenSubtrees(children []*externalapi.DomainHash) (
	sizes []uint64, sum uint64) {

	sizes = make([]uint64, len(children))
	sum = 0
	for i, node := range children {
		err := rc.countSubtrees(node)
		if err != nil {
			return nil, 0
		}
		subtreeSize := rc.subTreeSizesCache[*node]
		sizes[i] = subtreeSize
		sum += subtreeSize
	}
	return sizes, sum
}
// propagateChildrenIntervals splits `interval` exactly between `children`
// according to `sizes`, then stages each child's new interval and propagates
// it down the child's subtree.
func (rc *reindexContext) propagateChildrenIntervals(
	interval *model.ReachabilityInterval, children []*externalapi.DomainHash, sizes []uint64) error {

	intervals, err := intervalSplitExact(interval, sizes)
	if err != nil {
		return err
	}

	for i, child := range children {
		if err := rc.manager.stageInterval(child, intervals[i]); err != nil {
			return err
		}
		if err := rc.propagateInterval(child); err != nil {
			return err
		}
	}

	return nil
}

View File

@@ -25,7 +25,7 @@ func (rt *reachabilityManager) stageReindexRoot(blockHash *externalapi.DomainHas
rt.reachabilityDataStore.StageReachabilityReindexRoot(blockHash)
}
func (rt *reachabilityManager) addChildAndStage(node, child *externalapi.DomainHash) error {
func (rt *reachabilityManager) stageAddChild(node, child *externalapi.DomainHash) error {
nodeData, err := rt.reachabilityDataForInsertion(node)
if err != nil {
return err

View File

@@ -2,6 +2,7 @@ package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
)
@@ -21,6 +22,14 @@ func (t *testReachabilityManager) SetReachabilityReindexWindow(reindexWindow uin
t.reachabilityManager.reindexWindow = reindexWindow
}
// ValidateIntervals exposes the internal validateIntervals to tests: it
// validates the reachability intervals of the tree rooted at `root`.
func (t *testReachabilityManager) ValidateIntervals(root *externalapi.DomainHash) error {
	return t.reachabilityManager.validateIntervals(root)
}

// GetAllNodes exposes the internal getAllNodes to tests: it returns all
// reachability tree nodes in the subtree rooted at `root`.
func (t *testReachabilityManager) GetAllNodes(root *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
	return t.reachabilityManager.getAllNodes(root)
}
// NewTestReachabilityManager creates an instance of a TestReachabilityManager
func NewTestReachabilityManager(manager model.ReachabilityManager) testapi.TestReachabilityManager {
return &testReachabilityManager{reachabilityManager: manager.(*reachabilityManager)}

File diff suppressed because it is too large Load Diff

View File

@@ -1,12 +1,15 @@
package consensus
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
"io"
)
type testConsensus struct {
@@ -103,8 +106,81 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
return consensushashing.BlockHash(block), blockInsertionResult, nil
}
func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
// MineJSON mines a DAG described by a JSON stream in "mine format": a JSON
// array of objects, each carrying an "id" and a list of parent ids. The id
// "0" is reserved for the genesis block, which is assumed to already exist.
// Every described block is added to the consensus as the given blockType.
// Returns the hashes of the DAG tips (blocks that ended up with no children).
func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) {
	// jsonBlock is a json representation of a block in mine format
	type jsonBlock struct {
		ID      string   `json:"id"`
		Parents []string `json:"parents"`
	}

	// tipSet tracks blocks that currently have no children; any block that
	// is later used as a parent is removed from it.
	tipSet := map[externalapi.DomainHash]*externalapi.DomainHash{}
	tipSet[*tc.dagParams.GenesisHash] = tc.dagParams.GenesisHash

	// parentsMap resolves a JSON id to the hash of the already-mined block.
	parentsMap := make(map[string]*externalapi.DomainHash)
	parentsMap["0"] = tc.dagParams.GenesisHash

	decoder := json.NewDecoder(r)
	// read open bracket
	_, err = decoder.Token()
	if err != nil {
		return nil, err
	}
	// while the array contains values
	for decoder.More() {
		var block jsonBlock
		// decode an array value (Message)
		err := decoder.Decode(&block)
		if err != nil {
			return nil, err
		}
		// "0" denotes the genesis block, which needs no mining.
		if block.ID == "0" {
			continue
		}
		parentHashes := make([]*externalapi.DomainHash, len(block.Parents))
		var ok bool
		for i, parentID := range block.Parents {
			parentHashes[i], ok = parentsMap[parentID]
			if !ok {
				// Error string lowercased per Go convention.
				return nil, errors.Errorf("couldn't find blockID: %s", parentID)
			}
			// A block that gains a child is no longer a tip.
			delete(tipSet, *parentHashes[i])
		}
		var blockHash *externalapi.DomainHash
		switch blockType {
		case testapi.MineJSONBlockTypeUTXOValidBlock:
			blockHash, _, err = tc.AddBlock(parentHashes, nil, nil)
			if err != nil {
				return nil, err
			}
		case testapi.MineJSONBlockTypeUTXOInvalidBlock:
			blockHash, _, err = tc.AddUTXOInvalidBlock(parentHashes)
			if err != nil {
				return nil, err
			}
		case testapi.MineJSONBlockTypeUTXOInvalidHeader:
			blockHash, _, err = tc.AddUTXOInvalidHeader(parentHashes)
			if err != nil {
				return nil, err
			}
		default:
			// Fixed typo in the original error message ("unknwon").
			return nil, errors.Errorf("unknown block type %v", blockType)
		}
		parentsMap[block.ID] = blockHash
		tipSet[*blockHash] = blockHash
	}

	// Collect the surviving tips; map iteration order is intentionally
	// arbitrary, so the returned slice order is unspecified.
	tips = make([]*externalapi.DomainHash, 0, len(tipSet))
	for _, tipHash := range tipSet {
		tips = append(tips, tipHash)
	}
	return tips, nil
}
func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
// Require write lock because BuildBlockWithParents stages temporary data
tc.lock.Lock()
defer tc.lock.Unlock()

View File

@@ -5,7 +5,7 @@
package addressmanager
import (
"encoding/binary"
"net"
"sync"
"github.com/kaspanet/kaspad/app/appmessage"
@@ -18,9 +18,11 @@ type AddressRandomizer interface {
RandomAddresses(addresses []*appmessage.NetAddress, count int) []*appmessage.NetAddress
}
// AddressKey represents a "string" key of the ip addresses
// for use as keys in maps.
type AddressKey string
// AddressKey represents a pair of IP and port, the IP is always in V6 representation
type AddressKey struct {
port uint16
address [net.IPv6len]byte
}
// ErrAddressNotFound is an error returned from some functions when a
// given address is not found in the address manager
@@ -28,13 +30,10 @@ var ErrAddressNotFound = errors.New("address not found")
// NetAddressKey returns a key of the ip address to use it in maps.
func netAddressKey(netAddress *appmessage.NetAddress) AddressKey {
port := make([]byte, 2, 2)
binary.LittleEndian.PutUint16(port, netAddress.Port)
key := make([]byte, len(netAddress.IP), len(netAddress.IP)+len(port))
copy(key, netAddress.IP)
return AddressKey(append(key, port...))
key := AddressKey{port: netAddress.Port}
// all IPv4 can be represented as IPv6.
copy(key.address[:], netAddress.IP.To16())
return key
}
// netAddressKeys returns a key of the ip address to use it in maps.

View File

@@ -9,6 +9,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
"net"
"time"
)
type gRPCServer struct {
@@ -61,7 +62,20 @@ func (s *gRPCServer) listenOn(listenAddr string) error {
}
func (s *gRPCServer) Stop() error {
s.server.GracefulStop()
const stopTimeout = 2 * time.Second
stopChan := make(chan interface{})
spawn("gRPCServer.Stop", func() {
s.server.GracefulStop()
close(stopChan)
})
select {
case <-stopChan:
case <-time.After(stopTimeout):
log.Warnf("Could not gracefully stop %s: timed out after %s", s.name, stopTimeout)
s.server.Stop()
}
return nil
}

View File

@@ -10,8 +10,8 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 8
appPatch uint = 6
appMinor uint = 9
appPatch uint = 0
)
// appBuild is defined as a variable so it can be overridden during the build