mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-03-30 15:08:33 +00:00
[NOD-500] Remove checkpoints (#541)
* [NOD-502] Remove checkpoints. * [NOD-502] Remove remaining references to checkpoints. * [NOD-500] Split RejectFinality to RejectDifficulty. * [NOD-500] Remove support for headers-first in p2p. * [NOD-500] Panic in newHashFromStr in case of an error.
This commit is contained in:
parent
c1f7ae72e0
commit
a140327dd2
@ -1,258 +0,0 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// CheckpointConfirmations is the number of blocks before the end of the current
|
||||
// best block chain that a good checkpoint candidate must be.
|
||||
const CheckpointConfirmations = 2016
|
||||
|
||||
// newHashFromStr converts the passed big-endian hex string into a
|
||||
// daghash.Hash. It only differs from the one available in daghash in that
|
||||
// it ignores the error since it will only (and must only) be called with
|
||||
// hard-coded, and therefore known good, hashes.
|
||||
func newHashFromStr(hexStr string) *daghash.Hash {
|
||||
hash, _ := daghash.NewHashFromStr(hexStr)
|
||||
return hash
|
||||
}
|
||||
|
||||
// newTxIDFromStr converts the passed big-endian hex string into a
|
||||
// daghash.TxID. It only differs from the one available in daghash in that
|
||||
// it ignores the error since it will only (and must only) be called with
|
||||
// hard-coded, and therefore known good, IDs.
|
||||
func newTxIDFromStr(hexStr string) *daghash.TxID {
|
||||
txID, _ := daghash.NewTxIDFromStr(hexStr)
|
||||
return txID
|
||||
}
|
||||
|
||||
// Checkpoints returns a slice of checkpoints (regardless of whether they are
|
||||
// already known). When there are no checkpoints for the chain, it will return
|
||||
// nil.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
|
||||
return dag.checkpoints
|
||||
}
|
||||
|
||||
// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) HasCheckpoints() bool {
|
||||
return len(dag.checkpoints) > 0
|
||||
}
|
||||
|
||||
// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
|
||||
// is already known). When there are no defined checkpoints for the active chain
|
||||
// instance, it will return nil.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
|
||||
if !dag.HasCheckpoints() {
|
||||
return nil
|
||||
}
|
||||
return &dag.checkpoints[len(dag.checkpoints)-1]
|
||||
}
|
||||
|
||||
// verifyCheckpoint returns whether the passed block chain height and hash combination
|
||||
// match the checkpoint data. It also returns true if there is no checkpoint
|
||||
// data for the passed block chain height.
|
||||
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
|
||||
if !dag.HasCheckpoints() {
|
||||
return true
|
||||
}
|
||||
|
||||
// Nothing to check if there is no checkpoint data for the block chainHeight.
|
||||
checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
|
||||
if !exists {
|
||||
return true
|
||||
}
|
||||
|
||||
if !checkpoint.Hash.IsEqual(hash) {
|
||||
return false
|
||||
}
|
||||
|
||||
log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
|
||||
checkpoint.Hash)
|
||||
return true
|
||||
}
|
||||
|
||||
// findPreviousCheckpoint finds the most recent checkpoint that is already
|
||||
// available in the downloaded portion of the block chain and returns the
|
||||
// associated block node. It returns nil if a checkpoint can't be found (this
|
||||
// should really only happen for blocks before the first checkpoint).
|
||||
//
|
||||
// This function MUST be called with the DAG lock held (for reads).
|
||||
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
|
||||
if !dag.HasCheckpoints() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Perform the initial search to find and cache the latest known
|
||||
// checkpoint if the best chain is not known yet or we haven't already
|
||||
// previously searched.
|
||||
checkpoints := dag.checkpoints
|
||||
numCheckpoints := len(checkpoints)
|
||||
if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
|
||||
// Loop backwards through the available checkpoints to find one
|
||||
// that is already available.
|
||||
for i := numCheckpoints - 1; i >= 0; i-- {
|
||||
node := dag.index.LookupNode(checkpoints[i].Hash)
|
||||
if node == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Checkpoint found. Cache it for future lookups and
|
||||
// set the next expected checkpoint accordingly.
|
||||
dag.checkpointNode = node
|
||||
if i < numCheckpoints-1 {
|
||||
dag.nextCheckpoint = &checkpoints[i+1]
|
||||
}
|
||||
return dag.checkpointNode, nil
|
||||
}
|
||||
|
||||
// No known latest checkpoint. This will only happen on blocks
|
||||
// before the first known checkpoint. So, set the next expected
|
||||
// checkpoint to the first checkpoint and return the fact there
|
||||
// is no latest known checkpoint block.
|
||||
dag.nextCheckpoint = &checkpoints[0]
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// At this point we've already searched for the latest known checkpoint,
|
||||
// so when there is no next checkpoint, the current checkpoint lockin
|
||||
// will always be the latest known checkpoint.
|
||||
if dag.nextCheckpoint == nil {
|
||||
return dag.checkpointNode, nil
|
||||
}
|
||||
|
||||
// When there is a next checkpoint and the chain height of the current
|
||||
// selected tip of the DAG does not exceed it, the current checkpoint
|
||||
// lockin is still the latest known checkpoint.
|
||||
if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
|
||||
return dag.checkpointNode, nil
|
||||
}
|
||||
|
||||
// We've reached or exceeded the next checkpoint height. Note that
|
||||
// once a checkpoint lockin has been reached, forks are prevented from
|
||||
// any blocks before the checkpoint, so we don't have to worry about the
|
||||
// checkpoint going away out from under us due to a chain reorganize.
|
||||
|
||||
// Cache the latest known checkpoint for future lookups. Note that if
|
||||
// this lookup fails something is very wrong since the chain has already
|
||||
// passed the checkpoint which was verified as accurate before inserting
|
||||
// it.
|
||||
checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
|
||||
if checkpointNode == nil {
|
||||
return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
|
||||
"failed lookup of known good block node %s",
|
||||
dag.nextCheckpoint.Hash))
|
||||
}
|
||||
dag.checkpointNode = checkpointNode
|
||||
|
||||
// Set the next expected checkpoint.
|
||||
checkpointIndex := -1
|
||||
for i := numCheckpoints - 1; i >= 0; i-- {
|
||||
if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
|
||||
checkpointIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
dag.nextCheckpoint = nil
|
||||
if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
|
||||
dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
|
||||
}
|
||||
|
||||
return dag.checkpointNode, nil
|
||||
}
|
||||
|
||||
// isNonstandardTransaction determines whether a transaction contains any
|
||||
// scripts which are not one of the standard types.
|
||||
func isNonstandardTransaction(tx *util.Tx) bool {
|
||||
// Check all of the output public key scripts for non-standard scripts.
|
||||
for _, txOut := range tx.MsgTx().TxOut {
|
||||
scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
|
||||
if scriptClass == txscript.NonStandardTy {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCheckpointCandidate returns whether or not the passed block is a good
|
||||
// checkpoint candidate.
|
||||
//
|
||||
// The factors used to determine a good checkpoint are:
|
||||
// - The block must be in the main chain
|
||||
// - The block must be at least 'CheckpointConfirmations' blocks prior to the
|
||||
// current end of the main chain
|
||||
// - The timestamps for the blocks before and after the checkpoint must have
|
||||
// timestamps which are also before and after the checkpoint, respectively
|
||||
// (due to the median time allowance this is not always the case)
|
||||
// - The block must not contain any strange transaction such as those with
|
||||
// nonstandard scripts
|
||||
//
|
||||
// The intent is that candidates are reviewed by a developer to make the final
|
||||
// decision and then manually added to the list of checkpoints for a network.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
|
||||
dag.dagLock.RLock()
|
||||
defer dag.dagLock.RUnlock()
|
||||
|
||||
// A checkpoint must be in the DAG.
|
||||
node := dag.index.LookupNode(block.Hash())
|
||||
if node == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Ensure the chain height of the passed block and the entry for the block
|
||||
// in the DAG match. This should always be the case unless the
|
||||
// caller provided an invalid block.
|
||||
if node.chainHeight != block.ChainHeight() {
|
||||
return false, errors.Errorf("passed block chain height of %d does not "+
|
||||
"match the its height in the DAG: %d", block.ChainHeight(),
|
||||
node.chainHeight)
|
||||
}
|
||||
|
||||
// A checkpoint must be at least CheckpointConfirmations blocks
|
||||
// before the end of the main chain.
|
||||
dagChainHeight := dag.selectedTip().chainHeight
|
||||
if node.chainHeight > (dagChainHeight - CheckpointConfirmations) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// A checkpoint must be have at least one block after it.
|
||||
//
|
||||
// This should always succeed since the check above already made sure it
|
||||
// is CheckpointConfirmations back, but be safe in case the constant
|
||||
// changes.
|
||||
if len(node.children) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// A checkpoint must be have at least one block before it.
|
||||
if &node.selectedParent == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// A checkpoint must have transactions that only contain standard
|
||||
// scripts.
|
||||
for _, tx := range block.Transactions() {
|
||||
if isNonstandardTransaction(tx) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// All of the checks passed, so the block is a candidate.
|
||||
return true, nil
|
||||
}
|
@ -45,20 +45,17 @@ type chainUpdates struct {
|
||||
|
||||
// BlockDAG provides functions for working with the kaspa block DAG.
|
||||
// It includes functionality such as rejecting duplicate blocks, ensuring blocks
|
||||
// follow all rules, orphan handling, checkpoint handling, and best chain
|
||||
// selection with reorganization.
|
||||
// follow all rules, orphan handling, and best chain selection with reorganization.
|
||||
type BlockDAG struct {
|
||||
// The following fields are set when the instance is created and can't
|
||||
// be changed afterwards, so there is no need to protect them with a
|
||||
// separate mutex.
|
||||
checkpoints []dagconfig.Checkpoint
|
||||
checkpointsByChainHeight map[uint64]*dagconfig.Checkpoint
|
||||
db database.DB
|
||||
dagParams *dagconfig.Params
|
||||
timeSource MedianTimeSource
|
||||
sigCache *txscript.SigCache
|
||||
indexManager IndexManager
|
||||
genesis *blockNode
|
||||
db database.DB
|
||||
dagParams *dagconfig.Params
|
||||
timeSource MedianTimeSource
|
||||
sigCache *txscript.SigCache
|
||||
indexManager IndexManager
|
||||
genesis *blockNode
|
||||
|
||||
// The following fields are calculated based upon the provided DAG
|
||||
// parameters. They are also set when the instance is created and
|
||||
@ -102,11 +99,6 @@ type BlockDAG struct {
|
||||
prevOrphans map[daghash.Hash][]*orphanBlock
|
||||
newestOrphan *orphanBlock
|
||||
|
||||
// These fields are related to checkpoint handling. They are protected
|
||||
// by the chain lock.
|
||||
nextCheckpoint *dagconfig.Checkpoint
|
||||
checkpointNode *blockNode
|
||||
|
||||
// The following caches are used to efficiently keep track of the
|
||||
// current deployment threshold state of each rule change deployment.
|
||||
//
|
||||
@ -468,7 +460,6 @@ func LockTimeToSequence(isSeconds bool, locktime uint64) uint64 {
|
||||
//
|
||||
// The flags modify the behavior of this function as follows:
|
||||
// - BFFastAdd: Avoids several expensive transaction validation operations.
|
||||
// This is useful when using checkpoints.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) addBlock(node *blockNode, parentNodes blockSet,
|
||||
@ -1239,18 +1230,10 @@ func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
|
||||
// isCurrent returns whether or not the DAG believes it is current. Several
|
||||
// factors are used to guess, but the key factors that allow the DAG to
|
||||
// believe it is current are:
|
||||
// - Latest block height is after the latest checkpoint (if enabled)
|
||||
// - Latest block has a timestamp newer than 24 hours ago
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for reads).
|
||||
func (dag *BlockDAG) isCurrent() bool {
|
||||
// Not current if the virtual's selected tip chain height is less than
|
||||
// the latest known good checkpoint (when checkpoints are enabled).
|
||||
checkpoint := dag.LatestCheckpoint()
|
||||
if checkpoint != nil && dag.selectedTip().chainHeight < checkpoint.ChainHeight {
|
||||
return false
|
||||
}
|
||||
|
||||
// Not current if the virtual's selected parent has a timestamp
|
||||
// before 24 hours ago. If the DAG is empty, we take the genesis
|
||||
// block timestamp.
|
||||
@ -1271,7 +1254,6 @@ func (dag *BlockDAG) isCurrent() bool {
|
||||
// IsCurrent returns whether or not the chain believes it is current. Several
|
||||
// factors are used to guess, but the key factors that allow the chain to
|
||||
// believe it is current are:
|
||||
// - Latest block height is after the latest checkpoint (if enabled)
|
||||
// - Latest block has a timestamp newer than 24 hours ago
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
@ -1854,14 +1836,6 @@ type Config struct {
|
||||
// This field is required.
|
||||
DAGParams *dagconfig.Params
|
||||
|
||||
// Checkpoints hold caller-defined checkpoints that should be added to
|
||||
// the default checkpoints in DAGParams. Checkpoints must be sorted
|
||||
// by height.
|
||||
//
|
||||
// This field can be nil if the caller does not wish to specify any
|
||||
// checkpoints.
|
||||
Checkpoints []dagconfig.Checkpoint
|
||||
|
||||
// TimeSource defines the median time source to use for things such as
|
||||
// block processing and determining whether or not the chain is current.
|
||||
//
|
||||
@ -1906,31 +1880,11 @@ func New(config *Config) (*BlockDAG, error) {
|
||||
return nil, AssertError("BlockDAG.New timesource is nil")
|
||||
}
|
||||
|
||||
// Generate a checkpoint by chain height map from the provided checkpoints
|
||||
// and assert the provided checkpoints are sorted by chain height as required.
|
||||
var checkpointsByChainHeight map[uint64]*dagconfig.Checkpoint
|
||||
var prevCheckpointChainHeight uint64
|
||||
if len(config.Checkpoints) > 0 {
|
||||
checkpointsByChainHeight = make(map[uint64]*dagconfig.Checkpoint)
|
||||
for i := range config.Checkpoints {
|
||||
checkpoint := &config.Checkpoints[i]
|
||||
if checkpoint.ChainHeight <= prevCheckpointChainHeight {
|
||||
return nil, AssertError("blockdag.New " +
|
||||
"checkpoints are not sorted by chain height")
|
||||
}
|
||||
|
||||
checkpointsByChainHeight[checkpoint.ChainHeight] = checkpoint
|
||||
prevCheckpointChainHeight = checkpoint.ChainHeight
|
||||
}
|
||||
}
|
||||
|
||||
params := config.DAGParams
|
||||
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
|
||||
|
||||
index := newBlockIndex(config.DB, params)
|
||||
dag := BlockDAG{
|
||||
checkpoints: config.Checkpoints,
|
||||
checkpointsByChainHeight: checkpointsByChainHeight,
|
||||
db: config.DB,
|
||||
dagParams: params,
|
||||
timeSource: config.TimeSource,
|
||||
|
@ -264,3 +264,15 @@ func TestDAGStateDeserializeErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newHashFromStr converts the passed big-endian hex string into a
|
||||
// daghash.Hash. It only differs from the one available in daghash in that
|
||||
// it panics in case of an error since it will only (and must only) be
|
||||
// called with hard-coded, and therefore known good, hashes.
|
||||
func newHashFromStr(hexStr string) *daghash.Hash {
|
||||
hash, err := daghash.NewHashFromStr(hexStr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
@ -35,8 +35,6 @@ is by no means exhaustive:
|
||||
- Perform a series of sanity checks on the block and its transactions such as
|
||||
verifying proof of work, timestamps, number and character of transactions,
|
||||
transaction amounts, script complexity, and merkle root calculations
|
||||
- Compare the block against predetermined checkpoints for expected timestamps
|
||||
and difficulty based on elapsed time since the checkpoint
|
||||
- Save the most recent orphan blocks for a limited time in case their parent
|
||||
blocks become available
|
||||
- Stop processing if the block is an orphan as the rest of the processing
|
||||
@ -44,7 +42,7 @@ is by no means exhaustive:
|
||||
- Perform a series of more thorough checks that depend on the block's position
|
||||
within the block chain such as verifying block difficulties adhere to
|
||||
difficulty retarget rules, timestamps are after the median of the last
|
||||
several blocks, all transactions are finalized, checkpoint blocks match, and
|
||||
several blocks, all transactions are finalized, and
|
||||
block versions are in line with the previous blocks
|
||||
- Determine how the block fits into the chain and perform different actions
|
||||
accordingly in order to ensure any side chains which have higher difficulty
|
||||
|
@ -52,8 +52,7 @@ const (
|
||||
ErrInvalidTime
|
||||
|
||||
// ErrTimeTooOld indicates the time is either before the median time of
|
||||
// the last several blocks per the chain consensus rules or prior to the
|
||||
// most recent checkpoint.
|
||||
// the last several blocks per the chain consensus rules.
|
||||
ErrTimeTooOld
|
||||
|
||||
// ErrTimeTooNew indicates the time is too far in the future as compared
|
||||
@ -67,7 +66,7 @@ const (
|
||||
ErrWrongParentsOrder
|
||||
|
||||
// ErrDifficultyTooLow indicates the difficulty for the block is lower
|
||||
// than the difficulty required by the most recent checkpoint.
|
||||
// than the difficulty required.
|
||||
ErrDifficultyTooLow
|
||||
|
||||
// ErrUnexpectedDifficulty indicates specified bits do not align with
|
||||
@ -88,10 +87,6 @@ const (
|
||||
// the expected value.
|
||||
ErrBadUTXOCommitment
|
||||
|
||||
// ErrBadCheckpoint indicates a block that is expected to be at a
|
||||
// checkpoint height does not match the expected one.
|
||||
ErrBadCheckpoint
|
||||
|
||||
// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
|
||||
// last finality point.
|
||||
ErrFinalityPointTimeTooOld
|
||||
@ -238,7 +233,6 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
|
||||
ErrHighHash: "ErrHighHash",
|
||||
ErrBadMerkleRoot: "ErrBadMerkleRoot",
|
||||
ErrBadCheckpoint: "ErrBadCheckpoint",
|
||||
ErrFinalityPointTimeTooOld: "ErrFinalityPointTimeTooOld",
|
||||
ErrNoTransactions: "ErrNoTransactions",
|
||||
ErrNoTxInputs: "ErrNoTxInputs",
|
||||
|
@ -27,7 +27,6 @@ func TestErrorCodeStringer(t *testing.T) {
|
||||
{ErrUnexpectedDifficulty, "ErrUnexpectedDifficulty"},
|
||||
{ErrHighHash, "ErrHighHash"},
|
||||
{ErrBadMerkleRoot, "ErrBadMerkleRoot"},
|
||||
{ErrBadCheckpoint, "ErrBadCheckpoint"},
|
||||
{ErrFinalityPointTimeTooOld, "ErrFinalityPointTimeTooOld"},
|
||||
{ErrNoTransactions, "ErrNoTransactions"},
|
||||
{ErrNoTxInputs, "ErrNoTxInputs"},
|
||||
@ -35,7 +34,6 @@ func TestErrorCodeStringer(t *testing.T) {
|
||||
{ErrBadTxOutValue, "ErrBadTxOutValue"},
|
||||
{ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
|
||||
{ErrBadTxInput, "ErrBadTxInput"},
|
||||
{ErrBadCheckpoint, "ErrBadCheckpoint"},
|
||||
{ErrMissingTxOut, "ErrMissingTxOut"},
|
||||
{ErrUnfinalizedTx, "ErrUnfinalizedTx"},
|
||||
{ErrDuplicateTx, "ErrDuplicateTx"},
|
||||
|
@ -115,11 +115,10 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
|
||||
|
||||
// Create the main chain instance.
|
||||
chain, err := blockdag.New(&blockdag.Config{
|
||||
DB: db,
|
||||
DAGParams: ¶msCopy,
|
||||
Checkpoints: nil,
|
||||
TimeSource: blockdag.NewMedianTime(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
DB: db,
|
||||
DAGParams: ¶msCopy,
|
||||
TimeSource: blockdag.NewMedianTime(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
})
|
||||
if err != nil {
|
||||
teardown()
|
||||
|
@ -125,9 +125,6 @@ var regressionNetParams = &dagconfig.Params{
|
||||
TimestampDeviationTolerance: 132,
|
||||
GenerateSupported: true,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Mempool parameters
|
||||
RelayNonStdTxs: true,
|
||||
|
||||
|
@ -19,8 +19,8 @@ type BehaviorFlags uint32
|
||||
const (
|
||||
// BFFastAdd may be set to indicate that several checks can be avoided
|
||||
// for the block since it is already known to fit into the chain due to
|
||||
// already proving it correct links into the chain up to a known
|
||||
// checkpoint. This is primarily used for headers-first mode.
|
||||
// already proving it correct links into the chain.
|
||||
// This is primarily used for headers-first mode.
|
||||
BFFastAdd BehaviorFlags = 1 << iota
|
||||
|
||||
// BFNoPoWCheck may be set to indicate the proof of work check which
|
||||
|
@ -549,8 +549,7 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (t
|
||||
// which depend on its position within the block dag.
|
||||
//
|
||||
// The flags modify the behavior of this function as follows:
|
||||
// - BFFastAdd: All checks except those involving comparing the header against
|
||||
// the checkpoints are not performed.
|
||||
// - BFFastAdd: No checks are performed.
|
||||
//
|
||||
// This function MUST be called with the dag state lock held (for writes).
|
||||
func (dag *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, bluestParent *blockNode, blockChainHeight uint64, fastAdd bool) error {
|
||||
@ -563,19 +562,6 @@ func (dag *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, bluestPar
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return dag.validateCheckpoints(header, blockChainHeight)
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) validateCheckpoints(header *wire.BlockHeader, blockChainHeight uint64) error {
|
||||
// Ensure DAG matches up to predetermined checkpoints.
|
||||
blockHash := header.BlockHash()
|
||||
if !dag.verifyCheckpoint(blockChainHeight, blockHash) {
|
||||
str := fmt.Sprintf("block at chain height %d does not match "+
|
||||
"checkpoint hash", blockChainHeight)
|
||||
return ruleError(ErrBadCheckpoint, str)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -902,19 +888,6 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
|
||||
}
|
||||
|
||||
if !fastAdd {
|
||||
|
||||
// Don't run scripts if this node is before the latest known good
|
||||
// checkpoint since the validity is verified via the checkpoints (all
|
||||
// transactions are included in the merkle root hash and any changes
|
||||
// will therefore be detected by the next checkpoint). This is a huge
|
||||
// optimization because running the scripts is the most time consuming
|
||||
// portion of block handling.
|
||||
checkpoint := dag.LatestCheckpoint()
|
||||
runScripts := true
|
||||
if checkpoint != nil && block.chainHeight <= checkpoint.ChainHeight {
|
||||
runScripts = false
|
||||
}
|
||||
|
||||
scriptFlags := txscript.ScriptNoFlags
|
||||
|
||||
// We obtain the MTP of the *previous* block (unless it's genesis block)
|
||||
@ -948,13 +921,10 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
|
||||
// transactions are actually allowed to spend the coins by running the
|
||||
// expensive ECDSA signature check scripts. Doing this last helps
|
||||
// prevent CPU exhaustion attacks.
|
||||
if runScripts {
|
||||
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
return feeData, nil
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ func ActiveConfig() *ConfigFlags {
|
||||
return activeConfig
|
||||
}
|
||||
|
||||
// ConfigFlags defines the configuration options for findcheckpoint.
|
||||
// ConfigFlags defines the configuration options for addblock.
|
||||
//
|
||||
// See loadConfig for details on the configuration load process.
|
||||
type ConfigFlags struct {
|
||||
|
@ -86,9 +86,8 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
|
||||
// processBlock potentially imports the block into the database. It first
|
||||
// deserializes the raw block while checking for errors. Already known blocks
|
||||
// are skipped and orphan blocks are considered errors. Finally, it runs the
|
||||
// block through the DAG rules to ensure it follows all rules and matches
|
||||
// up to the known checkpoint. Returns whether the block was imported along
|
||||
// with any potential errors.
|
||||
// block through the DAG rules to ensure it follows all rules.
|
||||
// Returns whether the block was imported along with any potential errors.
|
||||
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
||||
// Deserialize the block which includes checks for malformed blocks.
|
||||
block, err := util.NewBlockFromBytes(serializedBlock)
|
||||
@ -116,8 +115,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the blocks follows all of the chain rules and match up to the
|
||||
// known checkpoints.
|
||||
// Ensure the blocks follows all of the chain rules.
|
||||
isOrphan, delay, err := bi.dag.ProcessBlock(block,
|
||||
blockdag.BFFastAdd)
|
||||
if err != nil {
|
||||
|
@ -1,116 +0,0 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
const (
|
||||
minCandidates = 1
|
||||
maxCandidates = 20
|
||||
defaultNumCandidates = 5
|
||||
defaultDbType = "ffldb"
|
||||
)
|
||||
|
||||
var (
|
||||
kaspadHomeDir = util.AppDataDir("kaspad", false)
|
||||
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
|
||||
knownDbTypes = database.SupportedDrivers()
|
||||
activeConfig *ConfigFlags
|
||||
)
|
||||
|
||||
// ActiveConfig returns the active configuration struct
|
||||
func ActiveConfig() *ConfigFlags {
|
||||
return activeConfig
|
||||
}
|
||||
|
||||
// ConfigFlags defines the configuration options for findcheckpoint.
|
||||
//
|
||||
// See loadConfig for details on the configuration load process.
|
||||
type ConfigFlags struct {
|
||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
|
||||
NumCandidates int `short:"n" long:"numcandidates" description:"Max num of checkpoint candidates to show {1-20}"`
|
||||
UseGoOutput bool `short:"g" long:"gooutput" description:"Display the candidates using Go syntax that is ready to insert into the Kaspa checkpoint list"`
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
// validDbType returns whether or not dbType is a supported database type.
|
||||
func validDbType(dbType string) bool {
|
||||
for _, knownType := range knownDbTypes {
|
||||
if dbType == knownType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// loadConfig initializes and parses the config using command line options.
|
||||
func loadConfig() (*ConfigFlags, []string, error) {
|
||||
// Default config.
|
||||
activeConfig = &ConfigFlags{
|
||||
DataDir: defaultDataDir,
|
||||
DbType: defaultDbType,
|
||||
NumCandidates: defaultNumCandidates,
|
||||
}
|
||||
|
||||
// Parse command line options.
|
||||
parser := flags.NewParser(&activeConfig, flags.Default)
|
||||
remainingArgs, err := parser.Parse()
|
||||
if err != nil {
|
||||
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
|
||||
parser.WriteHelp(os.Stderr)
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
funcName := "loadConfig"
|
||||
|
||||
err = activeConfig.ResolveNetwork(parser)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Validate database type.
|
||||
if !validDbType(activeConfig.DbType) {
|
||||
str := "%s: The specified database type [%s] is invalid -- " +
|
||||
"supported types %s"
|
||||
err := errors.Errorf(str, funcName, activeConfig.DbType, strings.Join(knownDbTypes, ", "))
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
parser.WriteHelp(os.Stderr)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Append the network type to the data directory so it is "namespaced"
|
||||
// per network. In addition to the block database, there are other
|
||||
// pieces of data that are saved to disk such as address manager state.
|
||||
// All data is specific to a network, so namespacing the data directory
|
||||
// means each individual piece of serialized data does not have to
|
||||
// worry about changing names per network and such.
|
||||
activeConfig.DataDir = filepath.Join(activeConfig.DataDir, activeConfig.NetParams().Name)
|
||||
|
||||
// Validate the number of candidates.
|
||||
if activeConfig.NumCandidates < minCandidates || activeConfig.NumCandidates > maxCandidates {
|
||||
str := "%s: The specified number of candidates is out of " +
|
||||
"range -- parsed [%d]"
|
||||
err = errors.Errorf(str, funcName, activeConfig.NumCandidates)
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
parser.WriteHelp(os.Stderr)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return activeConfig, remainingArgs, nil
|
||||
}
|
@ -1,186 +0,0 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
const blockDbNamePrefix = "blocks"
|
||||
|
||||
var (
|
||||
cfg *ConfigFlags
|
||||
)
|
||||
|
||||
// loadBlockDB opens the block database and returns a handle to it.
|
||||
func loadBlockDB() (database.DB, error) {
|
||||
// The database name is based on the database type.
|
||||
dbName := blockDbNamePrefix + "_" + cfg.DbType
|
||||
dbPath := filepath.Join(cfg.DataDir, dbName)
|
||||
fmt.Printf("Loading block database from '%s'\n", dbPath)
|
||||
db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// findCandidates searches the DAG backwards for checkpoint candidates and
|
||||
// returns a slice of found candidates, if any. It also stops searching for
|
||||
// candidates at the last checkpoint that is already hard coded since there
|
||||
// is no point in finding candidates before already existing checkpoints.
|
||||
func findCandidates(dag *blockdag.BlockDAG, highestTipHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) {
|
||||
// Start with the selected tip.
|
||||
block, err := dag.BlockByHash(highestTipHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the latest known checkpoint.
|
||||
latestCheckpoint := dag.LatestCheckpoint()
|
||||
if latestCheckpoint == nil {
|
||||
// Set the latest checkpoint to the genesis block if there isn't
|
||||
// already one.
|
||||
latestCheckpoint = &dagconfig.Checkpoint{
|
||||
Hash: ActiveConfig().NetParams().GenesisHash,
|
||||
ChainHeight: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// The latest known block must be at least the last known checkpoint
|
||||
// plus required checkpoint confirmations.
|
||||
checkpointConfirmations := uint64(blockdag.CheckpointConfirmations)
|
||||
requiredChainHeight := latestCheckpoint.ChainHeight + checkpointConfirmations
|
||||
if block.ChainHeight() < requiredChainHeight {
|
||||
return nil, errors.Errorf("the block database is only at chain "+
|
||||
"height %d which is less than the latest checkpoint chain height "+
|
||||
"of %d plus required confirmations of %d",
|
||||
block.ChainHeight(), latestCheckpoint.ChainHeight,
|
||||
checkpointConfirmations)
|
||||
}
|
||||
|
||||
// For the first checkpoint, the required height is any block after the
|
||||
// genesis block, so long as the DAG has at least the required number
|
||||
// of confirmations (which is enforced above).
|
||||
if len(ActiveConfig().NetParams().Checkpoints) == 0 {
|
||||
requiredChainHeight = 1
|
||||
}
|
||||
|
||||
// Indeterminate progress setup.
|
||||
numBlocksToTest := block.ChainHeight() - requiredChainHeight
|
||||
progressInterval := (numBlocksToTest / 100) + 1 // min 1
|
||||
fmt.Print("Searching for candidates")
|
||||
defer fmt.Println()
|
||||
|
||||
// Loop backwards through the DAG to find checkpoint candidates.
|
||||
candidates := make([]*dagconfig.Checkpoint, 0, cfg.NumCandidates)
|
||||
numTested := uint64(0)
|
||||
for len(candidates) < cfg.NumCandidates && block.ChainHeight() > requiredChainHeight {
|
||||
// Display progress.
|
||||
if numTested%progressInterval == 0 {
|
||||
fmt.Print(".")
|
||||
}
|
||||
|
||||
// Determine if this block is a checkpoint candidate.
|
||||
isCandidate, err := dag.IsCheckpointCandidate(block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// All checks passed, so this node seems like a reasonable
|
||||
// checkpoint candidate.
|
||||
if isCandidate {
|
||||
checkpoint := dagconfig.Checkpoint{
|
||||
ChainHeight: block.ChainHeight(),
|
||||
Hash: block.Hash(),
|
||||
}
|
||||
candidates = append(candidates, &checkpoint)
|
||||
}
|
||||
|
||||
parentHashes := block.MsgBlock().Header.ParentHashes
|
||||
selectedBlockHash := parentHashes[0]
|
||||
block, err = dag.BlockByHash(selectedBlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
numTested++
|
||||
}
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
// showCandidate display a checkpoint candidate using and output format
|
||||
// determined by the configuration parameters. The Go syntax output
|
||||
// uses the format kaspa code expects for checkpoints added to the list.
|
||||
func showCandidate(candidateNum int, checkpoint *dagconfig.Checkpoint) {
|
||||
if cfg.UseGoOutput {
|
||||
fmt.Printf("Candidate %d -- {%d, newShaHashFromStr(\"%s\")},\n",
|
||||
candidateNum, checkpoint.ChainHeight, checkpoint.Hash)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Candidate %d -- ChainHeight: %d, Hash: %s\n", candidateNum,
|
||||
checkpoint.ChainHeight, checkpoint.Hash)
|
||||
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Load configuration and parse command line.
|
||||
tcfg, _, err := loadConfig()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cfg = tcfg
|
||||
|
||||
// Load the block database.
|
||||
db, err := loadBlockDB()
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "failed to load database:", err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Setup chain. Ignore notifications since they aren't needed for this
|
||||
// util.
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
DB: db,
|
||||
DAGParams: ActiveConfig().NetParams(),
|
||||
TimeSource: blockdag.NewMedianTime(),
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to initialize chain: %s\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get the latest block hash and height from the database and report
|
||||
// status.
|
||||
fmt.Printf("Block database loaded with block chain height %d\n", dag.ChainHeight())
|
||||
|
||||
// Find checkpoint candidates.
|
||||
selectedTipHash := dag.SelectedTipHash()
|
||||
candidates, err := findCandidates(dag, selectedTipHash)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Unable to identify candidates:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// No candidates.
|
||||
if len(candidates) == 0 {
|
||||
fmt.Println("No candidates found.")
|
||||
return
|
||||
}
|
||||
|
||||
// Show the candidates.
|
||||
for i, checkpoint := range candidates {
|
||||
showCandidate(i+1, checkpoint)
|
||||
}
|
||||
}
|
@ -21,11 +21,9 @@ import (
|
||||
|
||||
"github.com/btcsuite/go-socks/socks"
|
||||
"github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/logger"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/network"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
@ -131,8 +129,6 @@ type Flags struct {
|
||||
OnionProxyPass string `long:"onionpass" default-mask:"-" description:"Password for onion proxy server"`
|
||||
NoOnion bool `long:"noonion" description:"Disable connecting to tor hidden services"`
|
||||
TorIsolation bool `long:"torisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."`
|
||||
AddCheckpoints []string `long:"addcheckpoint" description:"Add a custom checkpoint. Format: '<height>:<hash>'"`
|
||||
DisableCheckpoints bool `long:"nocheckpoints" description:"Disable built-in checkpoints. Don't do this unless you know what you're doing."`
|
||||
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
|
||||
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
|
||||
@ -165,14 +161,13 @@ type Flags struct {
|
||||
// See loadConfig for details on the configuration load process.
|
||||
type Config struct {
|
||||
*Flags
|
||||
Lookup func(string) ([]net.IP, error)
|
||||
OnionDial func(string, string, time.Duration) (net.Conn, error)
|
||||
Dial func(string, string, time.Duration) (net.Conn, error)
|
||||
AddCheckpoints []dagconfig.Checkpoint
|
||||
MiningAddrs []util.Address
|
||||
MinRelayTxFee util.Amount
|
||||
Whitelists []*net.IPNet
|
||||
SubnetworkID *subnetworkid.SubnetworkID // nil in full nodes
|
||||
Lookup func(string) ([]net.IP, error)
|
||||
OnionDial func(string, string, time.Duration) (net.Conn, error)
|
||||
Dial func(string, string, time.Duration) (net.Conn, error)
|
||||
MiningAddrs []util.Address
|
||||
MinRelayTxFee util.Amount
|
||||
Whitelists []*net.IPNet
|
||||
SubnetworkID *subnetworkid.SubnetworkID // nil in full nodes
|
||||
}
|
||||
|
||||
// serviceOptions defines the configuration options for the daemon as a service on
|
||||
@ -206,54 +201,6 @@ func validDbType(dbType string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// newCheckpointFromStr parses checkpoints in the '<height>:<hash>' format.
|
||||
func newCheckpointFromStr(checkpoint string) (dagconfig.Checkpoint, error) {
|
||||
parts := strings.Split(checkpoint, ":")
|
||||
if len(parts) != 2 {
|
||||
return dagconfig.Checkpoint{}, errors.Errorf("unable to parse "+
|
||||
"checkpoint %q -- use the syntax <height>:<hash>",
|
||||
checkpoint)
|
||||
}
|
||||
|
||||
height, err := strconv.ParseInt(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return dagconfig.Checkpoint{}, errors.Errorf("unable to parse "+
|
||||
"checkpoint %q due to malformed height", checkpoint)
|
||||
}
|
||||
|
||||
if len(parts[1]) == 0 {
|
||||
return dagconfig.Checkpoint{}, errors.Errorf("unable to parse "+
|
||||
"checkpoint %q due to missing hash", checkpoint)
|
||||
}
|
||||
hash, err := daghash.NewHashFromStr(parts[1])
|
||||
if err != nil {
|
||||
return dagconfig.Checkpoint{}, errors.Errorf("unable to parse "+
|
||||
"checkpoint %q due to malformed hash", checkpoint)
|
||||
}
|
||||
|
||||
return dagconfig.Checkpoint{
|
||||
ChainHeight: uint64(height),
|
||||
Hash: hash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseCheckpoints checks the checkpoint strings for valid syntax
|
||||
// ('<height>:<hash>') and parses them to dagconfig.Checkpoint instances.
|
||||
func parseCheckpoints(checkpointStrings []string) ([]dagconfig.Checkpoint, error) {
|
||||
if len(checkpointStrings) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
checkpoints := make([]dagconfig.Checkpoint, len(checkpointStrings))
|
||||
for i, cpString := range checkpointStrings {
|
||||
checkpoint, err := newCheckpointFromStr(cpString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
checkpoints[i] = checkpoint
|
||||
}
|
||||
return checkpoints, nil
|
||||
}
|
||||
|
||||
// newConfigParser returns a new command line flags parser.
|
||||
func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options) *flags.Parser {
|
||||
parser := flags.NewParser(cfgFlags, options)
|
||||
@ -839,16 +786,6 @@ func loadConfig() (*Config, []string, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Check the checkpoints for syntax errors.
|
||||
activeConfig.AddCheckpoints, err = parseCheckpoints(activeConfig.Flags.AddCheckpoints)
|
||||
if err != nil {
|
||||
str := "%s: Error parsing checkpoints: %s"
|
||||
err := errors.Errorf(str, funcName, err)
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
fmt.Fprintln(os.Stderr, usageMessage)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Tor stream isolation requires either proxy or onion proxy to be set.
|
||||
if activeConfig.TorIsolation && activeConfig.Proxy == "" && activeConfig.OnionProxy == "" {
|
||||
str := "%s: Tor stream isolation requires either proxy or " +
|
||||
|
@ -50,18 +50,6 @@ const phantomK = 10
|
||||
const difficultyAdjustmentWindowSize = 2640
|
||||
const timestampDeviationTolerance = 132
|
||||
|
||||
// Checkpoint identifies a known good point in the block chain. Using
|
||||
// checkpoints allows a few optimizations for old blocks during initial download
|
||||
// and also prevents forks from old blocks.
|
||||
//
|
||||
// Each checkpoint is selected based upon several factors. See the
|
||||
// documentation for blockchain.IsCheckpointCandidate for details on the
|
||||
// selection criteria.
|
||||
type Checkpoint struct {
|
||||
ChainHeight uint64
|
||||
Hash *daghash.Hash
|
||||
}
|
||||
|
||||
// ConsensusDeployment defines details related to a specific consensus rule
|
||||
// change that is voted in. This is part of BIP0009.
|
||||
type ConsensusDeployment struct {
|
||||
@ -151,9 +139,6 @@ type Params struct {
|
||||
// GenerateSupported specifies whether or not CPU mining is allowed.
|
||||
GenerateSupported bool
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints []Checkpoint
|
||||
|
||||
// These fields are related to voting on consensus rule changes as
|
||||
// defined by BIP0009.
|
||||
//
|
||||
@ -213,9 +198,6 @@ var MainNetParams = Params{
|
||||
TimestampDeviationTolerance: timestampDeviationTolerance,
|
||||
GenerateSupported: false,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Consensus rule change deployments.
|
||||
//
|
||||
// The miner confirmation window is defined as:
|
||||
@ -274,9 +256,6 @@ var RegressionNetParams = Params{
|
||||
TimestampDeviationTolerance: timestampDeviationTolerance,
|
||||
GenerateSupported: true,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Consensus rule change deployments.
|
||||
//
|
||||
// The miner confirmation window is defined as:
|
||||
@ -333,9 +312,6 @@ var TestNetParams = Params{
|
||||
TimestampDeviationTolerance: timestampDeviationTolerance,
|
||||
GenerateSupported: true,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Consensus rule change deployments.
|
||||
//
|
||||
// The miner confirmation window is defined as:
|
||||
@ -398,9 +374,6 @@ var SimNetParams = Params{
|
||||
TimestampDeviationTolerance: timestampDeviationTolerance,
|
||||
GenerateSupported: true,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Consensus rule change deployments.
|
||||
//
|
||||
// The miner confirmation window is defined as:
|
||||
@ -455,9 +428,6 @@ var DevNetParams = Params{
|
||||
TimestampDeviationTolerance: timestampDeviationTolerance,
|
||||
GenerateSupported: true,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Consensus rule change deployments.
|
||||
//
|
||||
// The miner confirmation window is defined as:
|
||||
|
3
doc.go
3
doc.go
@ -73,9 +73,6 @@ Application Options:
|
||||
--testnet Use the test network
|
||||
--regtest Use the regression test network
|
||||
--simnet Use the simulation test network
|
||||
--addcheckpoint= Add a custom checkpoint. Format: '<height>:<hash>'
|
||||
--nocheckpoints Disable built-in checkpoints. Don't do this unless
|
||||
you know what you're doing.
|
||||
--uacomment= Comment to add to the user agent --
|
||||
See BIP 14 for more information.
|
||||
--dbtype= Database backend to use for the Block Chain (ffldb)
|
||||
|
@ -82,11 +82,9 @@ func extractRejectCode(err error) (wire.RejectCode, bool) {
|
||||
|
||||
// Rejected due to being earlier than the last finality point.
|
||||
case blockdag.ErrFinalityPointTimeTooOld:
|
||||
fallthrough
|
||||
code = wire.RejectFinality
|
||||
case blockdag.ErrDifficultyTooLow:
|
||||
fallthrough
|
||||
case blockdag.ErrBadCheckpoint:
|
||||
code = wire.RejectCheckpoint
|
||||
code = wire.RejectDifficulty
|
||||
|
||||
// Everything else is due to the block or transaction being invalid.
|
||||
default:
|
||||
|
@ -1577,15 +1577,11 @@ func TestExtractRejectCode(t *testing.T) {
|
||||
},
|
||||
{
|
||||
blockdagRuleErrorCode: blockdag.ErrFinalityPointTimeTooOld,
|
||||
wireRejectCode: wire.RejectCheckpoint,
|
||||
wireRejectCode: wire.RejectFinality,
|
||||
},
|
||||
{
|
||||
blockdagRuleErrorCode: blockdag.ErrDifficultyTooLow,
|
||||
wireRejectCode: wire.RejectCheckpoint,
|
||||
},
|
||||
{
|
||||
blockdagRuleErrorCode: blockdag.ErrBadCheckpoint,
|
||||
wireRejectCode: wire.RejectCheckpoint,
|
||||
wireRejectCode: wire.RejectDifficulty,
|
||||
},
|
||||
{
|
||||
blockdagRuleErrorCode: math.MaxUint32,
|
||||
|
@ -29,7 +29,5 @@ type Config struct {
|
||||
DAG *blockdag.BlockDAG
|
||||
TxMemPool *mempool.TxPool
|
||||
ChainParams *dagconfig.Params
|
||||
|
||||
DisableCheckpoints bool
|
||||
MaxPeers int
|
||||
MaxPeers int
|
||||
}
|
||||
|
@ -5,14 +5,11 @@
|
||||
package netsync
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
@ -25,11 +22,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// minInFlightBlocks is the minimum number of blocks that should be
|
||||
// in the request queue for headers-first mode before requesting
|
||||
// more.
|
||||
minInFlightBlocks = 10
|
||||
|
||||
// maxRejectedTxns is the maximum number of rejected transactions
|
||||
// hashes to store in memory.
|
||||
maxRejectedTxns = 1000
|
||||
@ -64,13 +56,6 @@ type invMsg struct {
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// headersMsg packages a kaspa headers message and the peer it came from
|
||||
// together so the block handler has access to that information.
|
||||
type headersMsg struct {
|
||||
headers *wire.MsgHeaders
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// donePeerMsg signifies a newly disconnected peer to the block handler.
|
||||
type donePeerMsg struct {
|
||||
peer *peerpkg.Peer
|
||||
@ -129,13 +114,6 @@ type pauseMsg struct {
|
||||
unpause <-chan struct{}
|
||||
}
|
||||
|
||||
// headerNode is used as a node in a list of headers that are linked together
|
||||
// between checkpoints.
|
||||
type headerNode struct {
|
||||
height uint64
|
||||
hash *daghash.Hash
|
||||
}
|
||||
|
||||
type requestQueueAndSet struct {
|
||||
queue []*wire.InvVect
|
||||
set map[daghash.Hash]struct{}
|
||||
@ -174,94 +152,6 @@ type SyncManager struct {
|
||||
requestedBlocks map[daghash.Hash]struct{}
|
||||
syncPeer *peerpkg.Peer
|
||||
peerStates map[*peerpkg.Peer]*peerSyncState
|
||||
|
||||
// The following fields are used for headers-first mode.
|
||||
headersFirstMode bool
|
||||
headerList *list.List
|
||||
startHeader *list.Element
|
||||
nextCheckpoint *dagconfig.Checkpoint
|
||||
}
|
||||
|
||||
// PushGetBlockInvsOrHeaders sends a getblockinvs or getheaders message according to checkpoint status
|
||||
// for the provided start hash.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (sm *SyncManager) PushGetBlockInvsOrHeaders(peer *peerpkg.Peer, startHash *daghash.Hash) error {
|
||||
// When the current height is less than a known checkpoint we
|
||||
// can use block headers to learn about which blocks comprise
|
||||
// the DAG up to the checkpoint and perform less validation
|
||||
// for them. This is possible since each header contains the
|
||||
// hash of the previous header and a merkle root. Therefore if
|
||||
// we validate all of the received headers link together
|
||||
// properly and the checkpoint hashes match, we can be sure the
|
||||
// hashes for the blocks in between are accurate. Further, once
|
||||
// the full blocks are downloaded, the merkle root is computed
|
||||
// and compared against the value in the header which proves the
|
||||
// full block hasn't been tampered with.
|
||||
//
|
||||
// Once we have passed the final checkpoint, or checkpoints are
|
||||
// disabled, use standard inv messages learn about the blocks
|
||||
// and fully validate them. Finally, regression test mode does
|
||||
// not support the headers-first approach so do normal block
|
||||
// downloads when in regression test mode.
|
||||
if sm.nextCheckpoint != nil &&
|
||||
sm.dag.ChainHeight() < sm.nextCheckpoint.ChainHeight &&
|
||||
sm.dagParams != &dagconfig.RegressionNetParams {
|
||||
//TODO: (Ori) This is probably wrong. Done only for compilation
|
||||
err := peer.PushGetHeadersMsg(startHash, sm.nextCheckpoint.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sm.headersFirstMode = true
|
||||
log.Infof("Downloading headers for blocks %d to "+
|
||||
"%d from peer %s", sm.dag.ChainHeight()+1,
|
||||
sm.nextCheckpoint.ChainHeight, peer.Addr()) //TODO: (Ori) This is probably wrong. Done only for compilation
|
||||
}
|
||||
return peer.PushGetBlockInvsMsg(startHash, peer.SelectedTip())
|
||||
}
|
||||
|
||||
// resetHeaderState sets the headers-first mode state to values appropriate for
|
||||
// syncing from a new peer.
|
||||
func (sm *SyncManager) resetHeaderState(newestHash *daghash.Hash, newestHeight uint64) {
|
||||
sm.headersFirstMode = false
|
||||
sm.headerList.Init()
|
||||
sm.startHeader = nil
|
||||
|
||||
// When there is a next checkpoint, add an entry for the latest known
|
||||
// block into the header pool. This allows the next downloaded header
|
||||
// to prove it links to the chain properly.
|
||||
if sm.nextCheckpoint != nil {
|
||||
node := headerNode{height: newestHeight, hash: newestHash}
|
||||
sm.headerList.PushBack(&node)
|
||||
}
|
||||
}
|
||||
|
||||
// findNextHeaderCheckpoint returns the next checkpoint after the passed height.
|
||||
// It returns nil when there is not one either because the height is already
|
||||
// later than the final checkpoint or some other reason such as disabled
|
||||
// checkpoints.
|
||||
func (sm *SyncManager) findNextHeaderCheckpoint(height uint64) *dagconfig.Checkpoint {
|
||||
checkpoints := sm.dag.Checkpoints()
|
||||
if len(checkpoints) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// There is no next checkpoint if the height is already after the final
|
||||
// checkpoint.
|
||||
finalCheckpoint := &checkpoints[len(checkpoints)-1]
|
||||
if height >= finalCheckpoint.ChainHeight {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find the next checkpoint.
|
||||
nextCheckpoint := finalCheckpoint
|
||||
for i := len(checkpoints) - 2; i >= 0; i-- {
|
||||
if height >= checkpoints[i].ChainHeight {
|
||||
break
|
||||
}
|
||||
nextCheckpoint = &checkpoints[i]
|
||||
}
|
||||
return nextCheckpoint
|
||||
}
|
||||
|
||||
// startSync will choose the best peer among the available candidate peers to
|
||||
@ -300,14 +190,7 @@ func (sm *SyncManager) startSync() {
|
||||
log.Infof("Syncing to block %s from peer %s",
|
||||
bestPeer.SelectedTip(), bestPeer.Addr())
|
||||
|
||||
if sm.nextCheckpoint != nil &&
|
||||
sm.dag.ChainHeight() < sm.nextCheckpoint.ChainHeight &&
|
||||
sm.dagParams != &dagconfig.RegressionNetParams {
|
||||
//TODO: (Ori) This is probably wrong. Done only for compilation
|
||||
bestPeer.PushGetBlockLocatorMsg(sm.nextCheckpoint.Hash, sm.dagParams.GenesisHash)
|
||||
} else {
|
||||
bestPeer.PushGetBlockLocatorMsg(&daghash.ZeroHash, sm.dagParams.GenesisHash)
|
||||
}
|
||||
bestPeer.PushGetBlockLocatorMsg(&daghash.ZeroHash, sm.dagParams.GenesisHash)
|
||||
sm.syncPeer = bestPeer
|
||||
} else {
|
||||
log.Warnf("No sync peer candidates available")
|
||||
@ -412,14 +295,9 @@ func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) {
|
||||
|
||||
func (sm *SyncManager) stopSyncFromPeer(peer *peerpkg.Peer) {
|
||||
// Attempt to find a new peer to sync from if the quitting peer is the
|
||||
// sync peer. Also, reset the headers-first state if in headers-first
|
||||
// mode so
|
||||
// sync peer.
|
||||
if sm.syncPeer == peer {
|
||||
sm.syncPeer = nil
|
||||
if sm.headersFirstMode {
|
||||
selectedTipHash := sm.dag.SelectedTipHash()
|
||||
sm.resetHeaderState(selectedTipHash, sm.dag.ChainHeight()) //TODO: (Ori) This is probably wrong. Done only for compilation
|
||||
}
|
||||
sm.startSync()
|
||||
}
|
||||
}
|
||||
@@ -553,30 +431,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
        }
    }

    // When in headers-first mode, if the block matches the hash of the
    // first header in the list of headers that are being fetched, it's
    // eligible for less validation since the headers have already been
    // verified to link together and are valid up to the next checkpoint.
    // Also, remove the list entry for all blocks except the checkpoint
    // since it is needed to verify the next round of headers links
    // properly.
    isCheckpointBlock := false
    behaviorFlags := blockdag.BFNone
    if sm.headersFirstMode {
        firstNodeEl := sm.headerList.Front()
        if firstNodeEl != nil {
            firstNode := firstNodeEl.Value.(*headerNode)
            if blockHash.IsEqual(firstNode.hash) {
                behaviorFlags |= blockdag.BFFastAdd
                if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
                    isCheckpointBlock = true
                } else {
                    sm.headerList.Remove(firstNodeEl)
                }
            }
        }
    }

    if bmsg.isDelayedBlock {
        behaviorFlags |= blockdag.BFAfterDelay
    }
@@ -657,55 +512,6 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
            return
        }
    }

    // Nothing more to do if we aren't in headers-first mode.
    if !sm.headersFirstMode {
        return
    }

    // This is headers-first mode, so if the block is not a checkpoint
    // request more blocks using the header list when the request queue is
    // getting short.
    if !isCheckpointBlock {
        if sm.startHeader != nil &&
            len(state.requestedBlocks) < minInFlightBlocks {
            sm.fetchHeaderBlocks()
        }
        return
    }

    // This is headers-first mode and the block is a checkpoint. When
    // there is a next checkpoint, get the next round of headers by asking
    // for headers starting from the block after this one up to the next
    // checkpoint.
    prevHeight := sm.nextCheckpoint.ChainHeight
    parentHash := sm.nextCheckpoint.Hash
    sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
    if sm.nextCheckpoint != nil {
        err := peer.PushGetHeadersMsg(parentHash, sm.nextCheckpoint.Hash)
        if err != nil {
            log.Warnf("Failed to send getheaders message to "+
                "peer %s: %s", peer.Addr(), err)
            return
        }
        log.Infof("Downloading headers for blocks %d to %d from "+
            "peer %s", prevHeight+1, sm.nextCheckpoint.ChainHeight,
            sm.syncPeer.Addr())
        return
    }

    // This is headers-first mode, the block is a checkpoint, and there are
    // no more checkpoints, so switch to normal mode by requesting blocks
    // from the block after this one up to the end of the chain (zero hash).
    sm.headersFirstMode = false
    sm.headerList.Init()
    log.Infof("Reached the final checkpoint -- switching to normal mode")
    err = peer.PushGetBlockInvsMsg(blockHash, peer.SelectedTip())
    if err != nil {
        log.Warnf("Failed to send getblockinvs message to peer %s: %s",
            peer.Addr(), err)
        return
    }
}

func (sm *SyncManager) addBlocksToRequestQueue(state *peerSyncState, hashes []*daghash.Hash, isRelayedInv bool) {
@@ -740,160 +546,6 @@ func (state *peerSyncState) addInvToRequestQueue(iv *wire.InvVect) {
    state.addInvToRequestQueueNoLock(iv)
}

// fetchHeaderBlocks creates and sends a request to the syncPeer for the next
// list of blocks to be downloaded based on the current list of headers.
func (sm *SyncManager) fetchHeaderBlocks() {
    // Nothing to do if there is no start header.
    if sm.startHeader == nil {
        log.Warnf("fetchHeaderBlocks called with no start header")
        return
    }

    // Build up a getdata request for the list of blocks the headers
    // describe. The size hint will be limited to wire.MaxInvPerMsg by
    // the function, so no need to double check it here.
    gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len()))
    numRequested := 0
    for e := sm.startHeader; e != nil; e = e.Next() {
        node, ok := e.Value.(*headerNode)
        if !ok {
            log.Warn("Header list node type is not a headerNode")
            continue
        }

        iv := wire.NewInvVect(wire.InvTypeBlock, node.hash)
        haveInv, err := sm.haveInventory(iv)
        if err != nil {
            log.Warnf("Unexpected failure when checking for "+
                "existing inventory during header block "+
                "fetch: %s", err)
        }
        if !haveInv {
            syncPeerState := sm.peerStates[sm.syncPeer]

            sm.requestedBlocks[*node.hash] = struct{}{}
            syncPeerState.requestedBlocks[*node.hash] = struct{}{}

            gdmsg.AddInvVect(iv)
            numRequested++
        }
        sm.startHeader = e.Next()
        if numRequested >= wire.MaxInvPerMsg {
            break
        }
    }
    if len(gdmsg.InvList) > 0 {
        sm.syncPeer.QueueMessage(gdmsg, nil)
    }
}

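The fetch above batches at most wire.MaxInvPerMsg block hashes into a single getdata request. A minimal sketch of that batching idea, with a small stand-in limit and plain string hashes (both are assumptions for the example, not kaspad values):

// Illustrative sketch: splitting pending hashes into per-message batches.
package main

import "fmt"

const maxInvPerMsg = 3 // stand-in for the real per-message limit

// batchHashes splits the pending hashes into request batches of at most
// maxInvPerMsg entries each.
func batchHashes(hashes []string) [][]string {
    var batches [][]string
    for len(hashes) > 0 {
        n := len(hashes)
        if n > maxInvPerMsg {
            n = maxInvPerMsg
        }
        batches = append(batches, hashes[:n])
        hashes = hashes[n:]
    }
    return batches
}

func main() {
    fmt.Println(batchHashes([]string{"a", "b", "c", "d", "e"}))
    // [[a b c] [d e]]
}
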
// handleHeadersMsg handles block header messages from all peers. Headers are
// requested when performing a headers-first sync.
func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
    peer := hmsg.peer
    _, exists := sm.peerStates[peer]
    if !exists {
        log.Warnf("Received headers message from unknown peer %s", peer)
        return
    }

    // The remote peer is misbehaving if we didn't request headers.
    msg := hmsg.headers
    numHeaders := len(msg.Headers)
    if !sm.headersFirstMode {
        log.Warnf("Got %d unrequested headers from %s -- "+
            "disconnecting", numHeaders, peer.Addr())
        peer.Disconnect()
        return
    }

    // Nothing to do for an empty headers message.
    if numHeaders == 0 {
        return
    }

    // Process all of the received headers ensuring each one connects to the
    // previous and that checkpoints match.
    receivedCheckpoint := false
    var finalHash *daghash.Hash
    for _, blockHeader := range msg.Headers {
        blockHash := blockHeader.BlockHash()
        finalHash = blockHash

        // Ensure there is a previous header to compare against.
        prevNodeEl := sm.headerList.Back()
        if prevNodeEl == nil {
            log.Warnf("Header list does not contain a previous " +
                "element as expected -- disconnecting peer")
            peer.Disconnect()
            return
        }

        // Ensure the header properly connects to the previous one and
        // add it to the list of headers.
        node := headerNode{hash: blockHash}
        prevNode := prevNodeEl.Value.(*headerNode)
        if prevNode.hash.IsEqual(blockHeader.ParentHashes[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
            node.height = prevNode.height + 1
            e := sm.headerList.PushBack(&node)
            if sm.startHeader == nil {
                sm.startHeader = e
            }
        } else {
            log.Warnf("Received block header that does not "+
                "properly connect to the chain from peer %s "+
                "-- disconnecting", peer.Addr())
            peer.Disconnect()
            return
        }

        // Verify the header at the next checkpoint height matches.
        if node.height == sm.nextCheckpoint.ChainHeight {
            if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
                receivedCheckpoint = true
                log.Infof("Verified downloaded block "+
                    "header against checkpoint at height "+
                    "%d/hash %s", node.height, node.hash)
            } else {
                log.Warnf("Block header at height %d/hash "+
                    "%s from peer %s does NOT match "+
                    "expected checkpoint hash of %s -- "+
                    "disconnecting", node.height,
                    node.hash, peer.Addr(),
                    sm.nextCheckpoint.Hash)
                peer.Disconnect()
                return
            }
            break
        }
    }

    // When this header is a checkpoint, switch to fetching the blocks for
    // all of the headers since the last checkpoint.
    if receivedCheckpoint {
        // Since the first entry of the list is always the final block
        // that is already in the database and is only used to ensure
        // the next header links properly, it must be removed before
        // fetching the blocks.
        sm.headerList.Remove(sm.headerList.Front())
        log.Infof("Received %d block headers: Fetching blocks",
            sm.headerList.Len())
        sm.progressLogger.SetLastLogTime(time.Now())
        sm.fetchHeaderBlocks()
        return
    }

    // This header is not a checkpoint, so request the next batch of
    // headers starting from the latest known header and ending with the
    // next checkpoint.
    err := peer.PushGetHeadersMsg(finalHash, sm.nextCheckpoint.Hash)
    if err != nil {
        log.Warnf("Failed to send getheaders message to "+
            "peer %s: %s", peer.Addr(), err)
        return
    }
}

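The handler above applies two checks to each received header: it must connect to the previously accepted header, and at the checkpoint height its hash must equal the expected checkpoint hash. A minimal sketch of those checks, using simplified stand-in types rather than the wire and daghash types:

// Illustrative sketch of the link and checkpoint checks performed per header.
package main

import (
    "errors"
    "fmt"
)

type header struct {
    Hash       string
    ParentHash string
}

func verifyHeaders(headers []header, prevHash string, prevHeight uint64,
    checkpointHeight uint64, checkpointHash string) error {

    height := prevHeight
    for _, h := range headers {
        if h.ParentHash != prevHash {
            return errors.New("header does not connect to previous header")
        }
        height++
        if height == checkpointHeight && h.Hash != checkpointHash {
            return fmt.Errorf("header at height %d does not match checkpoint", height)
        }
        prevHash = h.Hash
    }
    return nil
}

func main() {
    hs := []header{{"b", "a"}, {"c", "b"}}
    fmt.Println(verifyHeaders(hs, "a", 10, 12, "c")) // <nil>
    fmt.Println(verifyHeaders(hs, "a", 10, 12, "x")) // checkpoint mismatch
}
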
// haveInventory returns whether or not the inventory represented by the passed
// inventory vector is known. This includes checking all of the various places
// inventory can be when it is in different states such as blocks that are part
@@ -981,11 +633,6 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
        // for the peer.
        peer.AddKnownInventory(iv)

        // Ignore inventory when we're in headers-first mode.
        if sm.headersFirstMode {
            continue
        }

        // Request the inventory if we don't already have it.
        haveInv, err := sm.haveInventory(iv)
        if err != nil {
@@ -1209,9 +856,6 @@ out:
        case *invMsg:
            sm.handleInvMsg(msg)

        case *headersMsg:
            sm.handleHeadersMsg(msg)

        case *donePeerMsg:
            sm.handleDonePeerMsg(msg.peer)

@@ -1351,18 +995,6 @@ func (sm *SyncManager) QueueInv(inv *wire.MsgInv, peer *peerpkg.Peer) {
    sm.msgChan <- &invMsg{inv: inv, peer: peer}
}

// QueueHeaders adds the passed headers message and peer to the block handling
// queue.
func (sm *SyncManager) QueueHeaders(headers *wire.MsgHeaders, peer *peerpkg.Peer) {
    // No channel handling here because peers do not need to block on
    // headers messages.
    if atomic.LoadInt32(&sm.shutdown) != 0 {
        return
    }

    sm.msgChan <- &headersMsg{headers: headers, peer: peer}
}

// DonePeer informs the blockmanager that a peer has disconnected.
func (sm *SyncManager) DonePeer(peer *peerpkg.Peer) {
    // Ignore if we are shutting down.
@@ -1462,21 +1094,9 @@ func New(config *Config) (*SyncManager, error) {
        peerStates:     make(map[*peerpkg.Peer]*peerSyncState),
        progressLogger: newBlockProgressLogger("Processed", log),
        msgChan:        make(chan interface{}, config.MaxPeers*3),
        headerList:     list.New(),
        quit:           make(chan struct{}),
    }

    selectedTipHash := sm.dag.SelectedTipHash()
    if !config.DisableCheckpoints {
        // Initialize the next checkpoint based on the current chain height.
        sm.nextCheckpoint = sm.findNextHeaderCheckpoint(sm.dag.ChainHeight()) //TODO: (Ori) This is probably wrong. Done only for compilation
        if sm.nextCheckpoint != nil {
            sm.resetHeaderState(selectedTipHash, sm.dag.ChainHeight()) //TODO: (Ori) This is probably wrong. Done only for compilation
        }
    } else {
        log.Info("Checkpoints are disabled")
    }

    sm.dag.Subscribe(sm.handleBlockDAGNotification)

    return &sm, nil

peer/peer.go
@@ -126,9 +126,6 @@ type MessageListeners struct {
    // OnBlockLocator is invoked when a peer receives a locator kaspa message.
    OnBlockLocator func(p *Peer, msg *wire.MsgBlockLocator)

    // OnHeaders is invoked when a peer receives a headers kaspa message.
    OnHeaders func(p *Peer, msg *wire.MsgHeaders)

    // OnNotFound is invoked when a peer receives a notfound kaspa
    // message.
    OnNotFound func(p *Peer, msg *wire.MsgNotFound)
@@ -140,10 +137,6 @@ type MessageListeners struct {
    // message.
    OnGetBlockInvs func(p *Peer, msg *wire.MsgGetBlockInvs)

    // OnGetHeaders is invoked when a peer receives a getheaders kaspa
    // message.
    OnGetHeaders func(p *Peer, msg *wire.MsgGetHeaders)

    // OnFeeFilter is invoked when a peer receives a feefilter bitcoin message.
    OnFeeFilter func(p *Peer, msg *wire.MsgFeeFilter)

@@ -1443,11 +1436,6 @@ out:
            p.cfg.Listeners.OnInv(p, msg)
        }

    case *wire.MsgHeaders:
        if p.cfg.Listeners.OnHeaders != nil {
            p.cfg.Listeners.OnHeaders(p, msg)
        }

    case *wire.MsgNotFound:
        if p.cfg.Listeners.OnNotFound != nil {
            p.cfg.Listeners.OnNotFound(p, msg)
@@ -1473,11 +1461,6 @@ out:
            p.cfg.Listeners.OnGetBlockInvs(p, msg)
        }

    case *wire.MsgGetHeaders:
        if p.cfg.Listeners.OnGetHeaders != nil {
            p.cfg.Listeners.OnGetHeaders(p, msg)
        }

    case *wire.MsgFeeFilter:
        if p.cfg.Listeners.OnFeeFilter != nil {
            p.cfg.Listeners.OnFeeFilter(p, msg)

@@ -360,9 +360,6 @@ func TestPeerListeners(t *testing.T) {
        OnInv: func(p *peer.Peer, msg *wire.MsgInv) {
            ok <- msg
        },
        OnHeaders: func(p *peer.Peer, msg *wire.MsgHeaders) {
            ok <- msg
        },
        OnNotFound: func(p *peer.Peer, msg *wire.MsgNotFound) {
            ok <- msg
        },
@@ -372,9 +369,6 @@ func TestPeerListeners(t *testing.T) {
        OnGetBlockInvs: func(p *peer.Peer, msg *wire.MsgGetBlockInvs) {
            ok <- msg
        },
        OnGetHeaders: func(p *peer.Peer, msg *wire.MsgGetHeaders) {
            ok <- msg
        },
        OnFeeFilter: func(p *peer.Peer, msg *wire.MsgFeeFilter) {
            ok <- msg
        },
@@ -471,10 +465,6 @@ func TestPeerListeners(t *testing.T) {
            "OnInv",
            wire.NewMsgInv(),
        },
        {
            "OnHeaders",
            wire.NewMsgHeaders(),
        },
        {
            "OnNotFound",
            wire.NewMsgNotFound(),
@@ -487,10 +477,6 @@ func TestPeerListeners(t *testing.T) {
            "OnGetBlockInvs",
            wire.NewMsgGetBlockInvs(&daghash.Hash{}, &daghash.Hash{}),
        },
        {
            "OnGetHeaders",
            wire.NewMsgGetHeaders(&daghash.Hash{}, &daghash.Hash{}),
        },
        {
            "OnFeeFilter",
            wire.NewMsgFeeFilter(15000),

@@ -28,7 +28,7 @@ func (sp *Peer) OnBlockLocator(_ *peer.Peer, msg *wire.MsgBlockLocator) {
        sp.server.SyncManager.RemoveFromSyncCandidates(sp.Peer)
        return
    }
    err := sp.server.SyncManager.PushGetBlockInvsOrHeaders(sp.Peer, firstHash)
    err := sp.Peer.PushGetBlockInvsMsg(firstHash, sp.Peer.SelectedTip())
    if err != nil {
        peerLog.Errorf("Failed pushing get blocks message for peer %s: %s",
            sp, err)

@@ -1,39 +0,0 @@
package p2p

import (
    "github.com/kaspanet/kaspad/peer"
    "github.com/kaspanet/kaspad/wire"
    "github.com/prometheus/common/log"
)

// OnGetHeaders is invoked when a peer receives a getheaders kaspa
// message.
func (sp *Peer) OnGetHeaders(_ *peer.Peer, msg *wire.MsgGetHeaders) {
    // Ignore getheaders requests if not in sync.
    if !sp.server.SyncManager.IsCurrent() {
        return
    }

    // Find the most recent known block in the best chain based on the block
    // locator and fetch all of the headers after it until either
    // wire.MaxBlockHeadersPerMsg have been fetched or the provided stop
    // hash is encountered.
    //
    // Use the block after the genesis block if no other blocks in the
    // provided locator are known. This does mean the client will start
    // over with the genesis block if unknown block locators are provided.
    dag := sp.server.DAG
    headers, err := dag.GetBlueBlocksHeadersBetween(msg.StartHash, msg.StopHash)
    if err != nil {
        log.Warnf("Error getting blue blocks headers between %s and %s: %s", msg.StartHash, msg.StopHash, err)
        sp.Disconnect()
        return
    }

    // Send found headers to the requesting peer.
    blockHeaders := make([]*wire.BlockHeader, len(headers))
    for i := range headers {
        blockHeaders[i] = headers[i]
    }
    sp.QueueMessage(&wire.MsgHeaders{Headers: blockHeaders}, nil)
}
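As a rough sketch of the getheaders flow the removed handler implemented (locate the requested range, reply with at most a fixed number of headers), under simplified assumptions: plain string hashes, an in-memory chain, and a stand-in maxHeadersPerMsg limit instead of wire.MaxBlockHeadersPerMsg.

// Illustrative sketch only, not the kaspad handler.
package main

import "fmt"

const maxHeadersPerMsg = 2000 // stand-in limit

// headersBetween returns headers after startHash up to and including stopHash,
// capped at maxHeadersPerMsg. If startHash is unknown it starts from the
// beginning of the chain (a simplification of the real locator behavior).
func headersBetween(chain []string, startHash, stopHash string) []string {
    start := 0
    for i, h := range chain {
        if h == startHash {
            start = i + 1
            break
        }
    }
    var out []string
    for _, h := range chain[start:] {
        out = append(out, h)
        if h == stopHash || len(out) == maxHeadersPerMsg {
            break
        }
    }
    return out
}

func main() {
    chain := []string{"g", "a", "b", "c", "d"}
    fmt.Println(headersBetween(chain, "a", "c")) // [b c]
}
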
@@ -1,12 +0,0 @@
package p2p

import (
    "github.com/kaspanet/kaspad/peer"
    "github.com/kaspanet/kaspad/wire"
)

// OnHeaders is invoked when a peer receives a headers kaspa
// message. The message is passed down to the sync manager.
func (sp *Peer) OnHeaders(_ *peer.Peer, msg *wire.MsgHeaders) {
    sp.server.SyncManager.QueueHeaders(msg, sp.Peer)
}
@@ -13,7 +13,6 @@ import (
    "math"
    "net"
    "runtime"
    "sort"
    "strconv"
    "strings"
    "sync"
@@ -1059,12 +1058,10 @@ func newPeerConfig(sp *Peer) *peer.Config {
            OnTx:              sp.OnTx,
            OnBlock:           sp.OnBlock,
            OnInv:             sp.OnInv,
            OnHeaders:         sp.OnHeaders,
            OnGetData:         sp.OnGetData,
            OnGetBlockLocator: sp.OnGetBlockLocator,
            OnBlockLocator:    sp.OnBlockLocator,
            OnGetBlockInvs:    sp.OnGetBlockInvs,
            OnGetHeaders:      sp.OnGetHeaders,
            OnFeeFilter:       sp.OnFeeFilter,
            OnFilterAdd:       sp.OnFilterAdd,
            OnFilterClear:     sp.OnFilterClear,
@@ -1663,19 +1660,12 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
        indexManager = indexers.NewManager(indexes)
    }

    // Merge given checkpoints with the default ones unless they are disabled.
    var checkpoints []dagconfig.Checkpoint
    if !config.ActiveConfig().DisableCheckpoints {
        checkpoints = mergeCheckpoints(s.DAGParams.Checkpoints, config.ActiveConfig().AddCheckpoints)
    }

    // Create a new block chain instance with the appropriate configuration.
    var err error
    s.DAG, err = blockdag.New(&blockdag.Config{
        DB:           s.db,
        Interrupt:    interrupt,
        DAGParams:    s.DAGParams,
        Checkpoints:  checkpoints,
        TimeSource:   s.TimeSource,
        SigCache:     s.SigCache,
        IndexManager: indexManager,
@@ -1706,15 +1696,12 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
    }
    s.TxMemPool = mempool.New(&txC)

    cfg := config.ActiveConfig()

    s.SyncManager, err = netsync.New(&netsync.Config{
        PeerNotifier:       &s,
        DAG:                s.DAG,
        TxMemPool:          s.TxMemPool,
        ChainParams:        s.DAGParams,
        DisableCheckpoints: cfg.DisableCheckpoints,
        MaxPeers:           maxPeers,
        PeerNotifier: &s,
        DAG:          s.DAG,
        TxMemPool:    s.TxMemPool,
        ChainParams:  s.DAGParams,
        MaxPeers:     maxPeers,
    })
    if err != nil {
        return nil, err
@@ -2031,59 +2018,6 @@ func isWhitelisted(addr net.Addr) bool {
    return false
}

// checkpointSorter implements sort.Interface to allow a slice of checkpoints to
// be sorted.
type checkpointSorter []dagconfig.Checkpoint

// Len returns the number of checkpoints in the slice. It is part of the
// sort.Interface implementation.
func (s checkpointSorter) Len() int {
    return len(s)
}

// Swap swaps the checkpoints at the passed indices. It is part of the
// sort.Interface implementation.
func (s checkpointSorter) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// Less returns whether the checkpoint with index i should sort before the
// checkpoint with index j. It is part of the sort.Interface implementation.
func (s checkpointSorter) Less(i, j int) bool {
    return s[i].ChainHeight < s[j].ChainHeight
}

// mergeCheckpoints returns two slices of checkpoints merged into one slice
// such that the checkpoints are sorted by height. In the case the additional
// checkpoints contain a checkpoint with the same height as a checkpoint in the
// default checkpoints, the additional checkpoint will take precedence and
// overwrite the default one.
func mergeCheckpoints(defaultCheckpoints, additional []dagconfig.Checkpoint) []dagconfig.Checkpoint {
    // Create a map of the additional checkpoints to remove duplicates while
    // leaving the most recently-specified checkpoint.
    extra := make(map[uint64]dagconfig.Checkpoint)
    for _, checkpoint := range additional {
        extra[checkpoint.ChainHeight] = checkpoint
    }

    // Add all default checkpoints that do not have an override in the
    // additional checkpoints.
    numDefault := len(defaultCheckpoints)
    checkpoints := make([]dagconfig.Checkpoint, 0, numDefault+len(extra))
    for _, checkpoint := range defaultCheckpoints {
        if _, exists := extra[checkpoint.ChainHeight]; !exists {
            checkpoints = append(checkpoints, checkpoint)
        }
    }

    // Append the additional checkpoints and return the sorted results.
    for _, checkpoint := range extra {
        checkpoints = append(checkpoints, checkpoint)
    }
    sort.Sort(checkpointSorter(checkpoints))
    return checkpoints
}

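The merge semantics of the removed mergeCheckpoints are: user-supplied checkpoints override defaults at the same chain height, and the result is sorted by ascending height. A minimal standalone sketch of those semantics, using a simplified Checkpoint struct and sort.Slice instead of the sorter type above:

// Illustrative sketch of the merge-and-sort behavior, not the kaspad function.
package main

import (
    "fmt"
    "sort"
)

type Checkpoint struct {
    ChainHeight uint64
    Hash        string
}

func merge(defaults, additional []Checkpoint) []Checkpoint {
    byHeight := make(map[uint64]Checkpoint)
    for _, cp := range defaults {
        byHeight[cp.ChainHeight] = cp
    }
    for _, cp := range additional {
        byHeight[cp.ChainHeight] = cp // override any default at the same height
    }
    merged := make([]Checkpoint, 0, len(byHeight))
    for _, cp := range byHeight {
        merged = append(merged, cp)
    }
    sort.Slice(merged, func(i, j int) bool {
        return merged[i].ChainHeight < merged[j].ChainHeight
    })
    return merged
}

func main() {
    defaults := []Checkpoint{{100, "aa"}, {200, "bb"}}
    extra := []Checkpoint{{200, "override"}, {300, "cc"}}
    fmt.Println(merge(defaults, extra))
    // [{100 aa} {200 override} {300 cc}]
}
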
// AnnounceNewTransactions generates and relays inventory vectors and notifies
// both websocket and getblocktemplate long poll clients of the passed
// transactions. This function should be called whenever new transactions

@@ -391,8 +391,6 @@ func chainErrToGBTErrString(err error) string {
        return "high-hash"
    case blockdag.ErrBadMerkleRoot:
        return "bad-txnmrklroot"
    case blockdag.ErrBadCheckpoint:
        return "bad-checkpoint"
    case blockdag.ErrFinalityPointTimeTooOld:
        return "finality-point-time-too-old"
    case blockdag.ErrNoTransactions:

@@ -24,7 +24,8 @@ const (
    RejectNonstandard     RejectCode = 0x40
    RejectDust            RejectCode = 0x41
    RejectInsufficientFee RejectCode = 0x42
    RejectCheckpoint      RejectCode = 0x43
    RejectFinality        RejectCode = 0x43
    RejectDifficulty      RejectCode = 0x44
)

// Map of reject codes back to strings for pretty printing.
@@ -36,7 +37,8 @@ var rejectCodeStrings = map[RejectCode]string{
    RejectNonstandard:     "REJECT_NONSTANDARD",
    RejectDust:            "REJECT_DUST",
    RejectInsufficientFee: "REJECT_INSUFFICIENTFEE",
    RejectCheckpoint:      "REJECT_CHECKPOINT",
    RejectFinality:        "REJECT_FINALITY",
    RejectDifficulty:      "REJECT_DIFFICULTY",
}

// String returns the RejectCode in human-readable form.

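The hunks above show RejectFinality taking over the 0x43 value previously assigned to RejectCheckpoint, with the string table updated to match. A standalone mock of that mapping and its String() behavior (this is a sketch in the same style as the wire package, not the package itself):

// Illustrative sketch of the reject-code change and string lookup.
package main

import "fmt"

type RejectCode uint8

const (
    RejectInsufficientFee RejectCode = 0x42
    RejectFinality        RejectCode = 0x43 // was RejectCheckpoint before this change
    RejectDifficulty      RejectCode = 0x44
)

var rejectCodeStrings = map[RejectCode]string{
    RejectInsufficientFee: "REJECT_INSUFFICIENTFEE",
    RejectFinality:        "REJECT_FINALITY",
    RejectDifficulty:      "REJECT_DIFFICULTY",
}

// String returns the human-readable name of the code, or an "Unknown" form
// for unmapped values, matching the behavior exercised by the test below.
func (code RejectCode) String() string {
    if s, ok := rejectCodeStrings[code]; ok {
        return s
    }
    return fmt.Sprintf("Unknown RejectCode (%d)", uint8(code))
}

func main() {
    fmt.Println(RejectFinality)   // REJECT_FINALITY
    fmt.Println(RejectCode(0xff)) // Unknown RejectCode (255)
}
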
@@ -26,7 +26,8 @@ func TestRejectCodeStringer(t *testing.T) {
        {RejectNonstandard, "REJECT_NONSTANDARD"},
        {RejectDust, "REJECT_DUST"},
        {RejectInsufficientFee, "REJECT_INSUFFICIENTFEE"},
        {RejectCheckpoint, "REJECT_CHECKPOINT"},
        {RejectFinality, "REJECT_FINALITY"},
        {RejectDifficulty, "REJECT_DIFFICULTY"},
        {0xff, "Unknown RejectCode (255)"},
    }