mirror of
https://github.com/kaspanet/kaspad.git
synced 2026-03-09 02:12:52 +00:00
* [DEV-74] Implemented and written tests for utxoIterator. * [DEV-74] Improved utxoIterator tests. * [DEV-74] Implemented utxoCollection tests. * [DEV-74] Implemented utxoDiff and its tests. * [DEV-74] Implemented utxoSet. * [DEV-74] Added tests for fullUTXOSet. * [DEV-74] Added some tests for diffUTXOSet. * [DEV-74] Wrote tests for diffUTXOSet iterator. * [DEV-74] Added a negative test for diffUTXOSet.withDiff. * [DEV-74] Wrote tests for addTx. * [DEV-74] Wrote a toRemove test for addTx. * [DEV-74] Changed blockNode.utxoDiff to be of type utxoDiff. * [DEV-74] Removed superfluous whitespace. * [DEV-74] Renamed confusing "previousHash" to "hash". * [DEV-74] Fixed bad test and corrected failing test. * [DEV-74] Moved confusing "negatives" test to be part of the general utxoCollection test. * [DEV-74] Removed utxoDiff.inverted. * [DEV-74] Renamed blockNode.utxoDiff to blockNode.diff. * [DEV-74] Renamed diff to diffFrom for clarity's sake. * [DEV-74] Converted the type of utxoCollection from map[daghash.Hash]map[uint32]*wire.TxOut to map[wire.OutPoint]*UtxoEntry. * [DEV-74] Corrected test names in utxoCollection_test. * [DEV-74] Removed superfluous utxoCollection iterator and moved utxoIterator into utxoset.go. * [DEV-74] Renamed variables in utxoset.go. * [DEV-74] Renamed verifyTx to areInputsInUTXO and removed a superfluous test. * [DEV-74] Fixed bad test logic in TestDiffUTXOSet_addTx. * [DEV-74] Added a few comments. Added reference-equals checks to clone functions. * [DEV-74] Moved utxoCollection and utxoDiff into utxoset.go. * [DEV-74] Wrote explanations for utxoCollection and utxoDiff tests. * [DEV-74] Wrote explanations for all utxoSet tests besides addTx. * [DEV-74] Wrote explanations for TestDiffUTXOSet_addTx. * [DEV-74] Moved the documentation for utxoDiff into utxoset.go. * [DEV-74] Wrote an explanation on utxoSet. * [DEV-75] Found a typo. * [DEV-75] Renamed dag -> virtual, dagView -> virtualBlock. * [DEV-75] Renamed newDAGView to newVirtualBlock.
* [DEV-75] Moved queries for the genesis block from virtualBlock to BlockDAG. * [DEV-75] Got rid of chainView height and findFork. * [DEV-75] Renamed receivers from c to v. * [DEV-75] Updated initBlockNode to allow for virtual (headerless) nodes, updated dbDAGState to contain multiple tip hashes, implemented virtualBlock.setTips. * [DEV-75] Got rid of virtualBlock.equals, which was not used anywhere. * [DEV-75] Got rid of virtualBlock.tip(). * [DEV-75] Got rid of SetTip everywhere except for tests. * [DEV-75] Got rid of Next(). * [DEV-75] Got rid of Contains(). * [DEV-75] Got rid of HeightRange(), as no one was using it. * [DEV-75] Made verifyDAG in rpcserver.go not use block height for iteration. * [DEV-75] Got rid of the part of Manager.Init() that handled "catching up" for side chains, which allowed me to get rid of BlockDAG.BlockByHeight(). * [DEV-75] Dropped support for the RPC command getblockhash since it was getting blocks by their height. * [DEV-75] Dropped getnetworkhashps since it was reliant on height, fixed another couple of RPC commands to return nextHashes instead of a nextHash, and got rid of nodeByHeight in virtualBlock. * [DEV-75] Got rid of setTip(). * [DEV-75] Moved blockLocator() out of virtualBlock and into BlockDAG. Also removed TestLocateInventory(). * [DEV-75] Implemented addTip(). * [DEV-75] Cleaned up virtualblock.go a bit. * [DEV-75] Erased irrelevant tests in virtualblock_test.go. Moved dag-related tests into dag_test.go. * [DEV-75] Removed unnecessary nil check. * [DEV-75] Wrote tests for virtualBlock. * [DEV-75] Fixed bad test, added explanations to tests. * [DEV-89] Fixed a comment. * [DEV-89] Fixed another comment. * [DEV-89] Removed the section in Manager::Init that handled rolling back indexes to the main chain if their tip is an orphaned fork. This could only happen during reorg, which no longer exists. Also removed BlockDAG::MainChainHasBlock, which was no longer used by anyone. 
* [DEV-89] Removed the nil check inside initBlockNode() and amended the one place that called it with nil. * [DEV-89] Renamed the receiver param for BlockDAG from b to dag. * [DEV-89] Moved fastLog2Floor from dag.go to btcutil/btcmath.go. * [DEV-89] Renamed tstTip to testTip. * [DEV-89] Renamed phanom_test.go to phantom_test.go. * [DEV-89] Fixed comments, renamed mainChainHeight to dagHeight. * [DEV-89] Rewrote virtualBlock.addTip(). * [DEV-89] Fixed a comment. (chain -> DAG) * [DEV-89] Fixed another chain -> DAG comment.
251 lines
8.4 KiB
Go
251 lines
8.4 KiB
Go
// Copyright (c) 2013-2017 The btcsuite developers
|
|
// Use of this source code is governed by an ISC
|
|
// license that can be found in the LICENSE file.
|
|
|
|
package blockdag
|
|
|
|
import (
|
|
"fmt"
|
|
"time"
|
|
|
|
"github.com/daglabs/btcd/dagconfig/daghash"
|
|
"github.com/daglabs/btcd/database"
|
|
"github.com/daglabs/btcutil"
|
|
)
|
|
|
|
// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing block processing and consensus rules checks.
type BehaviorFlags uint32

const (
	// BFFastAdd may be set to indicate that several checks can be avoided
	// for the block since it is already known to fit into the DAG due to
	// already proving it correct links into the DAG up to a known
	// checkpoint. This is primarily used for headers-first mode.
	BFFastAdd BehaviorFlags = 1 << iota

	// BFNoPoWCheck may be set to indicate the proof of work check which
	// ensures a block hashes to a value less than the required target will
	// not be performed.
	BFNoPoWCheck

	// BFNone is a convenience value to specifically indicate no flags.
	BFNone BehaviorFlags = 0
)
|
|
|
|
// blockExists determines whether a block with the given hash exists either in
|
|
// the main chain or any side chains.
|
|
//
|
|
// This function is safe for concurrent access.
|
|
func (dag *BlockDAG) blockExists(hash *daghash.Hash) (bool, error) {
|
|
// Check block index first (could be main chain or side chain blocks).
|
|
if dag.index.HaveBlock(hash) {
|
|
return true, nil
|
|
}
|
|
|
|
// Check in the database.
|
|
var exists bool
|
|
err := dag.db.View(func(dbTx database.Tx) error {
|
|
var err error
|
|
exists, err = dbTx.HasBlock(hash)
|
|
if err != nil || !exists {
|
|
return err
|
|
}
|
|
|
|
// Ignore side chain blocks in the database. This is necessary
|
|
// because there is not currently any record of the associated
|
|
// block index data such as its block height, so it's not yet
|
|
// possible to efficiently load the block and do anything useful
|
|
// with it.
|
|
//
|
|
// Ultimately the entire block index should be serialized
|
|
// instead of only the current main chain so it can be consulted
|
|
// directly.
|
|
_, err = dbFetchHeightByHash(dbTx, hash)
|
|
if isNotInDAGErr(err) {
|
|
exists = false
|
|
return nil
|
|
}
|
|
return err
|
|
})
|
|
return exists, err
|
|
}
|
|
|
|
// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to maybeAcceptBlock.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
	// Start with processing at least the passed hash. Leave a little room
	// for additional orphan blocks that need to be processed without
	// needing to grow the array in the common case.
	processHashes := make([]*daghash.Hash, 0, 10)
	processHashes = append(processHashes, hash)
	for len(processHashes) > 0 {
		// Pop the first hash to process from the slice.
		processHash := processHashes[0]
		processHashes[0] = nil // Prevent GC leak.
		processHashes = processHashes[1:]

		// Look up all orphans that are parented by the block we just
		// accepted. This will typically only be one, but it could
		// be multiple if multiple blocks are mined and broadcast
		// around the same time. The one with the most proof of work
		// will eventually win out. An indexing for loop is
		// intentionally used over a range here as range does not
		// reevaluate the slice on each iteration nor does it adjust the
		// index for the modified slice.
		for i := 0; i < len(dag.prevOrphans[*processHash]); i++ {
			orphan := dag.prevOrphans[*processHash][i]
			if orphan == nil {
				log.Warnf("Found a nil entry at index %d in the "+
					"orphan dependency list for block %v", i,
					processHash)
				continue
			}

			// Remove the orphan from the orphan pool.
			orphanHash := orphan.block.Hash()
			dag.removeOrphanBlock(orphan)
			// removeOrphanBlock appears to shrink the slice behind
			// dag.prevOrphans[*processHash], shifting later entries
			// down a slot, so step the index back to avoid skipping
			// the element now at position i. NOTE(review): verify
			// against removeOrphanBlock's implementation.
			i--

			// Potentially accept the block into the block DAG.
			err := dag.maybeAcceptBlock(orphan.block, flags)
			if err != nil {
				return err
			}

			// Add this block to the list of blocks to process so
			// any orphan blocks that depend on this block are
			// handled too.
			processHashes = append(processHashes, orphanHash)
		}
	}
	return nil
}
|
|
|
|
// ProcessBlock is the main workhorse for handling insertion of new blocks into
|
|
// the block chain. It includes functionality such as rejecting duplicate
|
|
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
|
|
// the block DAG.
|
|
//
|
|
// When no errors occurred during processing, the first return value indicates
|
|
// whether or not the block is an orphan.
|
|
//
|
|
// This function is safe for concurrent access.
|
|
func (dag *BlockDAG) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bool, error) {
|
|
dag.dagLock.Lock()
|
|
defer dag.dagLock.Unlock()
|
|
|
|
fastAdd := flags&BFFastAdd == BFFastAdd
|
|
|
|
blockHash := block.Hash()
|
|
log.Tracef("Processing block %v", blockHash)
|
|
|
|
// The block must not already exist in the main chain or side chains.
|
|
exists, err := dag.blockExists(blockHash)
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
if exists {
|
|
str := fmt.Sprintf("already have block %v", blockHash)
|
|
return false, ruleError(ErrDuplicateBlock, str)
|
|
}
|
|
|
|
// The block must not already exist as an orphan.
|
|
if _, exists := dag.orphans[*blockHash]; exists {
|
|
str := fmt.Sprintf("already have block (orphan) %v", blockHash)
|
|
return false, ruleError(ErrDuplicateBlock, str)
|
|
}
|
|
|
|
// Perform preliminary sanity checks on the block and its transactions.
|
|
err = checkBlockSanity(block, dag.dagParams.PowLimit, dag.timeSource, flags)
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
|
|
// Find the previous checkpoint and perform some additional checks based
|
|
// on the checkpoint. This provides a few nice properties such as
|
|
// preventing old side chain blocks before the last checkpoint,
|
|
// rejecting easy to mine, but otherwise bogus, blocks that could be
|
|
// used to eat memory, and ensuring expected (versus claimed) proof of
|
|
// work requirements since the previous checkpoint are met.
|
|
blockHeader := &block.MsgBlock().Header
|
|
checkpointNode, err := dag.findPreviousCheckpoint()
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
if checkpointNode != nil {
|
|
// Ensure the block timestamp is after the checkpoint timestamp.
|
|
checkpointTime := time.Unix(checkpointNode.timestamp, 0)
|
|
if blockHeader.Timestamp.Before(checkpointTime) {
|
|
str := fmt.Sprintf("block %v has timestamp %v before "+
|
|
"last checkpoint timestamp %v", blockHash,
|
|
blockHeader.Timestamp, checkpointTime)
|
|
return false, ruleError(ErrCheckpointTimeTooOld, str)
|
|
}
|
|
if !fastAdd {
|
|
// Even though the checks prior to now have already ensured the
|
|
// proof of work exceeds the claimed amount, the claimed amount
|
|
// is a field in the block header which could be forged. This
|
|
// check ensures the proof of work is at least the minimum
|
|
// expected based on elapsed time since the last checkpoint and
|
|
// maximum adjustment allowed by the retarget rules.
|
|
duration := blockHeader.Timestamp.Sub(checkpointTime)
|
|
requiredTarget := CompactToBig(dag.calcEasiestDifficulty(
|
|
checkpointNode.bits, duration))
|
|
currentTarget := CompactToBig(blockHeader.Bits)
|
|
if currentTarget.Cmp(requiredTarget) > 0 {
|
|
str := fmt.Sprintf("block target difficulty of %064x "+
|
|
"is too low when compared to the previous "+
|
|
"checkpoint", currentTarget)
|
|
return false, ruleError(ErrDifficultyTooLow, str)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Handle orphan blocks.
|
|
allPrevBlocksExist := true
|
|
for _, prevBlock := range blockHeader.PrevBlocks {
|
|
prevBlockExists, err := dag.blockExists(&prevBlock)
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
|
|
if !prevBlockExists {
|
|
log.Infof("Adding orphan block %v with parent %v", blockHash, prevBlock)
|
|
dag.addOrphanBlock(block)
|
|
|
|
allPrevBlocksExist = false
|
|
}
|
|
}
|
|
|
|
if !allPrevBlocksExist {
|
|
return true, nil
|
|
}
|
|
|
|
// The block has passed all context independent checks and appears sane
|
|
// enough to potentially accept it into the block DAG.
|
|
err = dag.maybeAcceptBlock(block, flags)
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
|
|
// Accept any orphan blocks that depend on this block (they are
|
|
// no longer orphans) and repeat for those accepted blocks until
|
|
// there are no more.
|
|
err = dag.processOrphans(blockHash, flags)
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
|
|
log.Debugf("Accepted block %v", blockHash)
|
|
|
|
return false, nil
|
|
}
|