// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"
	"time"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
)

// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing DAG processing and consensus rules checks.
type BehaviorFlags uint32

const (
	// BFFastAdd may be set to indicate that several checks can be avoided
	// for the block since it is already known to fit into the DAG because
	// it has already been proven to correctly link into the DAG up to a
	// known checkpoint. This is primarily used for headers-first mode.
	BFFastAdd BehaviorFlags = 1 << iota

	// BFNoPoWCheck may be set to indicate that the proof of work check,
	// which ensures a block hashes to a value less than the required
	// target, will not be performed.
	BFNoPoWCheck

	// BFWasUnorphaned may be set to indicate that a block was just now
	// unorphaned.
	BFWasUnorphaned

	// BFNone is a convenience value to specifically indicate no flags.
	BFNone BehaviorFlags = 0
)

// BlockExists determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) (bool, error) {
	// Check the in-memory block index first.
	if dag.index.HaveBlock(hash) {
		return true, nil
	}

	// Check in the database.
	var exists bool
	err := dag.db.View(func(dbTx database.Tx) error {
		var err error
		exists, err = dbTx.HasBlock(hash)
		return err
	})
	return exists, err
}

// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are passed along to maybeAcceptBlock.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
	// Start with processing at least the passed hash. Leave a little room
	// for additional orphan blocks that need to be processed without
	// needing to grow the slice in the common case.
	processHashes := make([]*daghash.Hash, 0, 10)
	processHashes = append(processHashes, hash)
	for len(processHashes) > 0 {
		// Pop the first hash to process from the slice.
		processHash := processHashes[0]
		processHashes[0] = nil // Prevent GC leak.
		processHashes = processHashes[1:]

		// Look up all orphans that are parented by the block we just
		// accepted. This will typically only be one, but it could
		// be multiple if multiple blocks are mined and broadcast
		// around the same time. The one with the most proof of work
		// will eventually win out. An indexing for loop is
		// intentionally used over a range here as range does not
		// reevaluate the slice on each iteration nor does it adjust
		// the index for the modified slice.
		for i := 0; i < len(dag.prevOrphans[*processHash]); i++ {
			orphan := dag.prevOrphans[*processHash][i]
			if orphan == nil {
				log.Warnf("Found a nil entry at index %d in the "+
					"orphan dependency list for block %s", i,
					processHash)
				continue
			}

			// Skip this orphan if one or more of its parents are
			// still missing.
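			// When that is the case, lookupParentNodes fails with a
			// RuleError whose code is ErrParentBlockUnknown; that error
			// code is used below to leave the orphan in the pool for a
			// later pass.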
			_, err := lookupParentNodes(orphan.block, dag)
			if err != nil {
				if ruleErr, ok := err.(RuleError); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
					continue
				}
				return err
			}

			// Remove the orphan from the orphan pool.
			orphanHash := orphan.block.Hash()
			dag.removeOrphanBlock(orphan)
			i--

			// Potentially accept the block into the block DAG.
			err = dag.maybeAcceptBlock(orphan.block, flags|BFWasUnorphaned)
			if err != nil {
				return err
			}

			// Add this block to the list of blocks to process so
			// any orphan blocks that depend on this block are
			// handled too.
			processHashes = append(processHashes, orphanHash)
		}
	}
	return nil
}

// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block DAG. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block DAG.
//
// When no errors occur during processing, the first return value indicates
// whether or not the block is an orphan.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool, error) {
	dag.dagLock.Lock()
	defer dag.dagLock.Unlock()

	fastAdd := flags&BFFastAdd == BFFastAdd

	blockHash := block.Hash()
	log.Tracef("Processing block %s", blockHash)

	// The block must not already exist in the DAG.
	exists, err := dag.BlockExists(blockHash)
	if err != nil {
		return false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %s", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := dag.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %s", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = dag.checkBlockSanity(block, flags)
	if err != nil {
		return false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint. This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointNode, err := dag.findPreviousCheckpoint()
	if err != nil {
		return false, err
	}
	if checkpointNode != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointTime := time.Unix(checkpointNode.timestamp, 0)
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %s has timestamp %s before "+
				"last checkpoint timestamp %s", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, ruleError(ErrCheckpointTimeTooOld, str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged. This
			// check ensures the proof of work is at least the minimum
			// expected based on the elapsed time since the last checkpoint
			// and the maximum adjustment allowed by the retarget rules.
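			// Note that a numerically larger target corresponds to a
			// lower difficulty, so the block is rejected when its claimed
			// target exceeds the easiest target reachable since the
			// checkpoint.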
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := util.CompactToBig(dag.calcEasiestDifficulty(
				checkpointNode.bits, duration))
			currentTarget := util.CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	allParentsExist := true
	for _, parentHash := range blockHeader.ParentHashes {
		parentExists, err := dag.BlockExists(parentHash)
		if err != nil {
			return false, err
		}

		if !parentExists {
			allParentsExist = false
		}
	}

	if !allParentsExist {
		log.Infof("Adding orphan block %s", blockHash)
		dag.addOrphanBlock(block)

		return true, nil
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block DAG.
	err = dag.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, err
	}

	// Accept any orphan blocks that depend on this block (they are
	// no longer orphans) and repeat for those accepted blocks until
	// there are no more.
	err = dag.processOrphans(blockHash, flags)
	if err != nil {
		return false, err
	}

	log.Debugf("Accepted block %s", blockHash)

	return false, nil
}
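
// processIncomingBlock sketches how calling code might hand a newly received
// block to the DAG and react to the result. It is illustrative only:
// requestMissingParents and notifyAccepted are hypothetical callbacks supplied
// by the caller, not part of this package.
func processIncomingBlock(dag *BlockDAG, block *util.Block,
	requestMissingParents func(*daghash.Hash), notifyAccepted func(*daghash.Hash)) error {

	// BFNone requests full validation. A caller syncing headers-first
	// below a known checkpoint could pass BFFastAdd instead to skip the
	// checks the checkpoint already guarantees.
	isOrphan, err := dag.ProcessBlock(block, BFNone)
	if err != nil {
		return err
	}

	if isOrphan {
		// One or more parents are unknown. ProcessBlock has already
		// placed the block in the orphan pool, so it will be revisited
		// automatically once its parents are accepted; the caller
		// typically asks its peers for the missing parents here.
		requestMissingParents(block.Hash())
		return nil
	}

	notifyAccepted(block.Hash())
	return nil
}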