diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..0d760cbb4 --- /dev/null +++ b/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2013 Conformal Systems LLC. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md index 37420b770..2c9bf4d2b 100644 --- a/README.md +++ b/README.md @@ -2,3 +2,143 @@ btcchain ======== Package btcchain implements bitcoin block handling and chain selection rules. +The test coverage is currently only around 60%, but will be increasing over +time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if +you are running a POSIX OS, you can run the `cov_report.sh` script for a +real-time report. Package btcchain is licensed under the liberal ISC license. + +There is an associated blog post about the release of this package +[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/). + +This package is one of the core packages from btcd, an alternative full-node +implementation of bitcoin which is under active development by Conformal. +Although it was primarily written for btcd, this package has intentionally been +designed so it can be used as a standalone package for any projects needing to +handle processing of blocks into the bitcoin block chain. + +## Documentation + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/conformal/btcchain + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/conformal/btcchain + +## Installation + +```bash +$ go get github.com/conformal/btcchain +``` + +## Bitcoin Chain Processing Overview + +Before a block is allowed into the block chain, it must go through an intensive +series of validation rules. 
The following list serves as a general outline of +those rules to provide some intuition into what is going on under the hood, but +is by no means exhaustive: + + - Reject duplicate blocks + - Perform a series of sanity checks on the block and its transactions such as + verifying proof of work, timestamps, number and character of transactions, + transaction amounts, script complexity, and merkle root calculations + - Compare the block against predetermined checkpoints for expected timestamps + and difficulty based on elapsed time since the checkpoint + - Save the most recent orphan blocks for a limited time in case their parent + blocks become available + - Stop processing if the block is an orphan as the rest of the processing + depends on the block's position within the block chain + - Perform a series of more thorough checks that depend on the block's position + within the block chain such as verifying block difficulties adhere to + difficulty retarget rules, timestamps are after the median of the last + several blocks, all transactions are finalized, checkpoint blocks match, and + block versions are in line with the previous blocks + - Determine how the block fits into the chain and perform different actions + accordingly in order to ensure any side chains which have higher difficulty + than the main chain become the new main chain + - When a block is being connected to the main chain (either through + reorganization of a side chain to the main chain or just extending the + main chain), perform further checks on the block's transactions such as + verifying transaction duplicates, script complexity for the combination of + connected scripts, coinbase maturity, double spends, and connected + transaction values + - Run the transaction scripts to verify the spender is allowed to spend the + coins + - Insert the block into the block database + +## Block Processing Example + +The following example program demonstrates processing a block. This example +intentionally causes an error by attempting to process a duplicate block. + +```Go + package main + + import ( + "fmt" + "github.com/conformal/btcchain" + "github.com/conformal/btcdb" + _ "github.com/conformal/btcdb/sqlite3" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + ) + + func main() { + // First, we create a new database to store the accepted blocks into. + // Typically this would be opening an existing database, but we create + // a new db here so this is a complete working example. + db, err := btcdb.CreateDB("sqlite", "example.db") + if err != nil { + fmt.Printf("Failed to create database: %v\n", err) + return + } + defer db.Close() + + // Create a new BlockChain instance using the underlying database for + // the main bitcoin network and ignore notifications. + chain := btcchain.New(db, btcwire.MainNet, nil) + + // Process a block. For this example, we are going to intentionally + // cause an error by trying to process the genesis block which already + // exists. 
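        // The error returned below is how callers learn that a block was
        // rejected; a well-formed new block would instead return a nil
        // error after being accepted into the chain.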
+ block := btcutil.NewBlock(&btcwire.GenesisBlock, btcwire.ProtocolVersion) + err = chain.ProcessBlock(block) + if err != nil { + fmt.Printf("Failed to process block: %v\n", err) + return + } + } +``` + +## TODO + +- Increase test coverage +- Add testnet specific rules +- Profile and optimize +- Expose some APIs for block verification (without actually inserting it) and + transaction input lookups + +## GPG Verification Key + +All official release tags are signed by Conformal so users can ensure the code +has not been tampered with and is coming from Conformal. To verify the +signature perform the following: + +- Download the public key from the Conformal website at + https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt + +- Import the public key into your GPG keyring: + ```bash + gpg --import GIT-GPG-KEY-conformal.txt + ``` + +- Verify the release tag with the following command where `TAG_NAME` is a + placeholder for the specific tag: + ```bash + git tag -v TAG_NAME + ``` + +## License + +Package btcchain is licensed under the liberal ISC License. diff --git a/accept.go b/accept.go new file mode 100644 index 000000000..e52dad09e --- /dev/null +++ b/accept.go @@ -0,0 +1,159 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" +) + +// maybeAcceptBlock potentially accepts a block into the memory block chain. +// It performs several validation checks which depend on its position within +// the block chain before adding it. The block is expected to have already gone +// through ProcessBlock before calling this function with it. +func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block) error { + // Get a block node for the block previous to this one. Will be nil + // if this is the genesis block. + prevNode, err := b.getPrevNodeFromBlock(block) + if err != nil { + return err + } + + // The height of this block one more than the referenced previous block. + blockHeight := int64(0) + if prevNode != nil { + blockHeight = prevNode.height + 1 + } + + // Ensure the difficulty specified in the block header matches the + // calculated difficulty based on the previous block and difficulty + // retarget rules. + blockHeader := block.MsgBlock().Header + expectedDifficulty, err := b.calcNextRequiredDifficulty(prevNode) + if err != nil { + return err + } + blockDifficulty := blockHeader.Bits + if blockDifficulty != expectedDifficulty { + str := "block difficulty of %d is not the expected value of %d" + str = fmt.Sprintf(str, blockDifficulty, expectedDifficulty) + return RuleError(str) + } + + // Ensure the timestamp for the block header is after the median time of + // the last several blocks (medianTimeBlocks). + medianTime, err := b.calcPastMedianTime(prevNode) + if err != nil { + return err + } + if !blockHeader.Timestamp.After(medianTime) { + str := "block timestamp of %v is not after expected %v" + str = fmt.Sprintf(str, blockHeader.Timestamp, medianTime) + return RuleError(str) + } + + // Ensure all transactions in the block are finalized. + for i, tx := range block.MsgBlock().Transactions { + if !isFinalizedTransaction(tx, blockHeight, blockHeader.Timestamp) { + // Use the TxSha function from the block rather + // than the transaction itself since the block version + // is cached. 
Also, it's safe to ignore the error here + // since the only reason TxSha can fail is if the index + // is out of range which is impossible here. + txSha, _ := block.TxSha(i) + str := fmt.Sprintf("block contains unfinalized "+ + "transaction %v", txSha) + return RuleError(str) + } + } + + // Ensure chain matches up to predetermined checkpoints. + // It's safe to ignore the error on Sha since it's already cached. + blockHash, _ := block.Sha() + if !b.verifyCheckpoint(blockHeight, blockHash) { + // TODO(davec): This should probably be a distinct error type + // (maybe CheckpointError). Since this error shouldn't happen + // unless the peer is connected to a rogue network serving up an + // alternate chain, the caller would likely need to react by + // disconnecting peers and rolling back the chain to the last + // known good point. + str := fmt.Sprintf("block at height %d does not match "+ + "checkpoint hash", blockHeight) + return RuleError(str) + } + + // Reject version 1 blocks once a majority of the network has upgraded. + // Rules: + // 95% (950 / 1000) for main network + // 75% (75 / 100) for the test network + // This is part of BIP_0034. + if blockHeader.Version == 1 { + minRequired := uint64(950) + numToCheck := uint64(1000) + if b.btcnet == btcwire.TestNet3 || b.btcnet == btcwire.TestNet { + minRequired = 75 + numToCheck = 100 + } + if b.isMajorityVersion(2, prevNode, minRequired, numToCheck) { + str := "new blocks with version %d are no longer valid" + str = fmt.Sprintf(str, blockHeader.Version) + return RuleError(str) + } + } + + // Ensure coinbase starts with serialized block heights for blocks + // whose version is the serializedHeightVersion or newer once a majority + // of the network has upgraded. + // Rules: + // 75% (750 / 1000) for main network + // 51% (51 / 100) for the test network + // This is part of BIP_0034. + if blockHeader.Version >= serializedHeightVersion { + minRequired := uint64(750) + numToCheck := uint64(1000) + if b.btcnet == btcwire.TestNet3 || b.btcnet == btcwire.TestNet { + minRequired = 51 + numToCheck = 100 + } + if b.isMajorityVersion(serializedHeightVersion, prevNode, + minRequired, numToCheck) { + + expectedHeight := int64(0) + if prevNode != nil { + expectedHeight = prevNode.height + 1 + } + coinbaseTx := block.MsgBlock().Transactions[0] + err := checkSerializedHeight(coinbaseTx, expectedHeight) + if err != nil { + return err + } + } + } + + // Create a new block node for the block and add it to the in-memory + // block chain (could be either a side chain or the main chain). + newNode := newBlockNode(block) + if prevNode != nil { + newNode.parent = prevNode + newNode.height = blockHeight + newNode.workSum.Add(prevNode.workSum, newNode.workSum) + } + + // Connect the passed block to the chain while respecting proper chain + // selection according to the chain with the most proof of work. This + // also handles validation of the transaction scripts. + err = b.connectBestChain(newNode, block) + if err != nil { + return err + } + + // Notify the caller that the new block was accepted into the block + // chain. The caller would typically want to react by relaying the + // inventory to other peers. + b.sendNotification(NTBlockAccepted, block) + + return nil +} diff --git a/chain.go b/chain.go new file mode 100644 index 000000000..40e378abb --- /dev/null +++ b/chain.go @@ -0,0 +1,716 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
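// The NTBlockAccepted, NTBlockConnected, and NTBlockDisconnected
// notifications sent by this package are delivered on the channel passed to
// New.  A minimal sketch of a caller draining that channel follows; the Type
// field on Notification is an assumption here (see the Notification type for
// the actual definition), and block/db are obtained elsewhere:
//
//	notifyChan := make(chan *btcchain.Notification)
//	chain := btcchain.New(db, btcwire.MainNet, notifyChan)
//	go func() {
//		for n := range notifyChan {
//			switch n.Type {
//			case btcchain.NTBlockAccepted:
//				// Typically relay the block inventory to peers.
//			case btcchain.NTBlockConnected, btcchain.NTBlockDisconnected:
//				// Typically update wallets.
//			}
//		}
//	}()
//	if err := chain.ProcessBlock(block); err != nil {
//		// Handle the rejected block.
//	}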
+ +package btcchain + +import ( + "container/list" + "fmt" + "github.com/conformal/btcdb" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "math/big" + "sort" + "time" +) + +// maxOrphanBlocks is the maximum number of orphan blocks that can be queued. +const maxOrphanBlocks = 100 + +// blockNode represents a block within the block chain and is primarily used to +// aid in selecting the best chain to be the main chain. The main chain is +// stored into the block database. +type blockNode struct { + // parent is the parent block for this node. + parent *blockNode + + // children contains the child nodes for this node. Typically there + // will only be one, but sometimes there can be more than one and that + // is when the best chain selection algorithm is used. + children []*blockNode + + // hash is the double sha 256 of the block. + hash *btcwire.ShaHash + + // height is the position in the block chain. + height int64 + + // workSum is the total amount of work in the chain up to and including + // this node. + workSum *big.Rat + + // inMainChain denotes whether the block node is currently on the + // the main chain or not. This is used to help find the common + // ancestor when switching chains. + inMainChain bool + + // Some fields from block headers to aid in best chain selection. + version uint32 + bits uint32 + timestamp time.Time +} + +// newBlockNode returns a new block node for the given block. It is completely +// disconnected from the chain and the workSum value is just the work for the +// passed block. The work sum is updated accordingly when the node is inserted +// into a chain. +func newBlockNode(block *btcutil.Block) *blockNode { + // Get the block sha. It's ok to ignore the error here since + // sha has already been called and an error there would have caused + // an exit before this function is called. + blockSha, _ := block.Sha() + + blockHeader := block.MsgBlock().Header + node := blockNode{ + hash: blockSha, + workSum: calcWork(blockHeader.Bits), + height: block.Height(), + version: blockHeader.Version, + bits: blockHeader.Bits, + timestamp: blockHeader.Timestamp, + } + return &node +} + +// orphanBlock represents a block that we don't yet have the parent for. It +// is a normal block plus an expiration time to prevent caching the orphan +// forever. +type orphanBlock struct { + block *btcutil.Block + expiration time.Time +} + +// addChildrenWork adds the passed work amount to all children all the way +// down the chain. It is used primarily to allow a new node to be dynamically +// inserted from the database into the memory chain prior to nodes we already +// have and update their work values accordingly. +func addChildrenWork(node *blockNode, work *big.Rat) { + for _, childNode := range node.children { + childNode.workSum.Add(childNode.workSum, work) + addChildrenWork(childNode, work) + } +} + +// removeChildNode deletes node from the provided slice of child block +// nodes. It ensures the final pointer reference is set to nil to prevent +// potential memory leaks. The original slice is returned unmodified if node +// is invalid or not in the slice. +func removeChildNode(children []*blockNode, node *blockNode) []*blockNode { + if node == nil { + return children + } + for i, n := range children { + if n.hash.IsEqual(node.hash) { + copy(children[i:], children[i+1:]) + children[len(children)-1] = nil + return children[:len(children)-1] + } + } + return children +} + +// BlockChain provides functions for working with the bitcoin block chain. 
+// It includes functionality such as rejecting duplicate blocks, ensuring blocks +// follow all rules, orphan handling, checkpoint handling, and best chain +// selection with reorganization. +type BlockChain struct { + db btcdb.Db + btcnet btcwire.BitcoinNet + notifications chan *Notification + root *blockNode + bestChain *blockNode + index map[btcwire.ShaHash]*blockNode + depNodes map[btcwire.ShaHash][]*blockNode + orphans map[btcwire.ShaHash]*orphanBlock + prevOrphans map[btcwire.ShaHash][]*orphanBlock + oldestOrphan *orphanBlock + blockCache map[btcwire.ShaHash]*btcutil.Block + noVerify bool + noCheckpoints bool +} + +// DisableVerify provides a mechanism to disable transaction script validation +// which you DO NOT want to do in production as it could allow double spends +// and othe undesirable things. It is provided only for debug purposes since +// script validation is extremely intensive and when debugging it is sometimes +// nice to quickly get the chain. +func (b *BlockChain) DisableVerify(disable bool) { + b.noVerify = disable +} + +// getOrphanRoot returns the head of the chain for the provided hash from the +// map of orphan blocks. +func (b *BlockChain) getOrphanRoot(sha *btcwire.ShaHash) *btcwire.ShaHash { + // Keep looping while the parent of each orphaned block is + // known and is an orphan itself. + prevHash := sha + for { + orphan, exists := b.orphans[*prevHash] + if !exists { + break + } + prevHash = &orphan.block.MsgBlock().Header.PrevBlock + } + + return prevHash +} + +// removeOrphanBlock removes the passed orphan block from the orphan pool and +// previous orphan index. +func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) { + // Remove the orphan block from the orphan pool. It's safe to ignore + // the error on Sha since it's cached. + orphanHash, _ := orphan.block.Sha() + delete(b.orphans, *orphanHash) + + // Remove the reference from the previous orphan index too. + prevHash := &orphan.block.MsgBlock().Header.PrevBlock + orphans := b.prevOrphans[*prevHash] + for i, ob := range orphans { + hash, _ := ob.block.Sha() + if hash.IsEqual(orphanHash) { + copy(orphans[i:], orphans[i+1:]) + orphans[len(orphans)-1] = nil + b.prevOrphans[*prevHash] = orphans[:len(orphans)-1] + } + } + + // Remove the map entry altogether if there are no longer any orphans + // which depend on the parent hash. + if len(b.prevOrphans[*prevHash]) == 0 { + delete(b.prevOrphans, *prevHash) + } +} + +// addOrphanBlock adds the passed block (which is already determined to be +// an orphan prior calling this function) to the orphan pool. It lazily cleans +// up any expired blocks so a separate cleanup poller doesn't need to be run. +// It also imposes a maximum limit on the number of outstanding orphan +// blocks and will remove the oldest received orphan block if the limit is +// exceeded. +func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { + // Remove expired orphan blocks. + for _, oBlock := range b.orphans { + if time.Now().After(oBlock.expiration) { + b.removeOrphanBlock(oBlock) + continue + } + + // Update the oldest orphan block pointer so it can be discarded + // in case the orphan pool fills up. + if b.oldestOrphan == nil || oBlock.expiration.Before(b.oldestOrphan.expiration) { + b.oldestOrphan = oBlock + } + } + + // Limit orphan blocks to prevent memory exhaustion. + if len(b.orphans)+1 > maxOrphanBlocks { + // Remove the oldest orphan to make room for the new one. + b.removeOrphanBlock(b.oldestOrphan) + b.oldestOrphan = nil + } + + // Get the block sha. 
It is safe to ignore the error here since any + // errors would've been caught prior to calling this function. + blockSha, _ := block.Sha() + + // Insert the block into the orphan map with an expiration time + // 1 hour from now. + expiration := time.Now().Add(time.Hour) + oBlock := &orphanBlock{ + block: block, + expiration: expiration, + } + b.orphans[*blockSha] = oBlock + + // Add to previous hash lookup index for faster dependency lookups. + prevHash := &block.MsgBlock().Header.PrevBlock + b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock) + + return +} + +// loadBlockNode loads the block identified by hash from the block database, +// creates a block node from it, and updates the memory block chain accordingly. +// It is used mainly to dynamically load previous blocks from database as they +// are needed to avoid needing to put the entire block chain in memory. +func (b *BlockChain) loadBlockNode(hash *btcwire.ShaHash) (*blockNode, error) { + // Load the block from the db. + block, err := b.db.FetchBlockBySha(hash) + if err != nil { + return nil, err + } + + // Create the new block node for the block and set the work. + node := newBlockNode(block) + node.inMainChain = true + + // Add the node to the chain. + // There are several possibilities here: + // 1) This node is a child of an existing block node + // 2) This node is the parent of one or more nodes + // 3) Neither 1 or 2 is true, and this is not the first node being + // added to the tree which implies it's an orphan block and + // therefore is an error to insert into the chain + // 4) Neither 1 or 2 is true, but this is the first node being added + // to the tree, so it's the root. + prevHash := &block.MsgBlock().Header.PrevBlock + if parentNode, ok := b.index[*prevHash]; ok { + // Case 1 -- This node is a child of an existing block node. + // Update the node's work sum with the sum of the parent node's + // work sum and this node's work, append the node as a child of + // the parent node and set this node's parent to the parent + // node. + node.workSum = node.workSum.Add(parentNode.workSum, node.workSum) + parentNode.children = append(parentNode.children, node) + node.parent = parentNode + + } else if childNodes, ok := b.depNodes[*hash]; ok { + // Case 2 -- This node is the parent of one or more nodes. + // Connect this block node to all of its children and update + // all of the children (and their children) with the new work + // sums. + for _, childNode := range childNodes { + childNode.parent = node + node.children = append(node.children, childNode) + addChildrenWork(childNode, node.workSum) + b.root = node + } + + } else { + // Case 3 -- The node does't have a parent and is not the parent + // of another node. This is only acceptable for the first node + // inserted into the chain. Otherwise it means an arbitrary + // orphan block is trying to be loaded which is not allowed. + if b.root != nil { + str := "loadBlockNode: attempt to insert orphan block %v" + return nil, fmt.Errorf(str, hash) + } + + // Case 4 -- This is the root since it's the first and only node. + b.root = node + } + + // Add the new node to the indices for faster lookups. + b.index[*hash] = node + b.depNodes[*prevHash] = append(b.depNodes[*prevHash], node) + + return node, nil +} + +// getPrevNodeFromBlock returns a block node for the block previous to the +// passed block (the passed block's parent). When it is already in the memory +// block chain, it simply returns it. 
Otherwise, it loads the previous block +// from the block database, creates a new block node from it, and returns it. +// The returned node will be nil if the genesis block is passed. +func (b *BlockChain) getPrevNodeFromBlock(block *btcutil.Block) (*blockNode, error) { + // Genesis block. + prevHash := &block.MsgBlock().Header.PrevBlock + if prevHash.IsEqual(zeroHash) { + return nil, nil + } + + // Return the existing previous block node if it's already there. + if bn, ok := b.index[*prevHash]; ok { + return bn, nil + } + + // Dynamically load the previous block from the block database, create + // a new block node for it, and update the memory chain accordingly. + prevBlockNode, err := b.loadBlockNode(prevHash) + if err != nil { + return nil, err + } + return prevBlockNode, nil +} + +// getPrevNodeFromNode returns a block node for the block previous to the +// passed block node (the passed block node's parent). When the node is already +// connected to a parent, it simply returns it. Otherwise, it loads the +// associated block from the database to obtain the previous hash and uses that +// to dynamically create a new block node and return it. The memory block +// chain is updated accordingly. The returned node will be nil if the genesis +// block is passed. +func (b *BlockChain) getPrevNodeFromNode(node *blockNode) (*blockNode, error) { + // Return the existing previous block node if it's already there. + if node.parent != nil { + return node.parent, nil + } + + // Genesis block. + if node.hash.IsEqual(&btcwire.GenesisHash) { + return nil, nil + } + + // Load the actual block for this block node from the db to ascertain + // the previous hash. + block, err := b.db.FetchBlockBySha(node.hash) + if err != nil { + return nil, err + } + + // Dynamically load the previous block from the block database, create + // a new block node for it, and update the memory chain accordingly. + prevHash := &block.MsgBlock().Header.PrevBlock + prevBlockNode, err := b.loadBlockNode(prevHash) + if err != nil { + return nil, err + } + + return prevBlockNode, nil +} + +// isMajorityVersion determines if a previous number of blocks in the chain +// starting with startNode are at least the minimum passed version. +func (b *BlockChain) isMajorityVersion(minVer uint32, startNode *blockNode, numRequired, numToCheck uint64) bool { + numFound := uint64(0) + iterNode := startNode + for i := uint64(0); i < numToCheck && iterNode != nil; i++ { + // This node has a version that is at least the minimum version. + if iterNode.version >= minVer { + numFound++ + } + + // Get the previous block node. This function is used over + // simply accessing iterNode.parent directly as it will + // dynamically create previous block nodes as needed. This + // helps allow only the pieces of the chain that are needed + // to remain in memory. + var err error + iterNode, err = b.getPrevNodeFromNode(iterNode) + if err != nil { + break + } + } + + return numFound >= numRequired +} + +// calcPastMedianTime calculates the median time of the previous few blocks +// prior to, and including, the passed block node. It is primarily used to +// validate new blocks have sane timestamps. +func (b *BlockChain) calcPastMedianTime(startNode *blockNode) (time.Time, error) { + // Genesis block. + if startNode == nil { + return btcwire.GenesisBlock.Header.Timestamp, nil + } + + // Create a slice of the previous few block timestamps used to calculate + // the median per the number defined by the constant medianTimeBlocks. 
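	// For example, with the usual bitcoin window of 11 blocks and a fully
	// populated slice, the median ends up being the 6th timestamp (index 5)
	// once the slice is sorted below.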
+ timestamps := make([]time.Time, medianTimeBlocks) + numNodes := 0 + iterNode := startNode + for i := 0; i < medianTimeBlocks && iterNode != nil; i++ { + timestamps[i] = iterNode.timestamp + numNodes++ + + // Get the previous block node. This function is used over + // simply accessing iterNode.parent directly as it will + // dynamically create previous block nodes as needed. This + // helps allow only the pieces of the chain that are needed + // to remain in memory. + var err error + iterNode, err = b.getPrevNodeFromNode(iterNode) + if err != nil { + return time.Time{}, err + } + } + + // Prune the slice to the actual number of available timestamps which + // will be fewer than desired near the beginning of the block chain + // and sort them. + timestamps = timestamps[:numNodes] + sort.Sort(timeSorter(timestamps)) + + // NOTE: bitcoind incorrectly calculates the median for even numbers of + // blocks. A true median averages the middle two elements for a set + // with an even number of elements in it. Since the constant for the + // previous number of blocks to be used is odd, this is only an issue + // for a few blocks near the beginning of the chain. I suspect this is + // an optimization even though the result is slightly wrong for a few + // of the first blocks since after the first few blocks, there will + // always be an odd number of blocks in the set per the constant. + // + // This code follows suit to ensure the same rules are used as bitcoind + // however, be aware that should the medianTimeBlocks constant ever be + // changed to an even number, this code will be wrong. + medianTimestamp := timestamps[numNodes/2] + return medianTimestamp, nil +} + +// getReorganizeNodes finds the fork point between the main chain and the passed +// node and returns a list of block nodes that would need to be detached from +// the main chain and a list of block nodes that would need to be attached to +// the fork point (which will be the end of the main chain after detaching the +// returned list of block nodes) in order to reorganize the chain such that the +// passed node is the new end of the main chain. The lists will be empty if the +// passed node is not on a side chain. +func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) { + // Nothing to detach or attach if there is no node. + attachNodes := list.New() + detachNodes := list.New() + if node == nil { + return detachNodes, attachNodes + } + + // Find the fork point (if any) adding each block to the list of nodes + // to attach to the main tree. Push them onto the list in reverse order + // so they are attached in the appropriate order when iterating the list + // later. + ancestor := node + for ; ancestor.parent != nil; ancestor = ancestor.parent { + if ancestor.inMainChain { + break + } + attachNodes.PushFront(ancestor) + } + + // TODO(davec): Use prevNodeFromNode function in case the requested + // node is further back than the what is in memory. This shouldn't + // happen in the normal course of operation, but the ability to fetch + // input transactions of arbitrary blocks will likely to be exposed at + // some point and that could lead to an issue here. + + // Start from the end of the main chain and work backwards until the + // common ancestor adding each block to the list of nodes to detach from + // the main chain. 
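	// Note that the resulting ordering mirrors how the lists are consumed
	// by reorganizeChain: detachNodes ends up newest first (the current
	// tip is disconnected first) while attachNodes, built with PushFront
	// above, ends up oldest first so the new blocks are connected in
	// order.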
+ for n := b.bestChain; n != nil && n.parent != nil; n = n.parent { + if n.hash.IsEqual(ancestor.hash) { + break + } + detachNodes.PushBack(n) + } + + return detachNodes, attachNodes +} + +// connectBlock handles connecting the passed node/block to the end of the main +// (best) chain. +func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block) error { + // Make sure it's extending the end of the best chain. + prevHash := &block.MsgBlock().Header.PrevBlock + if b.bestChain != nil && !prevHash.IsEqual(b.bestChain.hash) { + return fmt.Errorf("connectBlock must be called with a block " + + "that extends the main chain") + } + + // Insert the block into the database which houses the main chain. + _, err := b.db.InsertBlock(block) + if err != nil { + return err + } + + // TODO(davec): Remove transactions from memory transaction pool. + + // Add the new node to the memory main chain indices for faster + // lookups. + node.inMainChain = true + b.index[*node.hash] = node + b.depNodes[*prevHash] = append(b.depNodes[*prevHash], node) + + // This node is now the end of the best chain. + b.bestChain = node + + // Notify the caller that the block was connected to the main chain. + // The caller would typically want to react with actions such as + // updating wallets. + b.sendNotification(NTBlockConnected, block) + + return nil +} + +// disconnectBlock handles disconnecting the passed node/block from the end of +// the main (best) chain. +func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block) error { + // Make sure the node being disconnected is the end of the best chain. + if b.bestChain == nil || !node.hash.IsEqual(b.bestChain.hash) { + return fmt.Errorf("disconnectBlock must be called with the " + + "block at the end of the main chain") + } + + // Remove the block from the database which houses the main chain. + prevNode, err := b.getPrevNodeFromNode(node) + if err != nil { + return err + } + err = b.db.DropAfterBlockBySha(prevNode.hash) + if err != nil { + return err + } + + // TODO(davec): Put transactions back in memory transaction pool. + + // Put block in the side chain cache. + node.inMainChain = false + b.blockCache[*node.hash] = block + + // This node's parent is now the end of the best chain. + b.bestChain = node.parent + + // Notify the caller that the block was disconnect from the main chain. + // The caller would typically want to react with actions such as + // updating wallets. + b.sendNotification(NTBlockDisconnected, block) + + return nil +} + +// reorganizeChain reorganizes the block chain by disconnecting the nodes in the +// detachNodes list and connecting the nodes in the attach list. It expects +// that the lists are already in the correct order and are in sync with the +// end of the current best chain. Specifically, nodes that are being +// disconnected must be in reverse order (think of popping them off +// the end of the chain) and nodes the are being attached must be in forwards +// order (think pushing them onto the end of the chain). +func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error { + // Ensure all of the needed side chain blocks are in the cache. 
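	// Side chain blocks are placed into the cache by connectBestChain (and
	// by disconnectBlock during a previous reorganize), so a missing entry
	// here generally indicates an internal inconsistency.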
+ for e := attachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + if _, exists := b.blockCache[*n.hash]; !exists { + return fmt.Errorf("block %v is missing from the side "+ + "chain block cache", n.hash) + } + } + + // Perform several checks to verify each block that needs to be attached + // to the main chain can be connected without violating any rules and + // without actually connecting the block. + // + // NOTE: bitcoind does these checks directly when it connects a block. + // The downside to that approach is that if any of these checks fail + // after disconneting some blocks or attaching others, all of the + // operations have to be rolled back to get the chain back into the + // state it was before the rule violation (or other failure). There are + // at least a couple of ways accomplish that rollback, but both involve + // tweaking the chain. This approach catches these issues before ever + // modifying the chain. + for e := attachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block := b.blockCache[*n.hash] + err := b.checkConnectBlock(n, block) + if err != nil { + return err + } + + } + + // Disconnect blocks from the main chain. + for e := detachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block, err := b.db.FetchBlockBySha(n.hash) + if err != nil { + return err + } + err = b.disconnectBlock(n, block) + if err != nil { + return err + } + } + + // Connect the new best chain blocks. + for e := attachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block := b.blockCache[*n.hash] + err := b.connectBlock(n, block) + if err != nil { + return err + } + delete(b.blockCache, *n.hash) + } + + return nil +} + +// connectBestChain handles connecting the passed block to the chain while +// respecting proper chain selection according to the chain with the most +// proof of work. In the typical case, the new block simply extends the main +// chain. However, it may also be extending (or creating) a side chain (fork) +// which may or may not end up becoming the main chain depending on which fork +// cumulatively has the most proof of work. +func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block) error { + // We haven't selected a best chain yet or we are extending the main + // (best) chain with a new block. This is the most common case. + if b.bestChain == nil || node.parent.hash.IsEqual(b.bestChain.hash) { + // Perform several checks to verify the block can be connected + // to the main chain (including whatever reorganization might + // be necessary to get this node to the main chain) without + // violating any rules and without actually connecting the + // block. + err := b.checkConnectBlock(node, block) + if err != nil { + return err + } + + // Connect the block to the main chain. + err = b.connectBlock(node, block) + if err != nil { + return err + } + + // Connect the parent node to this node. + if node.parent != nil { + node.parent.children = append(node.parent.children, node) + } + + return nil + } + + // We're extending (or creating) a side chain which may or may not + // become the main chain, but in either case we need the block stored + // for future processing, so add the block to the side chain holding + // cache. 
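	// Side chain blocks are only written to the database if and when their
	// chain accumulates enough work to become the main chain (via the
	// reorganize below), which is why they are held in memory here.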
+ log.Debugf("Adding block %v to side chain cache", node.hash) + b.blockCache[*node.hash] = block + b.index[*node.hash] = node + + // We're extending (or creating) a side chain, but the cumulative + // work for this new side chain is not enough to make it the new chain. + if node.workSum.Cmp(b.bestChain.workSum) <= 0 { + // Connect the parent node to this node. + node.inMainChain = false + if node.parent != nil { + node.parent.children = append(node.parent.children, node) + } + return nil + } + + // We're extending (or creating) a side chain and the cumulative work + // for this new side chain is more than the old best chain, so this side + // chain needs to become the main chain. In order to accomplish that, + // find the common ancestor of both sides of the fork, disconnect the + // blocks that form the (now) old fork from the main chain, and attach + // the blocks that form the new chain to the main chain starting at the + // common ancenstor (the point where the chain forked). + detachNodes, attachNodes := b.getReorganizeNodes(node) + + // Reorganize the chain. + err := b.reorganizeChain(detachNodes, attachNodes) + if err != nil { + return err + } + + return nil +} + +// New returns a BlockChain instance for the passed bitcoin network using the +// provided backing database. It accepts a channel on which asynchronous +// notifications will be sent when various events take place. See the +// documentation for Notification and NotificationType for details on the +// types and contents of notifications. The provided channel can be nil if the +// caller is not interested in receiving notifications. +func New(db btcdb.Db, btcnet btcwire.BitcoinNet, c chan *Notification) *BlockChain { + b := BlockChain{ + db: db, + btcnet: btcnet, + notifications: c, + root: nil, + bestChain: nil, + index: make(map[btcwire.ShaHash]*blockNode), + depNodes: make(map[btcwire.ShaHash][]*blockNode), + orphans: make(map[btcwire.ShaHash]*orphanBlock), + prevOrphans: make(map[btcwire.ShaHash][]*orphanBlock), + blockCache: make(map[btcwire.ShaHash]*btcutil.Block), + } + return &b +} diff --git a/checkpoints.go b/checkpoints.go new file mode 100644 index 000000000..ba3a42337 --- /dev/null +++ b/checkpoints.go @@ -0,0 +1,258 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" + "github.com/conformal/btcscript" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" +) + +const CheckpointConfirmations = 20 + +// A checkpoint is a known good point in the block chain. Using checkpoints +// allows a few optimizations for old blocks during initial download and also +// prevents forks from old blocks. +// +// Each checkpoint is selected by the core developers based upon several +// factors. See the documentation for IsCheckpointCandidate for details +// on the selection criteria. +// +// As alluded to above, this package provides an IsCheckpointCandidate function +// which programatically identifies a block as a checkpoint candidate. The idea +// is that candidates are reviewed by a developer to make the final decision and +// then manually added to the list of checkpoints. +type Checkpoint struct { + Height int64 + Hash *btcwire.ShaHash +} + +// checkpointData groups checkpoints and other pertinent checkpoint data into +// a single type. +type checkpointData struct { + // Checkpoints ordered from oldest to newest. 
+ checkpoints []Checkpoint + + // A map that will be automatically generated with the heights from + // the checkpoints as keys. + checkpointsByHeight map[int64]*Checkpoint +} + +// checkpointDataMainNet contains checkpoint data for the main network. +var checkpointDataMainNet = checkpointData{ + checkpoints: []Checkpoint{ + {11111, newShaHashFromStr("0000000069e244f73d78e8fd29ba2fd2ed618bd6fa2ee92559f542fdb26e7c1d")}, + {33333, newShaHashFromStr("000000002dd5588a74784eaa7ab0507a18ad16a236e7b1ce69f00d7ddfb5d0a6")}, + {74000, newShaHashFromStr("0000000000573993a3c9e41ce34471c079dcf5f52a0e824a81e7f953b8661a20")}, + {105000, newShaHashFromStr("00000000000291ce28027faea320c8d2b054b2e0fe44a773f3eefb151d6bdc97")}, + {134444, newShaHashFromStr("00000000000005b12ffd4cd315cd34ffd4a594f430ac814c91184a0d42d2b0fe")}, + {168000, newShaHashFromStr("000000000000099e61ea72015e79632f216fe6cb33d7899acb35b75c8303b763")}, + {193000, newShaHashFromStr("000000000000059f452a5f7340de6682a977387c17010ff6e6c3bd83ca8b1317")}, + {210000, newShaHashFromStr("000000000000048b95347e83192f69cf0366076336c639f9b7228e9ba171342e")}, + {216116, newShaHashFromStr("00000000000001b4f4b433e81ee46494af945cf96014816a4e2370f11b23df4e")}, + {225430, newShaHashFromStr("00000000000001c108384350f74090433e7fcf79a606b8e797f065b130575932")}, + }, + checkpointsByHeight: nil, // Automatically generated in init. +} + +// checkpointDataTestNet contains checkpoint data for the test network. +var checkpointDataTestNet = checkpointData{ + checkpoints: []Checkpoint{ + {546, newShaHashFromStr("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")}, + }, + checkpointsByHeight: nil, // Automatically generated in init. +} + +// newShaHashFromStr converts the passed big-endian hex string into a +// btcwire.ShaHash. It only differs from the one available in btcwire in that +// it ignores the error since it will only (and must only) be called with +// hard-coded, and therefore known good, hashes. +func newShaHashFromStr(hexStr string) *btcwire.ShaHash { + sha, _ := btcwire.NewShaHashFromStr(hexStr) + return sha +} + +// DisableCheckpoints provides a mechanism to disable validation against +// checkpoints which you DO NOT want to do in production. It is provided only +// for debug purposes. +func (b *BlockChain) DisableCheckpoints(disable bool) { + b.noCheckpoints = disable +} + +// checkpointData returns the appropriate checkpoint data set depending on the +// network configured for the block chain. +func (b *BlockChain) checkpointData() *checkpointData { + switch b.btcnet { + case btcwire.TestNet3: + return &checkpointDataTestNet + case btcwire.MainNet: + fallthrough + default: + return &checkpointDataMainNet + } +} + +// LatestCheckpoint returns the most recent checkpoint (regardless of whether it +// is already known). When checkpoints are disabled it will return nil. +func (b *BlockChain) LatestCheckpoint() *Checkpoint { + if b.noCheckpoints { + return nil + } + + checkpoints := b.checkpointData().checkpoints + return &checkpoints[len(checkpoints)-1] +} + +// verifyCheckpoint returns whether the passed block height and hash combination +// match the hard-coded checkpoint data. It also returns true if there is no +// checkpoint data for the passed block height. +func (b *BlockChain) verifyCheckpoint(height int64, hash *btcwire.ShaHash) bool { + if b.noCheckpoints { + return true + } + + // Nothing to check if there is no checkpoint data for the block height. 
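	// For example, on the main network only the heights listed in
	// checkpointDataMainNet (11111 through 225430) are ever compared;
	// every other height passes this check implicitly.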
+ checkpoint, exists := b.checkpointData().checkpointsByHeight[height] + if !exists { + return true + } + + return checkpoint.Hash.IsEqual(hash) +} + +// findClosestKnownCheckpoint finds the most recent checkpoint that is already +// available in the downloaded portion of the block chain and returns the +// associated block. It returns nil if a checkpoint can't be found (this should +// really only happen for blocks before the first checkpoint). +func (b *BlockChain) findLatestKnownCheckpoint() (*btcutil.Block, error) { + if b.noCheckpoints { + return nil, nil + } + + // Loop backwards through the available checkpoints to find one that + // we already have. + checkpoints := b.checkpointData().checkpoints + clen := len(checkpoints) + for i := clen - 1; i >= 0; i-- { + if b.db.ExistsSha(checkpoints[i].Hash) { + block, err := b.db.FetchBlockBySha(checkpoints[i].Hash) + if err != nil { + return nil, err + } + return block, nil + } + } + return nil, nil +} + +// isNonstandardTransaction determines whether a transaction contains any +// scripts which are not one of the standard types. +func isNonstandardTransaction(tx *btcwire.MsgTx) bool { + // TODO(davec): Should there be checks for the input signature scripts? + + // Check all of the output public key scripts for non-standard scripts. + for _, txOut := range tx.TxOut { + scriptClass := btcscript.GetScriptClass(txOut.PkScript) + if scriptClass == btcscript.NonStandardTy { + return true + } + } + return false +} + +// IsCheckpointCandidate returns whether or not the passed block is a good +// checkpoint candidate. +// +// The factors used to determine a good checkpoint are: +// - The block must be in the main chain +// - The block must be at least 'CheckpointConfirmations' blocks prior to the +// current end of the main chain +// - The timestamps for the blocks before and after the checkpoint must have +// timestamps which are also before and after the checkpoint, respectively +// (due to the median time allowance this is not always the case) +// - The block must not contain any strange transaction such as those with +// nonstandard scripts +func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { + // Checkpoints must be enabled. + if b.noCheckpoints { + return false, fmt.Errorf("checkpoints are disabled") + } + + blockHash, err := block.Sha() + if err != nil { + return false, err + } + + // A checkpoint must be in the main chain. + if !b.db.ExistsSha(blockHash) { + return false, nil + } + + // A checkpoint must be at least CheckpointConfirmations blocks before + // the end of the main chain. + blockHeight := block.Height() + _, mainChainHeight, err := b.db.NewestSha() + if err != nil { + return false, err + } + if blockHeight > (mainChainHeight - CheckpointConfirmations) { + return false, nil + } + + // Get the previous block. + prevHash := &block.MsgBlock().Header.PrevBlock + prevBlock, err := b.db.FetchBlockBySha(prevHash) + if err != nil { + return false, err + } + + // Get the next block. + nextHash, err := b.db.FetchBlockShaByHeight(blockHeight + 1) + if err != nil { + return false, err + } + nextBlock, err := b.db.FetchBlockBySha(nextHash) + if err != nil { + return false, err + } + + // A checkpoint must have timestamps for the block and the blocks on + // either side of it in order (due to the median time allowance this is + // not always the case). 
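	// In other words, prevTime <= curTime <= nextTime must hold.  A block
	// whose timestamp is earlier than its parent's (which the median time
	// rule permits) is therefore rejected as a candidate.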
+ prevTime := prevBlock.MsgBlock().Header.Timestamp + curTime := block.MsgBlock().Header.Timestamp + nextTime := nextBlock.MsgBlock().Header.Timestamp + if prevTime.After(curTime) || nextTime.Before(curTime) { + return false, nil + } + + // A checkpoint must have transactions that only contain standard + // scripts. + for _, tx := range block.MsgBlock().Transactions { + if isNonstandardTransaction(tx) { + return false, nil + } + } + + return true, nil +} + +// init is called on package load. +func init() { + // Generate the checkpoint by height maps from the checkpoint data + // when the package loads. + checkpointInitializeList := []*checkpointData{ + &checkpointDataMainNet, + &checkpointDataTestNet, + } + for _, data := range checkpointInitializeList { + data.checkpointsByHeight = make(map[int64]*Checkpoint) + for i := range data.checkpoints { + checkpoint := &data.checkpoints[i] + data.checkpointsByHeight[checkpoint.Height] = checkpoint + } + } +} diff --git a/cov_report.sh b/cov_report.sh new file mode 100644 index 000000000..307f05b76 --- /dev/null +++ b/cov_report.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +type gocov >/dev/null 2>&1 +if [ $? -ne 0 ]; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi +gocov test | gocov report diff --git a/difficulty.go b/difficulty.go new file mode 100644 index 000000000..c30ad7ac9 --- /dev/null +++ b/difficulty.go @@ -0,0 +1,285 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" + "github.com/conformal/btcwire" + "math/big" + "time" +) + +const ( + // targetTimespan is the desired amount of time that should elapse + // before block difficulty requirement is examined to determine how + // it should be changed in order to maintain the desired block + // generation rate. + targetTimespan = time.Hour * 24 * 14 + + // targetSpacing is the desired amount of time to generate each block. + targetSpacing = time.Minute * 10 + + // blocksPerRetarget is the number of blocks between each difficulty + // retarget. It is calculated based on the desired block generation + // rate. + blocksPerRetarget = int64(targetTimespan / targetSpacing) + + // retargetAdjustmentFactor is the adjustment factor used to limit + // the minimum and maximum amount of adjustment that can occur between + // difficulty retargets. + retargetAdjustmentFactor = 4 + + // minRetargetTimespan is the minimum amount of adjustment that can + // occur between difficulty retargets. It equates to 25% of the + // previous difficulty. + minRetargetTimespan = int64(targetTimespan / retargetAdjustmentFactor) + + // maxRetargetTimespan is the maximum amount of adjustment that can + // occur between difficulty retargets. It equates to 400% of the + // previous difficulty. + maxRetargetTimespan = int64(targetTimespan * retargetAdjustmentFactor) +) + +var ( + // bigOne is 1 represented as a big.Int. It is defined here to avoid + // the overhead of creating it multiple times. + bigOne = big.NewInt(1) + + // oneLsh256 is 1 shifted left 256 bits. 
It is defined here to avoid + // the overhead of creating it multiple times. + oneLsh256 = new(big.Int).Lsh(bigOne, 256) + + // powLimit is the highest proof of work value a bitcoin block can have. + // It is the value 2^224 - 1. + powLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne) +) + +// ShaHashToBig converts a btcwire.ShaHash into a big.Int that can be used to +// perform math comparisons. +func ShaHashToBig(hash *btcwire.ShaHash) *big.Int { + // A ShaHash is in little-endian, but the big package wants the bytes + // in big-endian. Reverse them. ShaHash.Bytes makes a copy, so it + // is safe to modify the returned buffer. + buf := hash.Bytes() + blen := len(buf) + for i := 0; i < blen/2; i++ { + buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i] + } + + return new(big.Int).SetBytes(buf) +} + +// CompactToBig converts a compact representation of a whole number N to an +// unsigned 32-bit number. The representation is similar to IEEE754 floating +// point numbers. +// +// Like IEEE754 floating point, there are three basic components: the sign, +// the exponent, and the mantissa. They are broken out as follows: +// +// * the most significant 8 bits represent the unsigned base 256 exponent +// * bit 23 (the 24th bit) represents the sign bit +// * the least significant 23 bits represent the mantissa +// +// ------------------------------------------------- +// | Exponent | Sign | Mantissa | +// ------------------------------------------------- +// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] | +// ------------------------------------------------- +// +// The formula to calculate N is: +// N = (-1^sign) * mantissa * 256^(exponent-3) +// +// This compact form is only used in bitcoin to encode unsigned 256-bit numbers +// which represent difficulty targets, thus there really is not a need for a +// sign bit, but it is implemented here to stay consistent with bitcoind. +func CompactToBig(compact uint32) *big.Int { + // Extract the mantissa, sign bit, and exponent. + mantissa := compact & 0x007fffff + isNegative := compact&0x00800000 != 0 + exponent := uint(compact >> 24) + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes to represent the full 256-bit number. So, + // treat the exponent as the number of bytes and shift the mantissa + // right or left accordingly. This is equivalent to: + // N = mantissa * 256^(exponent-3) + var bn *big.Int + if exponent <= 3 { + mantissa >>= 8 * (3 - exponent) + bn = big.NewInt(int64(mantissa)) + } else { + bn = big.NewInt(int64(mantissa)) + bn.Lsh(bn, 8*(exponent-3)) + } + + // Make it negative if the sign bit is set. + if isNegative { + bn = bn.Neg(bn) + } + + return bn +} + +// BigToCompact converts a whole number N to a compact representation using +// an unsigned 32-bit number. The compact representation only provides 23 bits +// of precision, so values larger than (2^23 - 1) only encode the most +// significant digits of the number. See CompactToBig for details. +func BigToCompact(n *big.Int) uint32 { + // No need to do any work if it's zero. + if n.Sign() == 0 { + return 0 + } + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes. So, shift the number right or left + // accordingly. 
This is equivalent to: + // mantissa = mantissa / 256^(exponent-3) + var mantissa uint32 + exponent := uint(len(n.Bytes())) + if exponent <= 3 { + mantissa = uint32(n.Bits()[0]) + mantissa <<= 8 * (3 - exponent) + } else { + // Use a copy to avoid modifying the caller's original number. + tn := new(big.Int).Set(n) + mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0]) + } + + // When the mantissa already has the sign bit set, the number is too + // large to fit into the available 23-bits, so divide the number by 256 + // and increment the exponent accordingly. + if mantissa&0x00800000 != 0 { + mantissa >>= 8 + exponent++ + } + + // Pack the exponent, sign bit, and mantissa into an unsigned 32-bit + // int and return it. + compact := uint32(exponent<<24) | mantissa + if n.Sign() < 0 { + compact |= 0x00800000 + } + return compact +} + +// calcWork calculates a work value from difficulty bits. Bitcoin increases +// the difficulty for generating a block by decreasing the value which the +// generated hash must be less than. This difficulty target is stored in each +// block header using a compact representation as described in the documenation +// for CompactToBig. The main chain is selected by choosing the chain that has +// the most proof of work (highest difficulty). Since a lower target difficulty +// value equates to higher actual difficulty, the work value which will be +// accumulated must be the inverse of the difficulty. Also, in order to avoid +// potential division by zero and really small floating point numbers, add 1 to +// the denominator and multiply the numerator by 2^256. +func calcWork(bits uint32) *big.Rat { + // (1 << 256) / (difficultyNum + 1) + difficultyNum := CompactToBig(bits) + denominator := new(big.Int).Add(difficultyNum, bigOne) + return new(big.Rat).SetFrac(oneLsh256, denominator) +} + +// calcEasiestDifficulty calculates the easiest possible difficulty that a block +// can have given starting difficulty bits and a duration. It is mainly used to +// verify that claimed proof of work by a block is sane as compared to a +// known good checkpoint. +func calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 { + // Convert types used in the calculations below. + durationVal := int64(duration) + adjustmentFactor := big.NewInt(retargetAdjustmentFactor) + + // TODO(davec): Testnet has special rules. + + // Since easier difficulty equates to higher numbers, the easiest + // difficulty for a given duration is the largest value possible given + // the number of retargets for the duration and starting difficulty + // multiplied by the max adjustment factor. + newTarget := CompactToBig(bits) + for durationVal > 0 && newTarget.Cmp(powLimit) < 0 { + newTarget.Mul(newTarget, adjustmentFactor) + durationVal -= maxRetargetTimespan + } + + // Limit new value to the proof of work limit. + if newTarget.Cmp(powLimit) > 0 { + newTarget.Set(powLimit) + } + + return BigToCompact(newTarget) +} + +// calcNextRequiredDifficulty calculates the required difficulty for the block +// after the passed previous block node based on the difficulty retarget rules. +func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode) (uint32, error) { + // Genesis block. + if lastNode == nil { + return BigToCompact(powLimit), nil + } + + // Return the previous block's difficulty requirements if this block + // is not at a difficulty retarget interval. + if (lastNode.height+1)%blocksPerRetarget != 0 { + // TODO(davec): Testnet has special rules. 
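		// With a targetTimespan of 14 days and a targetSpacing of 10
		// minutes, blocksPerRetarget works out to 2016 blocks, so this
		// path is taken for all but one block per retarget interval.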
+ return lastNode.bits, nil + } + + // Get the block node at the previous retarget (targetTimespan days + // worth of blocks). + firstNode := lastNode + for i := int64(0); i < blocksPerRetarget-1 && firstNode != nil; i++ { + // Get the previous block node. This function is used over + // simply accessing firstNode.parent directly as it will + // dynamically create previous block nodes as needed. This + // helps allow only the pieces of the chain that are needed + // to remain in memory. + var err error + firstNode, err = b.getPrevNodeFromNode(firstNode) + if err != nil { + return 0, err + } + } + + if firstNode == nil { + return 0, fmt.Errorf("unable to obtain previous retarget block") + } + + // Limit the amount of adjustment that can occur to the previous + // difficulty. + actualTimespan := lastNode.timestamp.UnixNano() - firstNode.timestamp.UnixNano() + adjustedTimespan := actualTimespan + if actualTimespan < minRetargetTimespan { + adjustedTimespan = minRetargetTimespan + } else if actualTimespan > maxRetargetTimespan { + adjustedTimespan = maxRetargetTimespan + } + + // Calculate new target difficulty as: + // currentDifficulty * (adjustedTimespan / targetTimespan) + // The result uses integer division which means it will be slightly + // rounded down. Bitcoind also uses integer division to calculate this + // result. + oldTarget := CompactToBig(lastNode.bits) + newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan)) + newTarget.Div(newTarget, big.NewInt(int64(targetTimespan))) + + // Limit new value to the proof of work limit. + if newTarget.Cmp(powLimit) > 0 { + newTarget.Set(powLimit) + } + + // Log new target difficulty and return it. The new target logging is + // intentionally converting the bits back to a number instead of using + // newTarget since conversion to the compact representation loses + // precision. + newTargetBits := BigToCompact(newTarget) + log.Debugf("Difficulty retarget at block height %d", lastNode.height+1) + log.Debugf("Old target %08x (%064x)", lastNode.bits, oldTarget) + log.Debugf("New target %08x (%064x)", newTargetBits, CompactToBig(newTargetBits)) + log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v", + time.Duration(actualTimespan), time.Duration(adjustedTimespan), + targetTimespan) + + return newTargetBits, nil +} diff --git a/doc.go b/doc.go new file mode 100644 index 000000000..a96b6ad8a --- /dev/null +++ b/doc.go @@ -0,0 +1,124 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package btcchain implements bitcoin block handling and chain selection rules. + +The bitcoin block handling and chain selection rules are an integral, and quite +likely the most important, part of bitcoin. Unfortunately, at the time of +this writing, these rules are also largely undocumented and had to be +ascertained from the bitcoind source code. At its core, bitcoin is a +distributed consensus of which blocks are valid and which ones will comprise the +main block chain (public ledger) that ultimately determines accepted +transactions, so it is extremely important that fully validating nodes agree on +all rules. + +At a high level, this package provides support for inserting new blocks into +the block chain according to the aforementioned rules. It includes +functionality such as rejecting duplicate blocks, ensuring blocks and +transactions follow all rules, orphan handling, and best chain selection along +with reorganization. 
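The retarget logic in difficulty.go above boils down to three steps: decode the compact bits into a target, clamp the observed timespan, and scale the old target by the clamped ratio. The sketch below makes those numbers concrete; it is a standalone illustration that assumes mainnet-style parameters (a two-week target timespan, a 4x adjustment clamp, and the genesis compact bits 0x1d00ffff) rather than the package's unexported constants.

```Go
package main

import (
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Assumed mainnet-style retarget parameters (illustrative only).
	const targetTimespan = 14 * 24 * time.Hour
	const adjustmentFactor = 4

	// Suppose the retarget window's blocks arrived twice as fast as
	// intended.
	actual := targetTimespan / 2

	// Clamp the timespan to [targetTimespan/4, targetTimespan*4],
	// mirroring the adjustment limit applied during a retarget.
	if actual < targetTimespan/adjustmentFactor {
		actual = targetTimespan / adjustmentFactor
	} else if actual > targetTimespan*adjustmentFactor {
		actual = targetTimespan * adjustmentFactor
	}

	// Decode compact bits 0x1d00ffff by hand: mantissa 0xffff shifted
	// left by 8*(0x1d-3) = 208 bits.
	oldTarget := new(big.Int).Lsh(big.NewInt(0xffff), 8*(0x1d-3))

	// newTarget = oldTarget * actualTimespan / targetTimespan using
	// integer math, so the result rounds down.
	newTarget := new(big.Int).Mul(oldTarget, big.NewInt(int64(actual)))
	newTarget.Div(newTarget, big.NewInt(int64(targetTimespan)))

	fmt.Printf("old target %064x\n", oldTarget)
	fmt.Printf("new target %064x\n", newTarget)
}
```

With blocks arriving in half the expected time the clamped ratio is 1/2, so the printed new target is half the old one; a smaller target means more work, so the difficulty doubles.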
+ +Since this package does not deal with other bitcoin specifics such as network +communication or wallets, it provides a notification system which gives the +caller a high level of flexibility in how they want to react to certain events +such as orphan blocks which need their parents requested and newly connected +main chain blocks which might result in wallet updates. + +Bitcoin Chain Processing Overview + +Before a block is allowed into the block chain, it must go through an intensive +series of validation rules. The following list serves as a general outline of +those rules to provide some intuition into what is going on under the hood, but +is by no means exhaustive: + + - Reject duplicate blocks + - Perform a series of sanity checks on the block and its transactions such as + verifying proof of work, timestamps, number and character of transactions, + transaction amounts, script complexity, and merkle root calculations + - Compare the block against predetermined checkpoints for expected timestamps + and difficulty based on elapsed time since the checkpoint + - Save the most recent orphan blocks for a limited time in case their parent + blocks become available + - Stop processing if the block is an orphan as the rest of the processing + depends on the block's position within the block chain + - Perform a series of more thorough checks that depend on the block's position + within the block chain such as verifying block difficulties adhere to + difficulty retarget rules, timestamps are after the median of the last + several blocks, all transactions are finalized, checkpoint blocks match, and + block versions are in line with the previous blocks + - Determine how the block fits into the chain and perform different actions + accordingly in order to ensure any side chains which have higher difficulty + than the main chain become the new main chain + - When a block is being connected to the main chain (either through + reorganization of a side chain to the main chain or just extending the + main chain), perform further checks on the block's transactions such as + verifying transaction duplicates, script complexity for the combination of + connected scripts, coinbase maturity, double spends, and connected + transaction values + - Run the transaction scripts to verify the spender is allowed to spend the + coins + - Insert the block into the block database + +Block Processing Example + +The following example program demonstrates processing a block. This example +intentionally causes an error by attempting to process a duplicate block. + + package main + + import ( + "fmt" + "github.com/conformal/btcchain" + "github.com/conformal/btcdb" + _ "github.com/conformal/btcdb/sqlite3" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + ) + + func main() { + // First, we create a new database to store the accepted blocks into. + // Typically this would be opening an existing database, but we create + // a new db here so this is a complete working example. + db, err := btcdb.CreateDB("sqlite", "example.db") + if err != nil { + fmt.Printf("Failed to create database: %v\n", err) + return + } + defer db.Close() + + // Create a new BlockChain instance using the underlying database for + // the main bitcoin network and ignore notifications. + chain := btcchain.New(db, btcwire.MainNet, nil) + + // Process a block. For this example, we are going to intentionally + // cause an error by trying to process the genesis block which already + // exists. 
+ block := btcutil.NewBlock(&btcwire.GenesisBlock, btcwire.ProtocolVersion) + err = chain.ProcessBlock(block) + if err != nil { + fmt.Printf("Failed to process block: %v\n", err) + return + } + } + +Errors + +Errors returned by this package are either the raw errors provided by underlying +calls or of type btcchain.RuleError. This allows the caller to differentiate +between unexpected errors, such as database errors, versus errors due to rule +violations through type assertions. + +Bitcoin Improvement Proposals + +This package includes spec changes outlined by the following BIPs: + + BIP0016 (https://en.bitcoin.it/wiki/BIP_0016) + BIP0030 (https://en.bitcoin.it/wiki/BIP_0030) + +Other important information + +This package does not yet implement all of the unique rules for testnet. +*/ +package btcchain diff --git a/internal_test.go b/internal_test.go new file mode 100644 index 000000000..c5c9a4040 --- /dev/null +++ b/internal_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +This test file is part of the btcchain package rather than than the +btcchain_test package so it can bridge access to the internals to properly test +cases which are either not possible or can't reliably be tested via the public +interface. The functions are only exported while the tests are being run. +*/ + +package btcchain + +import ( + "github.com/conformal/btcutil" +) + +// TstCheckBlockSanity makes the internal checkBlockSanity function available to +// the test package. +func TstCheckBlockSanity(block *btcutil.Block) error { + return checkBlockSanity(block) +} + +// TstSetCoinbaseMaturity makes the ability to set the coinbase maturity +// available to the test package. +func TstSetCoinbaseMaturity(maturity int64) { + coinbaseMaturity = maturity +} diff --git a/log.go b/log.go new file mode 100644 index 000000000..0c273bd31 --- /dev/null +++ b/log.go @@ -0,0 +1,65 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "errors" + "github.com/conformal/seelog" + "io" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log seelog.LoggerInterface + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until either UseLogger or SetLogWriter are called. +func DisableLog() { + log = seelog.Disabled +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using seelog. +func UseLogger(logger seelog.LoggerInterface) { + log = logger +} + +// SetLogWriter uses a specified io.Writer to output package logging info. +// This allows a caller to direct package logging output without needing a +// dependency on seelog. If the caller is also using seelog, UseLogger should +// be used instead. 
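+//
+// For example, a caller that simply wants all of the package's log output
+// written to standard output could use:
+//
+//	if err := btcchain.SetLogWriter(os.Stdout); err != nil {
+//		// Handle the error.
+//	}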
+func SetLogWriter(w io.Writer) error { + if w == nil { + return errors.New("nil writer") + } + + l, err := seelog.LoggerFromWriterWithMinLevel(w, seelog.TraceLvl) + if err != nil { + return err + } + + UseLogger(l) + return nil +} + +// LogClosure is a closure that can be printed with %v to be used to +// generate expensive-to-create data for a detailed log level and avoid doing +// the work if the data isn't printed. +type logClosure func() string + +func (c logClosure) String() string { + return c() +} + +func newLogClosure(c func() string) logClosure { + return logClosure(c) +} diff --git a/merkle.go b/merkle.go new file mode 100644 index 000000000..e0aa00530 --- /dev/null +++ b/merkle.go @@ -0,0 +1,114 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "math" +) + +// nextPowerOfTwo returns the next highest power of two from a given number if +// it is not already a power of two. This is a helper function used during the +// calculation of a merkle tree. +func nextPowerOfTwo(n int) int { + // Return the number if it's already a power of 2. + if n&(n-1) == 0 { + return n + } + + // Figure out and return the next power of two. + exponent := uint(math.Log2(float64(n))) + 1 + return 1 << exponent // 2^exponent +} + +// hashMerkleBranches takes two hashes, treated as the left and right tree +// nodes, and returns the hash of their concatenation. This is a helper +// function used to during generatation of a merkle tree. +func hashMerkleBranches(left *btcwire.ShaHash, right *btcwire.ShaHash) *btcwire.ShaHash { + // Concatenate the left and right nodes. + var sha [btcwire.HashSize * 2]byte + copy(sha[:btcwire.HashSize], left.Bytes()) + copy(sha[btcwire.HashSize:], right.Bytes()) + + // Create a new sha hash from the double sha 256. Ignore the error + // here since SetBytes can't fail here due to the fact DoubleSha256 + // always returns a []byte of the right size regardless of input. + newSha, _ := btcwire.NewShaHash(btcwire.DoubleSha256(sha[:])) + return newSha +} + +// BuildMerkleTreeStore creates a merkle tree from block, stores it using a +// linear array, and returns a slice of the backing array. A linear array was +// chosen as opposed to an actual tree structure since it uses about half as +// much memory. The following describes a merkle tree and how it is stored in +// a linear array. +// +// A merkle tree is a tree in which every non-leaf node is the hash of its +// children nodes. A diagram depicting how this works for bitcoin transactions +// where h(x) is a double sha256 follows: +// +// root = h1234 = h(h12 + h34) +// / \ +// h12 = h(h1 + h2) h34 = h(h3 + h4) +// / \ / \ +// h1 = h(tx1) h2 = h(tx2) h3 = h(tx3) h4 = h(tx4) +// +// The above stored as a linear array is as follows: +// +// [h1 h2 h3 h4 h12 h34 root] +// +// As the above shows, the merkle root is always the last element in the array. +// +// The number of inputs is not always a power of two which results in a +// balanced tree structure as above. In that case, parent nodes with no +// children are also zero and parent nodes with only a single left node +// are calculated by concatenating the left node with itself before hashing. +// Since this function uses nodes that are pointers to the hashes, empty nodes +// will be nil. 
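+//
+// For example, a block with three transactions is padded out to the next
+// power of two (four leaf slots), giving a backing array of 2*4-1 = 7
+// entries:
+//
+//	[h1 h2 h3 nil h12 h33 root]
+//
+// where h33 = h(h3 + h3) per the single-child rule above and
+// root = h(h12 + h33).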
+func BuildMerkleTreeStore(block *btcutil.Block) []*btcwire.ShaHash { + numTransactions := len(block.MsgBlock().Transactions) + + // Calculate how many entries are required to hold the binary merkle + // tree as a linear array and create an array of that size. + nextPoT := nextPowerOfTwo(numTransactions) + arraySize := nextPoT*2 - 1 + merkles := make([]*btcwire.ShaHash, arraySize) + + // Create the base transaction shas and populate the array with them. + for i := 0; i < numTransactions; i++ { + // Ignore the error since the only reason TxSha can fail is + // if the index is out of range which is impossible here due + // to using a loop over the existing transactions. + sha, _ := block.TxSha(i) + merkles[i] = sha + } + + // Start the array offset after the last transaction and adjusted to the + // next power of two. + offset := nextPoT + for i := 0; i < arraySize-1; i += 2 { + switch { + // When there is no left child node, the parent is nil too. + case merkles[i] == nil: + merkles[offset] = nil + + // When there is no right child, the parent is generated by + // hashing the concatenation of the left child with itself. + case merkles[i+1] == nil: + newSha := hashMerkleBranches(merkles[i], merkles[i]) + merkles[offset] = newSha + + // The normal case sets the parent node to the double sha256 + // of the concatentation of the left and right children. + default: + newSha := hashMerkleBranches(merkles[i], merkles[i+1]) + merkles[offset] = newSha + } + offset++ + } + + return merkles +} diff --git a/merkle_test.go b/merkle_test.go new file mode 100644 index 000000000..cd2d45321 --- /dev/null +++ b/merkle_test.go @@ -0,0 +1,24 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain_test + +import ( + "github.com/conformal/btcchain" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "testing" +) + +// TestMerkle tests the BuildMerkleTreeStore API. +func TestMerkle(t *testing.T) { + block := btcutil.NewBlock(&Block100000, btcwire.ProtocolVersion) + merkles := btcchain.BuildMerkleTreeStore(block) + calculatedMerkleRoot := merkles[len(merkles)-1] + wantMerkle := &Block100000.Header.MerkleRoot + if !wantMerkle.IsEqual(calculatedMerkleRoot) { + t.Errorf("BuildMerkleTreeStore: merkle root mismatch - "+ + "got %v, want %v", calculatedMerkleRoot, wantMerkle) + } +} diff --git a/notifications.go b/notifications.go new file mode 100644 index 000000000..5cc544f24 --- /dev/null +++ b/notifications.go @@ -0,0 +1,76 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" +) + +// NotificationType represents the type of a notification message. +type NotificationType int + +// Constants for the type of a notification message. +const ( + // NTOrphanBlock indicates an orphan block was processed and the + // associated block hash is the root of all known orphans which should + // be used to request the missing blocks. + NTOrphanBlock NotificationType = iota + + // NTBlockAccepted indicates the associated block was accepted into + // the block chain. Note that this does not necessarily mean it was + // added to the main chain. For that, use NTBlockConnected. + NTBlockAccepted + + // NTBlockConnected indicates the associated block was connected to the + // main chain. 
+ NTBlockConnected + + // NTBlockDisconnected indicates the associated block was disconnected + // from the main chain. + NTBlockDisconnected +) + +// notificationTypeStrings is a map of notification types back to their constant +// names for pretty printing. +var notificationTypeStrings = map[NotificationType]string{ + NTOrphanBlock: "NTOrphanBlock", + NTBlockAccepted: "NTBlockAccepted", + NTBlockConnected: "NTBlockConnected", + NTBlockDisconnected: "NTBlockDisconnected", +} + +// String returns the NotificationType in human-readable form. +func (n NotificationType) String() string { + if s, ok := notificationTypeStrings[n]; ok { + return s + } + return fmt.Sprintf("Unknown Notification Type (%d)", int(n)) +} + +// Notification defines an asynchronous notification that is sent to the caller +// over the notification channel provided during the call to New and consists +// of a notification type as well as associated data that depends on the type as +// follows: +// - NTOrphanBlock: *btcwire.ShaHash +// - NTBlockAccepted: *btcutil.Block +// - NTBlockConnected: *btcutil.Block +// - NTBlockDisconnected: *btcutil.Block +type Notification struct { + Type NotificationType + Data interface{} +} + +// sendNotification sends a notification with the passed type and data if the +// caller requested notifications by providing a channel in the call to New. +func (b *BlockChain) sendNotification(typ NotificationType, data interface{}) { + // Ignore it if the caller didn't request notifications. + if b.notifications == nil { + return + } + + // Generate and send the notification. + n := Notification{Type: typ, Data: data} + b.notifications <- &n +} diff --git a/process.go b/process.go new file mode 100644 index 000000000..5b9eef988 --- /dev/null +++ b/process.go @@ -0,0 +1,175 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" +) + +// RuleError identifies a rule violation. It is used to indicate that +// processing of a block or transaction failed due to one of the many validation +// rules. The caller can use type assertions to determine if a failure was +// specifically due to a rule violation. +type RuleError string + +// Error satisfies the error interface to print human-readable errors. +func (e RuleError) Error() string { + return string(e) +} + +// blockExists determines whether a block with the given hash exists either in +// the main chain or any side chains. +func (b *BlockChain) blockExists(hash *btcwire.ShaHash) bool { + // Check memory chain first (could be main chain or side chain blocks). + if _, ok := b.index[*hash]; ok { + return true + } + + // Check in database (rest of main chain not in memory). + return b.db.ExistsSha(hash) +} + +// processOrphans determines if there are any orphans which depend on the passed +// block hash (they are no longer orphans if true) and potentially accepts them. +// It repeats the process for the newly accepted blocks (to detect further +// orphans which may no longer be orphans) until there are no more. +func (b *BlockChain) processOrphans(hash *btcwire.ShaHash) error { + processHashes := []*btcwire.ShaHash{hash} + for len(processHashes) > 0 { + // Pop the first hash to process from the slice. + processHash := processHashes[0] + processHashes = processHashes[1:] + + // Look up all orphans that are parented by the block we just + // accepted. 
This will typically only be one, but it could + // be multiple if multiple blocks are mined and broadcast + // around the same time. The one with the most proof of work + // will eventually win out. + for _, orphan := range b.prevOrphans[*processHash] { + // Remove the orphan from the orphan pool. + // It's safe to ignore the error on Sha since the hash + // is already cached. + orphanHash, _ := orphan.block.Sha() + b.removeOrphanBlock(orphan) + + // Potentially accept the block into the block chain. + err := b.maybeAcceptBlock(orphan.block) + if err != nil { + return err + } + + // Add this block to the list of blocks to process so + // any orphan blocks that depend on this block are + // handled too. + processHashes = append(processHashes, orphanHash) + } + } + return nil +} + +// ProcessBlock is the main workhorse for handling insertion of new blocks into +// the block chain. It includes functionality such as rejecting duplicate +// blocks, ensuring blocks follow all rules, orphan handling, and insertion into +// the block chain along with best chain selection and reorganization. +func (b *BlockChain) ProcessBlock(block *btcutil.Block) error { + blockHash, err := block.Sha() + if err != nil { + return err + } + log.Debugf("Processing block %v", blockHash) + + // The block must not already exist in the main chain or side chains. + if b.blockExists(blockHash) { + str := fmt.Sprintf("already have block %v", blockHash) + return RuleError(str) + } + + // The block must not already exist as an orphan. + if _, exists := b.orphans[*blockHash]; exists { + str := fmt.Sprintf("already have block (orphan) %v", blockHash) + return RuleError(str) + } + + // Perform preliminary sanity checks on the block and its transactions. + err = checkBlockSanity(block) + if err != nil { + return err + } + + // Find the latest known checkpoint and perform some additional checks + // based on the checkpoint. This provides a few nice properties such as + // preventing forks from blocks before the last checkpoint, rejecting + // easy to mine, but otherwise bogus, blocks that could be used to eat + // memory, and ensuring expected (versus claimed) proof of work + // requirements since the last checkpoint are met. + blockHeader := block.MsgBlock().Header + checkpointBlock, err := b.findLatestKnownCheckpoint() + if err != nil { + return err + } + if checkpointBlock != nil { + // Ensure the block timestamp is after the checkpoint timestamp. + checkpointHeader := checkpointBlock.MsgBlock().Header + checkpointTime := checkpointHeader.Timestamp + if blockHeader.Timestamp.Before(checkpointTime) { + str := fmt.Sprintf("block %v has timestamp %v before "+ + "last checkpoint timestamp %v", blockHash, + blockHeader.Timestamp, checkpointTime) + return RuleError(str) + } + + // Even though the checks prior to now have already ensured the + // proof of work exceeds the claimed amount, the claimed amount + // is a field in the block header which could be forged. This + // check ensures the proof of work is at least the minimum + // expected based on elapsed time since the last checkpoint and + // maximum adjustment allowed by the retarget rules. 
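+		// Note that the comparison below operates on decoded targets,
+		// where a numerically larger target means less work, so the
+		// block is rejected when its target is greater than the easiest
+		// target allowed for the time elapsed since the checkpoint.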
+ duration := blockHeader.Timestamp.Sub(checkpointTime) + requiredTarget := CompactToBig(calcEasiestDifficulty( + checkpointHeader.Bits, duration)) + currentTarget := CompactToBig(blockHeader.Bits) + if currentTarget.Cmp(requiredTarget) > 0 { + str := fmt.Sprintf("block target difficulty of %064x "+ + "is too low when compared to the previous "+ + "checkpoint", currentTarget) + return RuleError(str) + } + } + + // Handle orphan blocks. + prevHash := &blockHeader.PrevBlock + if !prevHash.IsEqual(zeroHash) && !b.blockExists(prevHash) { + // Add the orphan block to the orphan pool. + log.Infof("Adding orphan block %v", blockHash) + b.addOrphanBlock(block) + + // Get the hash for the head of the orphaned block chain for + // this block and notify the caller so it can request missing + // blocks. + orphanRoot := b.getOrphanRoot(prevHash) + b.sendNotification(NTOrphanBlock, orphanRoot) + return nil + } + + // The block has passed all context independent checks and appears sane + // enough to potentially accept it into the block chain. + err = b.maybeAcceptBlock(block) + if err != nil { + return err + } + + // Accept any orphan blocks that depend on this block (they are no + // longer orphans) and repeat for those accepted blocks until there are + // no more. + err = b.processOrphans(blockHash) + if err != nil { + return err + } + + log.Debugf("Accepted block %v", blockHash) + return nil +} diff --git a/reorganization_test.go b/reorganization_test.go new file mode 100644 index 000000000..70fcf0549 --- /dev/null +++ b/reorganization_test.go @@ -0,0 +1,133 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain_test + +import ( + "compress/bzip2" + "encoding/binary" + "github.com/conformal/btcchain" + "github.com/conformal/btcdb" + _ "github.com/conformal/btcdb/sqlite3" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "io" + "os" + "path/filepath" + "strings" + "testing" +) + +// TestReorganization loads a set of test blocks which force a chain +// reorganization to test the block chain handling code. +// The test blocks were originally from a post on the bitcoin talk forums: +// https://bitcointalk.org/index.php?topic=46370.msg577556#msg577556 +func TestReorganization(t *testing.T) { + // Intentionally load the side chain blocks out of order to ensure + // orphans are handled properly along with chain reorganization. + testFiles := [...]string{ + "blk_0_to_4.dat.bz2", + "blk_4A.dat.bz2", + "blk_5A.dat.bz2", + "blk_3A.dat.bz2", + } + + var blocks []*btcutil.Block + for _, file := range testFiles { + blockTmp, err := loadBlocks(file) + if err != nil { + t.Errorf("Error loading file: %v\n", err) + } + for _, block := range blockTmp { + blocks = append(blocks, block) + } + } + + t.Logf("Number of blocks: %v\n", len(blocks)) + + dbname := "chaintest" + _ = os.Remove(dbname) + db, err := btcdb.CreateDB("sqlite", dbname) + if err != nil { + t.Errorf("Error creating db: %v\n", err) + } + // Clean up + defer os.Remove(dbname) + defer db.Close() + + // Since we're not dealing with the real block chain, disable + // checkpoints and set the coinbase maturity to 1. 
+ blockChain := btcchain.New(db, btcwire.MainNet, nil) + blockChain.DisableCheckpoints(true) + btcchain.TstSetCoinbaseMaturity(1) + + for i := 1; i < len(blocks); i++ { + err = blockChain.ProcessBlock(blocks[i]) + if err != nil { + t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) + return + } + } + db.Sync() + + return +} + +// loadBlocks reads files containing bitcoin block data (gzipped but otherwise +// in the format bitcoind writes) from disk and returns them as an array of +// btcutil.Block. This is largely borrowed from the test code in btcdb. +func loadBlocks(filename string) (blocks []*btcutil.Block, err error) { + filename = filepath.Join("testdata/", filename) + + var network = btcwire.MainNet + var dr io.Reader + var fi io.ReadCloser + + fi, err = os.Open(filename) + if err != nil { + return + } + + if strings.HasSuffix(filename, ".bz2") { + dr = bzip2.NewReader(fi) + } else { + dr = fi + } + defer fi.Close() + + var block *btcutil.Block + + err = nil + for height := int64(1); err == nil; height++ { + var rintbuf uint32 + err = binary.Read(dr, binary.LittleEndian, &rintbuf) + if err == io.EOF { + // hit end of file at expected offset: no warning + height-- + err = nil + break + } + if err != nil { + break + } + if rintbuf != uint32(network) { + break + } + err = binary.Read(dr, binary.LittleEndian, &rintbuf) + blocklen := rintbuf + + rbytes := make([]byte, blocklen) + + // read block + dr.Read(rbytes) + + block, err = btcutil.NewBlockFromBytes(rbytes, btcwire.ProtocolVersion) + if err != nil { + return + } + blocks = append(blocks, block) + } + + return +} diff --git a/scriptval.go b/scriptval.go new file mode 100644 index 000000000..37eec3b35 --- /dev/null +++ b/scriptval.go @@ -0,0 +1,136 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" + "github.com/conformal/btcscript" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "math" + "time" +) + +// txValidate is used to track results of validating scripts for each +// transaction input index. +type txValidate struct { + txIndex int + err error +} + +// txProcessList +type txProcessList struct { + txsha btcwire.ShaHash + tx *btcwire.MsgTx +} + +// validateTxIn validates a the script pair for the passed spending transaction +// (along with the specific input index) and origin transaction (with the +// specific output index). +func validateTxIn(txInIdx int, txin *btcwire.TxIn, txSha *btcwire.ShaHash, tx *btcwire.MsgTx, pver uint32, timestamp time.Time, originTx *btcwire.MsgTx) error { + // If the input transaction has no previous input, there is nothing + // to check. 
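+	// A previous outpoint index of math.MaxUint32 marks a coinbase-style
+	// input, which has no originating output to validate against.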
+ originTxIdx := txin.PreviousOutpoint.Index + if originTxIdx == math.MaxUint32 { + return nil + } + + if originTxIdx >= uint32(len(originTx.TxOut)) { + originTxSha := &txin.PreviousOutpoint.Hash + log.Warnf("unable to locate source tx %v spending tx %v", originTxSha, &txSha) + return fmt.Errorf("invalid index %x", originTxIdx) + } + + sigScript := txin.SignatureScript + pkScript := originTx.TxOut[originTxIdx].PkScript + engine, err := btcscript.NewScript(sigScript, pkScript, txInIdx, tx, + pver, timestamp.After(btcscript.Bip16Activation)) + if err != nil { + return err + } + + err = engine.Execute() + if err != nil { + log.Warnf("validate of input %v failed: %v", txInIdx, err) + return err + } + + return nil +} + +// validateAllTxIn validates the scripts for all of the passed transaction +// inputs using multiple goroutines. +func validateAllTxIn(txsha *btcwire.ShaHash, txValidator *btcwire.MsgTx, pver uint32, timestamp time.Time, job []*btcwire.TxIn, txStore map[btcwire.ShaHash]*txData) (err error) { + c := make(chan txValidate) + resultErrors := make([]error, len(job)) + + var currentItem int + var completedItems int + + processFunc := func(txInIdx int) { + log.Tracef("validating tx %v input %v len %v", + &txsha, currentItem, len(job)) + txin := job[txInIdx] + originTxSha := &txin.PreviousOutpoint.Hash + origintxidx := txin.PreviousOutpoint.Index + + var originTx *btcwire.MsgTx + if origintxidx != math.MaxUint32 { + txInfo, ok := txStore[*originTxSha] + if !ok { + //wtf? + fmt.Printf("obj not found in txStore %v", + originTxSha) + } + originTx = txInfo.tx + } + err := validateTxIn(txInIdx, job[txInIdx], txsha, txValidator, + pver, timestamp, originTx) + r := txValidate{txInIdx, err} + c <- r + } + for currentItem = 0; currentItem < len(job) && currentItem < 16; currentItem++ { + go processFunc(currentItem) + } + for completedItems < len(job) { + select { + case result := <-c: + completedItems++ + resultErrors[result.txIndex] = result.err + // would be nice to determine if we could stop + // on early errors here instead of running more. + if err == nil { + err = result.err + } + + if currentItem < len(job) { + go processFunc(currentItem) + currentItem++ + } + } + } + for i := 0; i < len(job); i++ { + if resultErrors[i] != nil { + log.Warnf("tx %v failed input %v, err %v", &txsha, i, resultErrors[i]) + } + } + return +} + +// checkBlockScripts executes and validates the scripts for all transactions in +// the passed block. 
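+// The per-input work is delegated to validateAllTxIn above, which keeps the
+// script engine running for up to 16 inputs concurrently.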
+func checkBlockScripts(block *btcutil.Block, txStore map[btcwire.ShaHash]*txData) error { + pver := block.ProtocolVersion() + timestamp := block.MsgBlock().Header.Timestamp + for i, tx := range block.MsgBlock().Transactions { + txHash, _ := block.TxSha(i) + err := validateAllTxIn(txHash, tx, pver, timestamp, tx.TxIn, txStore) + if err != nil { + return err + } + } + + return nil +} diff --git a/test_coverage.txt b/test_coverage.txt new file mode 100644 index 000000000..736687db3 --- /dev/null +++ b/test_coverage.txt @@ -0,0 +1,77 @@ + +github.com/conformal/btcchain/chain.go BlockChain.removeOrphanBlock 100.00% (12/12) +github.com/conformal/btcchain/chain.go BlockChain.getOrphanRoot 100.00% (7/7) +github.com/conformal/btcchain/checkpoints.go init 100.00% (6/6) +github.com/conformal/btcchain/merkle.go hashMerkleBranches 100.00% (5/5) +github.com/conformal/btcchain/difficulty.go ShaHashToBig 100.00% (5/5) +github.com/conformal/btcchain/merkle.go nextPowerOfTwo 100.00% (4/4) +github.com/conformal/btcchain/chain.go newBlockNode 100.00% (4/4) +github.com/conformal/btcchain/difficulty.go calcWork 100.00% (3/3) +github.com/conformal/btcchain/process.go BlockChain.blockExists 100.00% (3/3) +github.com/conformal/btcchain/chain.go New 100.00% (2/2) +github.com/conformal/btcchain/checkpoints.go newShaHashFromStr 100.00% (2/2) +github.com/conformal/btcchain/log.go DisableLog 100.00% (1/1) +github.com/conformal/btcchain/validate.go calcBlockSubsidy 100.00% (1/1) +github.com/conformal/btcchain/timesorter.go timeSorter.Less 100.00% (1/1) +github.com/conformal/btcchain/log.go init 100.00% (1/1) +github.com/conformal/btcchain/timesorter.go timeSorter.Swap 100.00% (1/1) +github.com/conformal/btcchain/checkpoints.go BlockChain.DisableCheckpoints 100.00% (1/1) +github.com/conformal/btcchain/timesorter.go timeSorter.Len 100.00% (1/1) +github.com/conformal/btcchain/merkle.go BuildMerkleTreeStore 94.12% (16/17) +github.com/conformal/btcchain/chain.go BlockChain.getReorganizeNodes 92.86% (13/14) +github.com/conformal/btcchain/process.go BlockChain.processOrphans 91.67% (11/12) +github.com/conformal/btcchain/txlookup.go disconnectTransactions 90.91% (10/11) +github.com/conformal/btcchain/txlookup.go BlockChain.fetchTxList 88.57% (31/35) +github.com/conformal/btcchain/scriptval.go validateAllTxIn 87.88% (29/33) +github.com/conformal/btcchain/chain.go BlockChain.calcPastMedianTime 87.50% (14/16) +github.com/conformal/btcchain/scriptval.go checkBlockScripts 87.50% (7/8) +github.com/conformal/btcchain/chain.go BlockChain.connectBestChain 86.96% (20/23) +github.com/conformal/btcchain/validate.go countSigOps 86.67% (13/15) +github.com/conformal/btcchain/chain.go BlockChain.connectBlock 83.33% (10/12) +github.com/conformal/btcchain/validate.go isCoinBase 83.33% (5/6) +github.com/conformal/btcchain/chain.go BlockChain.reorganizeChain 80.77% (21/26) +github.com/conformal/btcchain/chain.go BlockChain.isMajorityVersion 80.00% (8/10) +github.com/conformal/btcchain/txlookup.go BlockChain.fetchInputTransactions 78.26% (18/23) +github.com/conformal/btcchain/chain.go BlockChain.getPrevNodeFromBlock 77.78% (7/9) +github.com/conformal/btcchain/chain.go BlockChain.disconnectBlock 76.92% (10/13) +github.com/conformal/btcchain/chain.go BlockChain.addOrphanBlock 75.00% (12/16) +github.com/conformal/btcchain/difficulty.go CompactToBig 75.00% (9/12) +github.com/conformal/btcchain/validate.go BlockChain.checkConnectBlock 68.52% (37/54) +github.com/conformal/btcchain/validate.go checkBlockSanity 66.67% (30/45) 
+github.com/conformal/btcchain/validate.go isNullOutpoint 66.67% (2/3) +github.com/conformal/btcchain/scriptval.go validateTxIn 64.71% (11/17) +github.com/conformal/btcchain/validate.go checkTransactionInputs 63.64% (28/44) +github.com/conformal/btcchain/validate.go checkTransactionSanity 62.16% (23/37) +github.com/conformal/btcchain/txlookup.go connectTransactions 60.00% (9/15) +github.com/conformal/btcchain/validate.go isBIP0030Node 60.00% (3/5) +github.com/conformal/btcchain/validate.go BlockChain.checkBIP0030 57.14% (8/14) +github.com/conformal/btcchain/validate.go checkProofOfWork 56.25% (9/16) +github.com/conformal/btcchain/process.go BlockChain.ProcessBlock 54.55% (24/44) +github.com/conformal/btcchain/chain.go BlockChain.loadBlockNode 50.00% (11/22) +github.com/conformal/btcchain/notifications.go BlockChain.sendNotification 50.00% (2/4) +github.com/conformal/btcchain/checkpoints.go BlockChain.LatestCheckpoint 50.00% (2/4) +github.com/conformal/btcchain/accept.go BlockChain.maybeAcceptBlock 49.23% (32/65) +github.com/conformal/btcchain/chain.go BlockChain.getPrevNodeFromNode 33.33% (4/12) +github.com/conformal/btcchain/checkpoints.go BlockChain.verifyCheckpoint 33.33% (2/6) +github.com/conformal/btcchain/validate.go isFinalizedTransaction 23.08% (3/13) +github.com/conformal/btcchain/checkpoints.go BlockChain.findLatestKnownCheckpoint 18.18% (2/11) +github.com/conformal/btcchain/difficulty.go BlockChain.calcNextRequiredDifficulty 10.71% (3/28) +github.com/conformal/btcchain/checkpoints.go BlockChain.IsCheckpointCandidate 0.00% (0/32) +github.com/conformal/btcchain/validate.go countP2SHSigOps 0.00% (0/26) +github.com/conformal/btcchain/difficulty.go BigToCompact 0.00% (0/16) +github.com/conformal/btcchain/validate.go checkSerializedHeight 0.00% (0/12) +github.com/conformal/btcchain/difficulty.go calcEasiestDifficulty 0.00% (0/9) +github.com/conformal/btcchain/chain.go removeChildNode 0.00% (0/8) +github.com/conformal/btcchain/log.go SetLogWriter 0.00% (0/7) +github.com/conformal/btcchain/checkpoints.go isNonstandardTransaction 0.00% (0/5) +github.com/conformal/btcchain/checkpoints.go BlockChain.checkpointData 0.00% (0/4) +github.com/conformal/btcchain/validate.go isTransactionSpent 0.00% (0/4) +github.com/conformal/btcchain/notifications.go NotificationType.String 0.00% (0/3) +github.com/conformal/btcchain/chain.go addChildrenWork 0.00% (0/3) +github.com/conformal/btcchain/log.go UseLogger 0.00% (0/1) +github.com/conformal/btcchain/chain.go BlockChain.DisableVerify 0.00% (0/1) +github.com/conformal/btcchain/log.go logClosure.String 0.00% (0/1) +github.com/conformal/btcchain/process.go RuleError.Error 0.00% (0/1) +github.com/conformal/btcchain/log.go newLogClosure 0.00% (0/1) +github.com/conformal/btcchain ------------------------------------- 59.02% (569/964) + diff --git a/testdata/blk_0_to_4.dat.bz2 b/testdata/blk_0_to_4.dat.bz2 new file mode 100644 index 000000000..274c710d2 Binary files /dev/null and b/testdata/blk_0_to_4.dat.bz2 differ diff --git a/testdata/blk_3A.dat.bz2 b/testdata/blk_3A.dat.bz2 new file mode 100644 index 000000000..01266565d Binary files /dev/null and b/testdata/blk_3A.dat.bz2 differ diff --git a/testdata/blk_4A.dat.bz2 b/testdata/blk_4A.dat.bz2 new file mode 100644 index 000000000..19b409e75 Binary files /dev/null and b/testdata/blk_4A.dat.bz2 differ diff --git a/testdata/blk_5A.dat.bz2 b/testdata/blk_5A.dat.bz2 new file mode 100644 index 000000000..47bff9038 Binary files /dev/null and b/testdata/blk_5A.dat.bz2 differ diff --git a/testdata/reorgtest.hex 
b/testdata/reorgtest.hex new file mode 100644 index 000000000..5b9e75e70 --- /dev/null +++ b/testdata/reorgtest.hex @@ -0,0 +1,180 @@ +File path: reorgTest/blk_0_to_4.dat + +Block 0: + f9beb4d9 + 1d010000 + + 01000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 3ba3edfd 7a7b12b2 7ac72c3e 67768f61 7fc81bc3 888a5132 3a9fb8aa + 4b1e5e4a 29ab5f49 ffff001d 1dac2b7c + 01 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff4d04ff ff001d01 04455468 65205469 6d657320 30332f4a + 616e2f32 30303920 4368616e 63656c6c 6f72206f 6e206272 696e6b20 6f662073 + 65636f6e 64206261 696c6f75 7420666f 72206261 6e6b73ff ffffff01 00f2052a + 01000000 43410467 8afdb0fe 55482719 67f1a671 30b7105c d6a828e0 3909a679 + 62e0ea1f 61deb649 f6bc3f4c ef38c4f3 5504e51e c112de5c 384df7ba 0b8d578a + 4c702b6b f11d5fac 00000000 +Block 1: + f9beb4d9 + d4000000 + + 01000000 6fe28c0a b6f1b372 c1a6a246 ae63f74f 931e8365 e15a089c 68d61900 + 00000000 3bbd67ad e98fbbb7 0718cd80 f9e9acf9 3b5fae91 7bb2b41d 4c3bb82c + 77725ca5 81ad5f49 ffff001d 44e69904 + 01 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff04722f 2e2bffff ffff0100 f2052a01 00000043 41046868 + 0737c76d abb801cb 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 + b5ac9e8b 4c9f49be 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ac00 + 000000 +Block 2: + f9beb4d9 + 95010000 + + 01000000 13ca7940 4c11c63e ca906bbd f190b751 2872b857 1b5143ae e8cb5737 + 00000000 fc07c983 d7391736 0aeda657 29d0d4d3 2533eb84 76ee9d64 aa27538f + 9b4fc00a d9af5f49 ffff001d 630bea22 + 02 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff04eb96 14e5ffff ffff0100 f2052a01 00000043 41046868 + 0737c76d abb801cb 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 + b5ac9e8b 4c9f49be 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ac00 + 000000 + + 01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207 + 4fdcb8ee d2000000 004a4930 46022100 3dde52c6 5e339f45 7fe1015e 70eed208 + 872eb71e dd484c07 206b190e cb2ec3f8 02210011 c78dcfd0 3d43fa63 61242a33 + 6291ba2a 8c1ef5bc d5472126 2468f2bf 8dee4d01 ffffffff 0200ca9a 3b000000 + 001976a9 14cb2abd e8bccacc 32e893df 3a054b9e f7f227a4 ce88ac00 286bee00 + 00000019 76a914ee 26c56fc1 d942be8d 7a24b2a1 001dd894 69398088 ac000000 + 00 +Block 3: + f9beb4d9 + 96020000 + + 01000000 7d338254 0506faab 0d4cf179 45dda023 49db51f9 6233f24c 28002258 + 00000000 4806fe80 bf85931b 882ea645 77ca5a03 22bb8af2 3f277b20 55f160cd + 972c8e8b 31b25f49 ffff001d e8f0c653 + 03 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff044abd 8159ffff ffff0100 f2052a01 00000043 4104b95c + 249d84f4 17e3e395 a1274254 28b54067 1cc15881 eb828c17 b722a53f c599e21c + a5e56c90 f340988d 3933acc7 6beb832f d64cab07 8ddf3ce7 32923031 d1a8ac00 + 000000 + + 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb + bad253d3 77010000 008c4930 46022100 96ee0d02 b35fd61e 4960b44f f396f67e + 01fe17f9 de4e0c17 b6a963bd ab2b50a6 02210034 920d4daa 7e9f8abe 5675c931 + 495809f9 0b9c1189 d05fbaf1 dd6696a5 b0d8f301 41046868 0737c76d abb801cb + 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 b5ac9e8b 4c9f49be + 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ffff ffff0100 286bee00 + 00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000 + 00 + + 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb + bad253d3 77000000 008c4930 
46022100 b08b922a c4bde411 1c229f92 9fe6eb6a + 50161f98 1f4cf47e a9214d35 bf74d380 022100d2 f6640327 e677a1e1 cc474991 + b9a48ba5 bd1e0c94 d1c8df49 f7b0193b 7ea4fa01 4104b95c 249d84f4 17e3e395 + a1274254 28b54067 1cc15881 eb828c17 b722a53f c599e21c a5e56c90 f340988d + 3933acc7 6beb832f d64cab07 8ddf3ce7 32923031 d1a8ffff ffff0100 ca9a3b00 + 00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000 + 00 + +Block 4: + f9beb4d9 + 73010000 + + 01000000 5da36499 06f35e09 9be42a1d 87b6dd42 11bc1400 6c220694 0807eaae + 00000000 48eeeaed 2d9d8522 e6201173 743823fd 4b87cd8a ca8e6408 ec75ca38 + 302c2ff0 89b45f49 ffff001d 00530839 + 02 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff04d41d 2213ffff ffff0100 f2052a01 00000043 4104678a + fdb0fe55 48271967 f1a67130 b7105cd6 a828e039 09a67962 e0ea1f61 deb649f6 + bc3f4cef 38c4f355 04e51ec1 12de5c38 4df7ba0b 8d578a4c 702b6bf1 1d5fac00 + 000000 + + 01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207 + 4fdcb8ee d2000000 004a4930 46022100 8c8fd57b 48762135 8d8f3e69 19f33e08 + 804736ff 83db47aa 248512e2 6df9b8ba 022100b0 c59e5ee7 bfcbfcd1 a4d83da9 + 55fb260e fda7f42a 25522625 a3d6f2d9 1174a701 ffffffff 0100f205 2a010000 + 001976a9 14c52266 4fb0e55c dc5c0cea 73b4aad9 7ec83432 3288ac00 000000 + +File path: reorgTest/blk_3A.dat +Block 3A: + f9beb4d9 + 96020000 + + 01000000 7d338254 0506faab 0d4cf179 45dda023 49db51f9 6233f24c 28002258 + 00000000 5a15f573 1177a353 bdca7aab 20e16624 dfe90adc 70accadc 68016732 + 302c20a7 31b25f49 ffff001d 6a901440 + 03 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff04ad1b e7d5ffff ffff0100 f2052a01 00000043 4104ed83 + 704c95d8 29046f1a c2780621 1132102c 34e9ac7f fa1b7111 0658e5b9 d1bdedc4 + 16f5cefc 1db0625c d0c75de8 192d2b59 2d7e3b00 bcfb4a0e 860d880f d1fcac00 + 000000 + + 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb + bad253d3 77010000 008c4930 46022100 96ee0d02 b35fd61e 4960b44f f396f67e + 01fe17f9 de4e0c17 b6a963bd ab2b50a6 02210034 920d4daa 7e9f8abe 5675c931 + 495809f9 0b9c1189 d05fbaf1 dd6696a5 b0d8f301 41046868 0737c76d abb801cb + 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 b5ac9e8b 4c9f49be + 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ffff ffff0100 286bee00 + 00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000 + 00 + + 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb + bad253d3 77000000 008c4930 46022100 9cc67ddd aa6f592a 6b2babd4 d6ff954f + 25a784cf 4fe4bb13 afb9f49b 08955119 022100a2 d99545b7 94080757 fcf2b563 + f2e91287 86332f46 0ec6b90f f085fb28 41a69701 4104b95c 249d84f4 17e3e395 + a1274254 28b54067 1cc15881 eb828c17 b722a53f c599e21c a5e56c90 f340988d + 3933acc7 6beb832f d64cab07 8ddf3ce7 32923031 d1a8ffff ffff0100 ca9a3b00 + 00000019 76a914ee 26c56fc1 d942be8d 7a24b2a1 001dd894 69398088 ac000000 + 00 + +File path: reorgTest/blk_4A.dat +Block 4A: + f9beb4d9 + d4000000 + + 01000000 aae77468 2205667d 4f413a58 47cc8fe8 9795f1d5 645d5b24 1daf3c92 + 00000000 361c9cde a09637a0 d0c05c3b 4e7a5d91 9edb184a 0a4c7633 d92e2ddd + f04cb854 89b45f49 ffff001d 9e9aa1e8 + 01 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff0401b8 f3eaffff ffff0100 f2052a01 00000043 4104678a + fdb0fe55 48271967 f1a67130 b7105cd6 a828e039 09a67962 e0ea1f61 deb649f6 + bc3f4cef 38c4f355 04e51ec1 12de5c38 4df7ba0b 8d578a4c 702b6bf1 1d5fac00 + 000000 + +File path: reorgTest/blk_5A.dat +Block 
5A: + f9beb4d9 + 73010000 + + 01000000 ebc7d0de 9c31a71b 7f41d275 2c080ba4 11e1854b d45cb2cf 8c1e4624 + 00000000 a607774b 79b8eb50 b52a5a32 c1754281 ec67f626 9561df28 57d1fe6a + ea82c696 e1b65f49 ffff001d 4a263577 + 02 + + 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 + 00000000 00ffffff ff049971 0c7dffff ffff0100 f2052a01 00000043 4104678a + fdb0fe55 48271967 f1a67130 b7105cd6 a828e039 09a67962 e0ea1f61 deb649f6 + bc3f4cef 38c4f355 04e51ec1 12de5c38 4df7ba0b 8d578a4c 702b6bf1 1d5fac00 + 000000 + + 01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207 + 4fdcb8ee d2000000 004a4930 46022100 8c8fd57b 48762135 8d8f3e69 19f33e08 + 804736ff 83db47aa 248512e2 6df9b8ba 022100b0 c59e5ee7 bfcbfcd1 a4d83da9 + 55fb260e fda7f42a 25522625 a3d6f2d9 1174a701 ffffffff 0100f205 2a010000 + 001976a9 14c52266 4fb0e55c dc5c0cea 73b4aad9 7ec83432 3288ac00 000000 + diff --git a/timesorter.go b/timesorter.go new file mode 100644 index 000000000..6cb8448d6 --- /dev/null +++ b/timesorter.go @@ -0,0 +1,31 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "time" +) + +// timeSorter implements sort.Interface to allow a slice of timestamps to +// be sorted. +type timeSorter []time.Time + +// Len returns the number of timestamps in the slice. It is part of the +// sort.Interface implementation. +func (s timeSorter) Len() int { + return len(s) +} + +// Swap swaps the timestamps at the passed indices. It is part of the +// sort.Interface implementation. +func (s timeSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less returns whether the timstamp with index i should sort before the +// timestamp with index j. It is part of the sort.Interface implementation. +func (s timeSorter) Less(i, j int) bool { + return s[i].Before(s[j]) +} diff --git a/txlookup.go b/txlookup.go new file mode 100644 index 000000000..aff9eae2e --- /dev/null +++ b/txlookup.go @@ -0,0 +1,248 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "fmt" + "github.com/conformal/btcdb" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" +) + +// txData contains contextual information about transactions such as which block +// they were found in and whether or not the outputs are spent. +type txData struct { + tx *btcwire.MsgTx + hash *btcwire.ShaHash + blockHeight int64 + spent []bool + err error +} + +// connectTransactions updates the passed map by applying transaction and +// spend information for all the transactions in the passed block. Only +// transactions in the passed map are updated. +func connectTransactions(txStore map[btcwire.ShaHash]*txData, block *btcutil.Block) error { + // Loop through all of the transactions in the block to see if any of + // them are ones we need to update and spend based on the results map. + for i, tx := range block.MsgBlock().Transactions { + txHash, err := block.TxSha(i) + if err != nil { + return err + } + + // Update the transaction store with the transaction information + // if it's one of the requested transactions. + if txD, exists := txStore[*txHash]; exists { + txD.tx = tx + txD.blockHeight = block.Height() + txD.spent = make([]bool, len(tx.TxOut)) + txD.err = nil + } + + // Spend the origin transaction output. 
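+		// Only outputs belonging to transactions already present in the
+		// store are marked as spent; inputs that reference transactions
+		// outside the requested set are ignored here.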
+ for _, txIn := range tx.TxIn { + originHash := &txIn.PreviousOutpoint.Hash + originIndex := txIn.PreviousOutpoint.Index + if originTx, exists := txStore[*originHash]; exists { + originTx.spent[originIndex] = true + } + } + } + + return nil +} + +// disconnectTransactions updates the passed map by undoing transaction and +// spend information for all transactions in the passed block. Only +// transactions in the passed map are updated. +func disconnectTransactions(txStore map[btcwire.ShaHash]*txData, block *btcutil.Block) error { + // Loop through all of the transactions in the block to see if any of + // them are ones were need to undo based on the results map. + for i, tx := range block.MsgBlock().Transactions { + txHash, err := block.TxSha(i) + if err != nil { + return err + } + + // Remove this transaction from the transaction store (this is a + // no-op if it's not there). + delete(txStore, *txHash) + + // Unspend the origin transaction output. + for _, txIn := range tx.TxIn { + originHash := &txIn.PreviousOutpoint.Hash + originIndex := txIn.PreviousOutpoint.Index + if originTx, exists := txStore[*originHash]; exists { + originTx.spent[originIndex] = false + } + } + } + + return nil +} + +// fetchTxList fetches transaction data about the provided list of transactions +// from the point of view of the given node. For example, a given node might +// be down a side chain where a transaction hasn't been spent from its point of +// view even though it might have been spent in the main chain (or another side +// chain). Another scenario is where a transaction exists from the point of +// view of the main chain, but doesn't exist in a side chain that branches +// before the block that contains the transaction on the main chain. +func (b *BlockChain) fetchTxList(node *blockNode, txList []*btcwire.ShaHash) (map[btcwire.ShaHash]*txData, error) { + // Get the previous block node. This function is used over simply + // accessing node.parent directly as it will dynamically create previous + // block nodes as needed. This helps allow only the pieces of the chain + // that are needed to remain in memory. + prevNode, err := b.getPrevNodeFromNode(node) + if err != nil { + return nil, err + } + + // The transaction store map needs to have an entry for every requested + // transaction. By default, all the transactions are marked as missing. + // Each entry will be filled in with the appropriate data below. + txStore := make(map[btcwire.ShaHash]*txData) + for _, hash := range txList { + txStore[*hash] = &txData{hash: hash, err: btcdb.TxShaMissing} + } + + // Ask the database (main chain) for the list of transactions. This + // will return the information from the point of view of the end of the + // main chain. + txReplyList := b.db.FetchTxByShaList(txList) + for _, txReply := range txReplyList { + // Lookup the existing results entry to modify. Skip + // this reply if there is no corresponding entry in + // the transaction store map which really should not happen, but + // be safe. + txD, ok := txStore[*txReply.Sha] + if !ok { + continue + } + + // Fill in the transaction details. A copy is used here since + // there is no guarantee the returned data isn't cached and + // this code modifies the data. A bug caused by modifying the + // cached data would likely be difficult to track down and could + // cause subtle errors, so avoid the potential altogether. 
+ txD.err = txReply.Err + if txReply.Err == nil { + txD.tx = txReply.Tx + txD.blockHeight = txReply.Height + txD.spent = make([]bool, len(txReply.TxSpent)) + copy(txD.spent, txReply.TxSpent) + } + } + + // At this point, we have the transaction data from the point of view + // of the end of the main (best) chain. If we haven't selected a best + // chain yet or we are extending the main (best) chain with a new block, + // everything is accurate, so return the results now. + if b.bestChain == nil || (prevNode != nil && prevNode.hash.IsEqual(b.bestChain.hash)) { + return txStore, nil + } + + // The requested node is either on a side chain or is a node on the main + // chain before the end of it. In either case, we need to undo the + // transactions and spend information for the blocks which would be + // disconnected during a reorganize to the point of view of the + // node just before the requested node. + detachNodes, attachNodes := b.getReorganizeNodes(prevNode) + for e := detachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block, err := b.db.FetchBlockBySha(n.hash) + if err != nil { + return nil, err + } + + disconnectTransactions(txStore, block) + } + + // The transaction store is now accurate to either the node where the + // requested node forks off the main chain (in the case where the + // requested node is on a side chain), or the requested node itself if + // the requested node is an old node on the main chain. Entries in the + // attachNodes list indicate the requested node is on a side chain, so + // if there are no nodes to attach, we're done. + if attachNodes.Len() == 0 { + return txStore, nil + } + + // The requested node is on a side chain, so we need to apply the + // transactions and spend information from each of the nodes to attach. + for e := attachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block, exists := b.blockCache[*n.hash] + if !exists { + return nil, fmt.Errorf("unable to find block %v in "+ + "side chain cache for transaction search", + n.hash) + } + + connectTransactions(txStore, block) + } + + return txStore, nil +} + +// fetchInputTransactions fetches the input transactions referenced by the +// transactions in the given block from its point of view. See fetchTxList +// for more details on what the point of view entails. +func (b *BlockChain) fetchInputTransactions(node *blockNode, block *btcutil.Block) (map[btcwire.ShaHash]*txData, error) { + // Build a map of in-flight transactions because some of the inputs in + // this block could be referencing other transactions in this block + // which are not yet in the chain. + txInFlight := map[btcwire.ShaHash]*btcwire.MsgTx{} + for i, tx := range block.MsgBlock().Transactions { + // Get transaction hash. It's safe to ignore the error since + // it's already cached in the nominal code path and the only + // way it can fail is if the index is out of range which is + // impossible here. + txHash, _ := block.TxSha(i) + txInFlight[*txHash] = tx + } + + // Loop through all of the transaction inputs (except for the coinbase + // which has no inputs) collecting them into lists of what is needed and + // what is already known (in-flight). + var txNeededList []*btcwire.ShaHash + txStore := make(map[btcwire.ShaHash]*txData) + for _, tx := range block.MsgBlock().Transactions[1:] { + for _, txIn := range tx.TxIn { + // Add an entry to the transaction store for the needed + // transaction with it set to missing by default. 
+ originHash := &txIn.PreviousOutpoint.Hash + txD := &txData{hash: originHash, err: btcdb.TxShaMissing} + txStore[*originHash] = txD + + // The transaction is already in-flight, so update the + // transaction store acccordingly. Otherwise, we need + // it. + if tx, ok := txInFlight[*originHash]; ok { + txD.tx = tx + txD.blockHeight = node.height + txD.spent = make([]bool, len(tx.TxOut)) + txD.err = nil + } else { + txNeededList = append(txNeededList, originHash) + } + } + } + + // Request the input transaction from the point of view of the node. + txNeededStore, err := b.fetchTxList(node, txNeededList) + if err != nil { + return nil, err + } + + // Merge the results of the requested transactions and the in-flight + // transactions. + for _, txD := range txNeededStore { + txStore[*txD.hash] = txD + } + + return txStore, nil +} diff --git a/validate.go b/validate.go new file mode 100644 index 000000000..f30afd967 --- /dev/null +++ b/validate.go @@ -0,0 +1,877 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain + +import ( + "encoding/binary" + "fmt" + "github.com/conformal/btcdb" + "github.com/conformal/btcscript" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "math" + "time" +) + +const ( + // satoshiPerBitcoin is the number of satoshi in one bitcoin (1 BTC). + satoshiPerBitcoin int64 = 1e8 + + // maxSatoshi is the maximum transaction amount allowed in satoshi. + maxSatoshi int64 = 21e6 * satoshiPerBitcoin + + // maxSigOpsPerBlock is the maximum number of signature operations + // allowed for a block. It is a fraction of the max block payload size. + maxSigOpsPerBlock = btcwire.MaxBlockPayload / 50 + + // lockTimeThreshold is the number below which a lock time is + // interpreted to be a block number. Since an average of one block + // is generated per 10 minutes, this allows blocks for about 9,512 + // years. However, if the field is interpreted as a timestamp, given + // the lock time is a uint32, the max is sometime around 2106. + lockTimeThreshold uint32 = 5e8 // Tue Nov 5 00:53:20 1985 UTC + + // minCoinbaseScriptLen is the minimum length a coinbase script can be. + minCoinbaseScriptLen = 2 + + // maxCoinbaseScriptLen is the maximum length a coinbase script can be. + maxCoinbaseScriptLen = 100 + + // medianTimeBlocks is the number of previous blocks which should be + // used to calculate the median time used to validate block timestamps. + medianTimeBlocks = 11 + + // serializedHeightVersion is the block version which changed block + // coinbases to start with the serialized block height. + serializedHeightVersion = 2 + + // baseSubsidy is the starting subsidy amount for mined blocks. This + // value is halved every subsidyHalvingInterval blocks. + baseSubsidy = 50 * satoshiPerBitcoin + + // subsidyHalvingInterval is the interval of blocks at which the + // baseSubsidy is continually halved. See calcBlockSubsidy for more + // details. + subsidyHalvingInterval = 210000 +) + +var ( + // coinbaseMaturity is the number of blocks required before newly + // mined bitcoins (coinbase transactions) can be spent. This is a + // variable as opposed to a constant because the tests need the ability + // to modify it. + coinbaseMaturity int64 = 100 + + // zeroHash is the zero value for a btcwire.ShaHash and is defined as + // a package level variable to avoid the need to create a new instance + // every time a check is needed. 
+	zeroHash = &btcwire.ShaHash{}
+
+	// block91842Hash is one of the two nodes which violate the rules
+	// set forth in BIP0030.  It is defined as a package level variable to
+	// avoid the need to create a new instance every time a check is needed.
+	block91842Hash = newShaHashFromStr("00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")
+
+	// block91880Hash is one of the two nodes which violate the rules
+	// set forth in BIP0030.  It is defined as a package level variable to
+	// avoid the need to create a new instance every time a check is needed.
+	block91880Hash = newShaHashFromStr("00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")
+)
+
+// isNullOutpoint determines whether or not a previous transaction output point
+// is set.
+func isNullOutpoint(outpoint *btcwire.OutPoint) bool {
+	if outpoint.Index == math.MaxUint32 && outpoint.Hash.IsEqual(zeroHash) {
+		return true
+	}
+	return false
+}
+
+// isCoinBase determines whether or not a transaction is a coinbase.  A coinbase
+// is a special transaction created by miners that has no real inputs.  This is
+// represented in the block chain by a transaction with a single input that has
+// a previous output transaction index set to the maximum value along with a
+// zero hash.
+func isCoinBase(msgTx *btcwire.MsgTx) bool {
+	// A coin base must only have one transaction input.
+	if len(msgTx.TxIn) != 1 {
+		return false
+	}
+
+	// The previous output of a coin base must have a max value index and
+	// a zero hash.
+	prevOut := msgTx.TxIn[0].PreviousOutpoint
+	if prevOut.Index != math.MaxUint32 || !prevOut.Hash.IsEqual(zeroHash) {
+		return false
+	}
+
+	return true
+}
+
+// isFinalizedTransaction determines whether or not a transaction is finalized.
+func isFinalizedTransaction(msgTx *btcwire.MsgTx, blockHeight int64, blockTime time.Time) bool {
+	// Lock time of zero means the transaction is finalized.
+	lockTime := msgTx.LockTime
+	if lockTime == 0 {
+		return true
+	}
+
+	// The lock time field of a transaction is either a block height at
+	// which the transaction is finalized or a timestamp depending on
+	// whether the value is below the lockTimeThreshold.  When it is below
+	// the threshold, it is a block height.
+	blockTimeOrHeight := int64(0)
+	if lockTime < lockTimeThreshold {
+		blockTimeOrHeight = blockHeight
+	} else {
+		blockTimeOrHeight = blockTime.Unix()
+	}
+	if int64(lockTime) < blockTimeOrHeight {
+		return true
+	}
+
+	// At this point, the transaction's lock time hasn't occurred yet, but
+	// the transaction might still be finalized if the sequence number
+	// for all transaction inputs is maxed out.
+	for _, txIn := range msgTx.TxIn {
+		if txIn.Sequence != math.MaxUint32 {
+			return false
+		}
+	}
+	return true
+}
+
+// isBIP0030Node returns whether or not the passed node represents one of the
+// two blocks that violate the BIP0030 rule which prevents transactions from
+// overwriting old ones.
+func isBIP0030Node(node *blockNode) bool {
+	if node.height == 91842 && node.hash.IsEqual(block91842Hash) {
+		return true
+	}
+
+	if node.height == 91880 && node.hash.IsEqual(block91880Hash) {
+		return true
+	}
+
+	return false
+}
+
+// calcBlockSubsidy returns the subsidy amount a block at the provided height
+// should have.  This is mainly used for determining how much the coinbase for
+// newly generated blocks awards as well as validating the coinbase for blocks
+// has the expected value.
+//
+// The subsidy is halved every subsidyHalvingInterval blocks.
Mathematically +// this is: baseSubsidy / 2^(height/subsidyHalvingInterval) +// +// At the target block generation rate this is approximately every 4 +// years. +func calcBlockSubsidy(height int64) int64 { + // Equivalent to: baseSubsidy / 2^(height/subsidyHalvingInterval) + return baseSubsidy >> uint(height/subsidyHalvingInterval) +} + +// checkTransactionSanity performs some preliminary checks on a transaction to +// ensure it is sane. These checks are context free. +func checkTransactionSanity(tx *btcwire.MsgTx) error { + // A transaction must have at least one input. + if len(tx.TxIn) == 0 { + return RuleError("transaction has no inputs") + } + + // A transaction must have at least one output. + if len(tx.TxOut) == 0 { + return RuleError("transaction has no outputs") + } + + // NOTE: bitcoind does size limits checking here, but the size limits + // have already been checked by btcwire for incoming transactions. + // Also, btcwire checks the size limits on send too, so there is no need + // to double check it here. + + // Ensure the transaction amounts are in range. Each transaction + // output must not be negative or more than the max allowed per + // transaction. Also, the total of all outputs must abide by the same + // restrictions. All amounts in a transaction are in a unit value known + // as a satoshi. One bitcoin is a quantity of satoshi as defined by the + // satoshiPerBitcoin constant. + var totalSatoshi int64 + for _, txOut := range tx.TxOut { + satoshi := txOut.Value + if satoshi < 0 { + str := fmt.Sprintf("transaction output has negative "+ + "value of %v", satoshi) + return RuleError(str) + } + if satoshi > maxSatoshi { + str := fmt.Sprintf("transaction output value of %v is "+ + "higher than max allowed value of %v", satoshi, + maxSatoshi) + return RuleError(str) + } + + // TODO(davec): No need to check < 0 here as satoshi is + // guaranteed to be positive per the above check. Also need + // to add overflow checks. + totalSatoshi += satoshi + if totalSatoshi < 0 { + str := fmt.Sprintf("total value of all transaction "+ + "outputs has negative value of %v", totalSatoshi) + return RuleError(str) + } + if totalSatoshi > maxSatoshi { + str := fmt.Sprintf("total value of all transaction "+ + "outputs is %v which is higher than max "+ + "allowed value of %v", totalSatoshi, maxSatoshi) + return RuleError(str) + } + } + + // Check for duplicate transaction inputs. + existingTxOut := make(map[string]bool) + for _, txIn := range tx.TxIn { + prevOut := &txIn.PreviousOutpoint + key := fmt.Sprintf("%v%v", prevOut.Hash, prevOut.Index) + if _, exists := existingTxOut[key]; exists { + return RuleError("transaction contains duplicate outpoint") + } + existingTxOut[key] = true + } + + // Coinbase script length must be between min and max length. + if isCoinBase(tx) { + slen := len(tx.TxIn[0].SignatureScript) + if slen < minCoinbaseScriptLen || slen > maxCoinbaseScriptLen { + str := fmt.Sprintf("coinbase transaction script length "+ + "of %d is out of range (min: %d, max: %d)", + slen, minCoinbaseScriptLen, maxCoinbaseScriptLen) + return RuleError(str) + } + } else { + // Previous transaction outputs referenced by the inputs to this + // transaction must not be null. 
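+		// A null outpoint (zero hash and max index) is only valid as
+		// the single input of a coinbase transaction, which was
+		// handled above.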
+ for _, txIn := range tx.TxIn { + prevOut := &txIn.PreviousOutpoint + if isNullOutpoint(prevOut) { + return RuleError("transaction input refers to " + + "previous output that is null") + } + } + } + + return nil +} + +// checkProofOfWork ensures the block header bits which indicate the target +// difficulty is in min/max range and that the block hash is less than the +// target difficulty as claimed. +func checkProofOfWork(block *btcutil.Block) error { + // The target difficulty must be larger than zero. + header := block.MsgBlock().Header + target := CompactToBig(header.Bits) + if target.Sign() <= 0 { + str := fmt.Sprintf("block target difficulty of %064x is too low", + target) + return RuleError(str) + } + + // The target difficulty must be less than the maximum allowed. + if target.Cmp(powLimit) > 0 { + str := fmt.Sprintf("block target difficulty of %064x is "+ + "higher than max of %064x", target, powLimit) + return RuleError(str) + } + + // The block hash must be less than the claimed target. + blockHash, err := block.Sha() + if err != nil { + return err + } + hashNum := ShaHashToBig(blockHash) + if hashNum.Cmp(target) > 0 { + str := fmt.Sprintf("block hash of %064x is higher than "+ + "expected max of %064x", hashNum, target) + return RuleError(str) + } + + return nil +} + +// countSigOps returns the number of signature operations for all transaction +// input and output scripts in the provided transaction. This uses the +// quicker, but imprecise, signature operation counting mechanism from +// btcscript. +func countSigOps(msgTx *btcwire.MsgTx, isCoinBaseTx bool) (int, error) { + // Choose the starting transaction input based on whether this is a + // coinbase transaction since the coinbase input script should not be + // executed. + txIns := msgTx.TxIn + if isCoinBaseTx { + txIns = txIns[1:] + } + + // Accumulate the number of signature operations in all transaction + // inputs (except the first input if this is a coinbase transaction). + totalSigOps := 0 + for _, txIn := range txIns { + numSigOps, err := btcscript.GetSigOpCount(txIn.SignatureScript) + if err != nil { + return 0, err + } + totalSigOps += numSigOps + } + + // Accumulate the number of signature operations in all transaction + // outputs. + for _, txOut := range msgTx.TxOut { + numSigOps, err := btcscript.GetSigOpCount(txOut.PkScript) + if err != nil { + return 0, err + } + totalSigOps += numSigOps + } + + return totalSigOps, nil +} + +// countP2SHSigOps returns the number of signature operations for all input +// transactions which are of the pay-to-script-hash type. This uses the +// precise, signature operation counting mechanism from btcscript which requires +// access to the input transaction scripts. +func countP2SHSigOps(msgTx *btcwire.MsgTx, isCoinBaseTx bool, txStore map[btcwire.ShaHash]*txData) (int, error) { + // Coinbase transactions have no interesting inputs. + if isCoinBaseTx { + return 0, nil + } + + // TODO(davec): Need to pass the cached version in. + txHash, err := msgTx.TxSha(btcwire.ProtocolVersion) + if err != nil { + return 0, err + } + + // Accumulate the number of signature operations in all transaction + // inputs. + totalSigOps := 0 + for _, txIn := range msgTx.TxIn { + // Ensure the referenced input transaction is available. 
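+		// The passed transaction store is expected to already contain
+		// every referenced input (for example, as populated by
+		// fetchInputTransactions).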
+		txInHash := &txIn.PreviousOutpoint.Hash
+		originTx, exists := txStore[*txInHash]
+		if !exists {
+			return 0, fmt.Errorf("unable to find input transaction "+
+				"%v referenced from transaction %v", txInHash,
+				txHash)
+		}
+
+		// Ensure the output index in the referenced transaction is
+		// available.
+		originTxIndex := txIn.PreviousOutpoint.Index
+		if originTxIndex >= uint32(len(originTx.tx.TxOut)) {
+			return 0, fmt.Errorf("out of bounds input index %d in "+
+				"transaction %v referenced from transaction %v",
+				originTxIndex, txInHash, txHash)
+		}
+
+		// We're only interested in pay-to-script-hash types, so skip
+		// this input if it's not one.
+		pkScript := originTx.tx.TxOut[originTxIndex].PkScript
+		if !btcscript.IsPayToScriptHash(pkScript) {
+			continue
+		}
+
+		// Count the precise number of signature operations in the
+		// referenced public key script.
+		sigScript := txIn.SignatureScript
+		numSigOps, err := btcscript.GetPreciseSigOpCount(sigScript,
+			pkScript, true)
+		if err != nil {
+			return 0, err
+		}
+
+		// We could potentially overflow the accumulator so check for
+		// overflow.
+		lastSigOps := totalSigOps
+		totalSigOps += numSigOps
+		if totalSigOps < lastSigOps {
+			return 0, fmt.Errorf("the public key script from "+
+				"output index %d in transaction %v contains "+
+				"too many signature operations - overflow",
+				originTxIndex, txInHash)
+		}
+	}
+
+	return totalSigOps, nil
+}
+
+// checkBlockSanity performs some preliminary checks on a block to ensure it is
+// sane before continuing with block processing.  These checks are context free.
+func checkBlockSanity(block *btcutil.Block) error {
+	// NOTE: bitcoind does size limits checking here, but the size limits
+	// have already been checked by btcwire for incoming blocks.  Also,
+	// btcwire checks the size limits on send too, so there is no need
+	// to double check it here.
+
+	// Ensure the proof of work bits in the block header are in min/max
+	// range and the block hash is less than the target value described by
+	// the bits.
+	err := checkProofOfWork(block)
+	if err != nil {
+		return err
+	}
+
+	// Ensure the block time is not more than 2 hours in the future.
+	msgBlock := block.MsgBlock()
+	header := &msgBlock.Header
+	if header.Timestamp.After(time.Now().Add(time.Hour * 2)) {
+		str := fmt.Sprintf("block timestamp of %v is too far in the "+
+			"future", header.Timestamp)
+		return RuleError(str)
+	}
+
+	// A block must have at least one transaction.
+	transactions := msgBlock.Transactions
+	if len(transactions) == 0 {
+		return RuleError("block does not contain any transactions")
+	}
+
+	// The first transaction in a block must be a coinbase.
+	if !isCoinBase(transactions[0]) {
+		return RuleError("first transaction in block is not a coinbase")
+	}
+
+	// A block must not have more than one coinbase.
+	for _, tx := range transactions[1:] {
+		if isCoinBase(tx) {
+			return RuleError("block contains more than one coinbase")
+		}
+	}
+
+	// Do some preliminary checks on each transaction to ensure they are
+	// sane before continuing.
+	for _, tx := range transactions {
+		err := checkTransactionSanity(tx)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Build merkle tree and ensure the calculated merkle root matches the
+	// entry in the block header.  This also has the effect of caching all
+	// of the transaction hashes in the block to speed up future hash
+	// checks.  Bitcoind builds the tree here and checks the merkle root
+	// after the following checks, but there is no reason not to check the
+	// merkle root matches here.
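+	// The merkle root is the final entry in the linearized merkle tree
+	// store returned by BuildMerkleTreeStore.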
+ merkles := BuildMerkleTreeStore(block) + calculatedMerkleRoot := merkles[len(merkles)-1] + if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) { + str := fmt.Sprintf("block merkle root is invalid - got %v, "+ + "want %v", calculatedMerkleRoot, header.MerkleRoot) + return RuleError(str) + } + + // Check for duplicate transactions. This check will be fairly quick + // since the transaction hashes are already cached due to building the + // merkle tree above. + existingTxHashes := make(map[btcwire.ShaHash]bool) + txShas, err := block.TxShas() + if err != nil { + return err + } + for _, hash := range txShas { + if _, exists := existingTxHashes[*hash]; exists { + str := fmt.Sprintf("block contains duplicate "+ + "transaction %v", hash) + return RuleError(str) + } + existingTxHashes[*hash] = true + } + + // The number of signature operations must be less than the maximum + // allowed per block. + totalSigOps := 0 + for i, tx := range transactions { + // Since the first (and only the first) transaction has already + // been verified above to be a coinbase transaction, use i == 0 + // as an optimization for the flag to countSigOps for whether + // or not the transaction is a coinbase transaction rather than + // having to do a full coinbase check again. + numSigOps, err := countSigOps(tx, i == 0) + if err != nil { + return err + } + + // We could potentially overflow the accumulator so check for + // overflow. + lastSigOps := totalSigOps + totalSigOps += numSigOps + if totalSigOps < lastSigOps || totalSigOps > maxSigOpsPerBlock { + str := fmt.Sprintf("block contains too many signature "+ + "operations - got %v, max %v", totalSigOps, + maxSigOpsPerBlock) + return RuleError(str) + } + } + + return nil +} + +// checkSerializedHeight checks if the signature script in the passed +// transaction starts with the serialized block height of wantHeight. +func checkSerializedHeight(coinbaseTx *btcwire.MsgTx, wantHeight int64) error { + sigScript := coinbaseTx.TxIn[0].SignatureScript + if len(sigScript) < 4 { + str := "the coinbase signature script for blocks of " + + "version %d or greater must start with the " + + "serialized block height" + str = fmt.Sprintf(str, serializedHeightVersion) + return RuleError(str) + } + + serializedHeightBytes := make([]byte, 4, 4) + copy(serializedHeightBytes, sigScript[1:4]) + serializedHeight := binary.LittleEndian.Uint32(serializedHeightBytes) + if int64(serializedHeight) != wantHeight { + str := fmt.Sprintf("the coinbase signature script serialized "+ + "block height is %d when %d was expected", + serializedHeight, wantHeight) + return RuleError(str) + } + + return nil +} + +// isTransactionSpent returns whether or not the provided transaction is fully +// spent. A fully spent transaction is one where all outputs have been spent. +func isTransactionSpent(tx *txData) bool { + for _, isOutputSpent := range tx.spent { + if !isOutputSpent { + return false + } + } + return true +} + +// checkBIP0030 ensures blocks do not contain duplicate transactions which +// 'overwrite' older transactions that are not fully spent. This prevents an +// attack where a coinbase and all of its dependent transactions could be +// duplicated to effectively revert the overwritten transactions to a single +// confirmation thereby making them vulnerable to a double spend. +// +// For more details, see https://en.bitcoin.it/wiki/BIP_0030 and +// http://r6.ca/blog/20120206T005236Z.html. 
+func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block) error {
+	// Attempt to fetch duplicate transactions for all of the transactions
+	// in this block from the point of view of the parent node.
+	fetchList, err := block.TxShas()
+	if err != nil {
+		return err
+	}
+	txResults, err := b.fetchTxList(node, fetchList)
+	if err != nil {
+		return err
+	}
+
+	// Examine the resulting data about the requested transactions.
+	for _, txD := range txResults {
+		switch txD.err {
+		// A duplicate transaction was not found.  This is the most
+		// common case.
+		case btcdb.TxShaMissing:
+			continue
+
+		// A duplicate transaction was found.  This is only allowed if
+		// the duplicate transaction is fully spent.
+		case nil:
+			if !isTransactionSpent(txD) {
+				str := fmt.Sprintf("tried to overwrite "+
+					"transaction %v at block height %d "+
+					"that is not fully spent", txD.hash,
+					txD.blockHeight)
+				return RuleError(str)
+			}
+
+		// Some other unexpected error occurred.  Return it now.
+		default:
+			return txD.err
+		}
+	}
+
+	return nil
+}
+
+// checkTransactionInputs performs a series of checks on the inputs to a
+// transaction to ensure they are valid.  Examples of the checks include
+// verifying all inputs exist, ensuring the coinbase seasoning (maturity)
+// requirements are met, validating all values and fees are in the legal range
+// and the total output amount doesn't exceed the input amount, and verifying
+// the signatures to prove the spender was the owner of the bitcoins and
+// therefore allowed to spend them.  As it checks the inputs, it also calculates
+// the total fees for the transaction and returns that value.
+func checkTransactionInputs(tx *btcwire.MsgTx, txHeight int64, txStore map[btcwire.ShaHash]*txData) (int64, error) {
+	// Coinbase transactions have no inputs.
+	if isCoinBase(tx) {
+		return 0, nil
+	}
+
+	// TODO(davec): Need to pass the cached version in.
+	txHash, err := tx.TxSha(btcwire.ProtocolVersion)
+	if err != nil {
+		return 0, err
+	}
+
+	var totalSatoshiIn int64
+	for _, txIn := range tx.TxIn {
+		// Ensure the input is available.
+		txInHash := &txIn.PreviousOutpoint.Hash
+		originTx, exists := txStore[*txInHash]
+		if !exists {
+			str := fmt.Sprintf("unable to find input transaction "+
+				"%v for transaction %v", txInHash, txHash)
+			return 0, RuleError(str)
+		}
+
+		// Ensure the transaction is not spending coins which have not
+		// yet reached the required coinbase maturity.
+		if isCoinBase(originTx.tx) {
+			originHeight := originTx.blockHeight
+			blocksSincePrev := txHeight - originHeight
+			if blocksSincePrev < coinbaseMaturity {
+				str := fmt.Sprintf("tried to spend coinbase "+
+					"transaction %v from height %v at "+
+					"height %v before required maturity "+
+					"of %v blocks", txInHash, originHeight,
+					txHeight, coinbaseMaturity)
+				return 0, RuleError(str)
+			}
+		}
+
+		// Ensure the transaction is not double spending coins.
+		originTxIndex := txIn.PreviousOutpoint.Index
+		if originTxIndex >= uint32(len(originTx.spent)) {
+			return 0, fmt.Errorf("out of bounds input index %d in "+
+				"transaction %v referenced from transaction %v",
+				originTxIndex, txInHash, txHash)
+		}
+		if originTx.spent[originTxIndex] {
+			str := fmt.Sprintf("transaction %v tried to double "+
+				"spend coins from transaction %v", txHash,
+				txInHash)
+			return 0, RuleError(str)
+		}
+
+		// Ensure the transaction amounts are in range.  Each of the
+		// output values of the input transactions must not be negative
+		// or more than the max allowed per transaction.
All amounts in + // a transaction are in a unit value known as a satoshi. One + // bitcoin is a quantity of satoshi as defined by the + // satoshiPerBitcoin constant. + originTxSatoshi := originTx.tx.TxOut[originTxIndex].Value + if originTxSatoshi < 0 { + str := fmt.Sprintf("transaction output has negative "+ + "value of %v", originTxSatoshi) + return 0, RuleError(str) + } + if originTxSatoshi > maxSatoshi { + str := fmt.Sprintf("transaction output value of %v is "+ + "higher than max allowed value of %v", + originTxSatoshi, maxSatoshi) + return 0, RuleError(str) + } + + // The total of all outputs must not be more than the max + // allowed per transaction. Also, we could potentially overflow + // the accumulator so check for overflow. + lastSatoshiIn := totalSatoshiIn + totalSatoshiIn += originTxSatoshi + if totalSatoshiIn < lastSatoshiIn || totalSatoshiIn > maxSatoshi { + str := fmt.Sprintf("total value of all transaction "+ + "inputs is %v which is higher than max "+ + "allowed value of %v", totalSatoshiIn, + maxSatoshi) + return 0, RuleError(str) + } + } + + // Calculate the total output amount for this transaction. It is safe + // to ignore overflow and out of range errors here because those error + // conditions would have already been caught by checkTransactionSanity. + var totalSatoshiOut int64 + for _, txOut := range tx.TxOut { + totalSatoshiOut += txOut.Value + } + + // Ensure the transaction does not spend more than its inputs. + if totalSatoshiIn < totalSatoshiOut { + str := fmt.Sprintf("total value of all transaction inputs for "+ + "transaction %v is %v which is less than the amount "+ + "spent of %v", txHash, totalSatoshiIn, totalSatoshiOut) + return 0, RuleError(str) + } + + // NOTE: bitcoind checks if the transaction fees are < 0 here, but that + // is an impossible condition because of the check above that ensures + // the inputs are >= the outputs. + txFeeInSatoshi := totalSatoshiIn - totalSatoshiOut + return txFeeInSatoshi, nil +} + +// checkConnectBlock performs several checks to confirm connecting the passed +// block to the main chain (including whatever reorganization might be necessary +// to get this node to the main chain) does not violate any rules. +func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) error { + // If the side chain blocks end up in the database, a call to + // checkBlockSanity should be done here in case a previous version + // allowed a block that is no longer valid. However, since the + // implementation only currently uses memory for the side chain blocks, + // it isn't currently necessary. + + // TODO(davec): Keep a flag if this has already been done to avoid + // multiple runs. + + // The coinbase for the Genesis block is not spendable, so just return + // now. + if node.hash.IsEqual(&btcwire.GenesisHash) { + return nil + } + + // BIP0030 added a rule to prevent blocks which contain duplicate + // transactions that 'overwrite' older transactions which are not fully + // spent. See the documentation for checkBIP0030 for more details. + // + // There are two blocks in the chain which violate this + // rule, so the check must be skipped for those blocks. The + // isBIP0030Node function is used to determine if this block is one + // of the two blocks that must be skipped. 
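+	// The two exceptions are the blocks at heights 91842 and 91880 (see
+	// block91842Hash and block91880Hash above).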
+	enforceBIP0030 := !isBIP0030Node(node)
+	if enforceBIP0030 {
+		err := b.checkBIP0030(node, block)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Request a map that contains all input transactions for the block from
+	// the point of view of its position within the block chain.  These
+	// transactions are needed for verification of things such as
+	// transaction inputs, counting pay-to-script-hashes, and scripts.
+	txInputStore, err := b.fetchInputTransactions(node, block)
+	if err != nil {
+		return err
+	}
+
+	// BIP0016 describes a pay-to-script-hash type that is considered a
+	// "standard" type.  The rules for this BIP only apply to transactions
+	// after the timestamp defined by btcscript.Bip16Activation.  See
+	// https://en.bitcoin.it/wiki/BIP_0016 for more details.
+	enforceBIP0016 := false
+	if node.timestamp.After(btcscript.Bip16Activation) {
+		enforceBIP0016 = true
+	}
+
+	// The number of signature operations must be less than the maximum
+	// allowed per block.  Note that the preliminary sanity checks on a
+	// block also include a check similar to this one, but this check
+	// expands the count to include a precise count of pay-to-script-hash
+	// signature operations in each of the input transaction public key
+	// scripts.
+	transactions := block.MsgBlock().Transactions
+	totalSigOps := 0
+	for i, tx := range transactions {
+		// Since the first (and only the first) transaction has already
+		// been verified to be a coinbase transaction, use i == 0
+		// as an optimization for the flag to countSigOps for whether
+		// or not the transaction is a coinbase transaction rather than
+		// having to do a full coinbase check again.
+		numSigOps, err := countSigOps(tx, i == 0)
+		if err != nil {
+			return err
+		}
+		if enforceBIP0016 {
+			numP2SHSigOps, err := countP2SHSigOps(tx, i == 0,
+				txInputStore)
+			if err != nil {
+				return err
+			}
+			numSigOps += numP2SHSigOps
+		}
+
+		// Check for overflow or going over the limits.  We have to do
+		// this on every loop to avoid overflow.
+		lastSigOps := totalSigOps
+		totalSigOps += numSigOps
+		if totalSigOps < lastSigOps || totalSigOps > maxSigOpsPerBlock {
+			str := fmt.Sprintf("block contains too many "+
+				"signature operations - got %v, max %v",
+				totalSigOps, maxSigOpsPerBlock)
+			return RuleError(str)
+		}
+	}
+
+	// Perform several checks on the inputs for each transaction.  Also
+	// accumulate the total fees.  This could technically be combined with
+	// the loop above instead of running another loop over the transactions,
+	// but by separating it we can avoid running the more expensive (though
+	// still relatively cheap as compared to running the scripts) checks
+	// against all the inputs when the signature operations are out of
+	// bounds.
+	var totalFees int64
+	for _, tx := range transactions {
+		txFee, err := checkTransactionInputs(tx, node.height, txInputStore)
+		if err != nil {
+			return err
+		}
+
+		// Sum the total fees and ensure we don't overflow the
+		// accumulator.
+		lastTotalFees := totalFees
+		totalFees += txFee
+		if totalFees < lastTotalFees {
+			return RuleError("total fees for block overflows " +
+				"accumulator")
+		}
+	}
+
+	// The total output values of the coinbase transaction must not exceed
+	// the expected subsidy value plus total transaction fees gained from
+	// mining the block.  It is safe to ignore overflow and out of range
+	// errors here because those error conditions would have already been
+	// caught by checkTransactionSanity.
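+	// For example, calcBlockSubsidy(100000) is 50 * satoshiPerBitcoin, so
+	// a block at height 100000 with 0.1 BTC in collected fees may pay out
+	// at most 50.1 BTC through its coinbase outputs.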
+ var totalSatoshiOut int64 + for _, txOut := range transactions[0].TxOut { + totalSatoshiOut += txOut.Value + } + expectedSatoshiOut := calcBlockSubsidy(node.height) + totalFees + if totalSatoshiOut > expectedSatoshiOut { + str := fmt.Sprintf("coinbase transaction for block pays %v "+ + "which is more than expected value of %v", + totalSatoshiOut, expectedSatoshiOut) + return RuleError(str) + } + + // Don't run scripts if this node is before the latest known good + // checkpoint since the validity is verified via the checkpoints (all + // transactions are included in the merkle root hash and any changes + // will therefore be detected by the next checkpoint). This is a huge + // optimization because running the scripts is the most time consuming + // portion of block handling. + checkpoint := b.LatestCheckpoint() + runScripts := !b.noVerify + if checkpoint != nil && node.height <= checkpoint.Height { + runScripts = false + } + + // Now that the inexpensive checks are done and have passed, verify the + // transactions are actually allowed to spend the coins by running the + // expensive ECDSA signature check scripts. Doing this last helps + // prevent CPU exhaustion attacks. + if runScripts { + err := checkBlockScripts(block, txInputStore) + if err != nil { + return err + } + } + + return nil +} diff --git a/validate_test.go b/validate_test.go new file mode 100644 index 000000000..508aa9c5d --- /dev/null +++ b/validate_test.go @@ -0,0 +1,274 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package btcchain_test + +import ( + "github.com/conformal/btcchain" + "github.com/conformal/btcutil" + "github.com/conformal/btcwire" + "testing" + "time" +) + +func TestCheckBlockSanity(t *testing.T) { + block := btcutil.NewBlock(&Block100000, btcwire.ProtocolVersion) + err := btcchain.TstCheckBlockSanity(block) + if err != nil { + t.Errorf("CheckBlockSanity: %v", err) + } +} + +// Block100000 defines block 100,000 of the block chain. It is used to +// test Block operations. +var Block100000 = btcwire.MsgBlock{ + Header: btcwire.BlockHeader{ + Version: 1, + PrevBlock: btcwire.ShaHash([32]byte{ // Make go vet happy. + 0x50, 0x12, 0x01, 0x19, 0x17, 0x2a, 0x61, 0x04, + 0x21, 0xa6, 0xc3, 0x01, 0x1d, 0xd3, 0x30, 0xd9, + 0xdf, 0x07, 0xb6, 0x36, 0x16, 0xc2, 0xcc, 0x1f, + 0x1c, 0xd0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + }), // 000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250 + MerkleRoot: btcwire.ShaHash([32]byte{ // Make go vet happy. 
+ 0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0, + 0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22, + 0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85, + 0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3, + }), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766 + Timestamp: time.Unix(1293623863, 0), // 2010-12-29 11:57:43 +0000 UTC + Bits: 0x1b04864c, // 453281356 + Nonce: 0x10572b0f, // 274148111 + TxnCount: 4, + }, + Transactions: []*btcwire.MsgTx{ + &btcwire.MsgTx{ + Version: 1, + TxIn: []*btcwire.TxIn{ + &btcwire.TxIn{ + PreviousOutpoint: btcwire.OutPoint{ + Hash: btcwire.ShaHash{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{ + 0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02, + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*btcwire.TxOut{ + &btcwire.TxOut{ + Value: 0x12a05f200, // 5000000000 + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25, + 0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73, + 0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7, + 0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16, + 0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24, + 0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed, + 0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28, + 0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf, + 0x84, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + &btcwire.MsgTx{ + Version: 1, + TxIn: []*btcwire.TxIn{ + &btcwire.TxIn{ + PreviousOutpoint: btcwire.OutPoint{ + Hash: btcwire.ShaHash([32]byte{ // Make go vet happy. + 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*btcwire.TxOut{ + &btcwire.TxOut{ + Value: 0x2123e300, // 556000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + &btcwire.TxOut{ + Value: 0x108e20f00, // 4444000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + &btcwire.MsgTx{ + Version: 
1, + TxIn: []*btcwire.TxIn{ + &btcwire.TxIn{ + PreviousOutpoint: btcwire.OutPoint{ + Hash: btcwire.ShaHash([32]byte{ // Make go vet happy. + 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, + 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, + 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, + 0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf, + }), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3 + Index: 1, + }, + SignatureScript: []byte{ + 0x47, // OP_DATA_71 + 0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf, + 0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5, + 0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34, + 0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31, + 0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee, + 0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f, + 0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c, + 0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e, + 0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01, + 0x41, // OP_DATA_65 + 0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78, + 0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5, + 0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39, + 0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21, + 0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee, + 0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3, + 0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95, + 0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85, + 0x0f, // 65-byte pubkey + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*btcwire.TxOut{ + &btcwire.TxOut{ + Value: 0xf4240, // 1000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04, + 0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d, + 0xad, 0xbe, 0x7e, 0x10, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + &btcwire.TxOut{ + Value: 0x11d260c0, // 299000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1, + 0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab, + 0xb3, 0x40, 0x9c, 0xd9, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + &btcwire.MsgTx{ + Version: 1, + TxIn: []*btcwire.TxIn{ + &btcwire.TxIn{ + PreviousOutpoint: btcwire.OutPoint{ + Hash: btcwire.ShaHash([32]byte{ // Make go vet happy. 
+ 0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, + 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, + 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, + 0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4, + }), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2, + 0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c, + 0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd, + 0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f, + 0xd1, 0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00, + 0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14, + 0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb, + 0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c, + 0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97, + 0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18, + 0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17, + 0x54, 0x07, 0xc0, 0x94, 0x84, 0xce, 0x96, 0x94, + 0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65, + 0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f, + 0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce, + 0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f, + 0xbb, // 65-byte pubkey + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*btcwire.TxOut{ + &btcwire.TxOut{ + Value: 0xf4240, // 1000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7, + 0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b, + 0xf2, 0xeb, 0x9e, 0xe0, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + }, +}