diff --git a/addrmgr/addrmanager.go b/addrmgr/addrmanager.go index a6c41dc1e..092ae00e6 100644 --- a/addrmgr/addrmanager.go +++ b/addrmgr/addrmanager.go @@ -22,7 +22,7 @@ import ( "sync/atomic" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -298,7 +298,7 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int { data1 = append(data1, a.key[:]...) data1 = append(data1, []byte(GroupKey(netAddr))...) data1 = append(data1, []byte(GroupKey(srcAddr))...) - hash1 := chainhash.DoubleHashB(data1) + hash1 := daghash.DoubleHashB(data1) hash64 := binary.LittleEndian.Uint64(hash1) hash64 %= newBucketsPerGroup var hashbuf [8]byte @@ -308,7 +308,7 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int { data2 = append(data2, GroupKey(srcAddr)...) data2 = append(data2, hashbuf[:]...) - hash2 := chainhash.DoubleHashB(data2) + hash2 := daghash.DoubleHashB(data2) return int(binary.LittleEndian.Uint64(hash2) % newBucketCount) } @@ -318,7 +318,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { data1 := []byte{} data1 = append(data1, a.key[:]...) data1 = append(data1, []byte(NetAddressKey(netAddr))...) - hash1 := chainhash.DoubleHashB(data1) + hash1 := daghash.DoubleHashB(data1) hash64 := binary.LittleEndian.Uint64(hash1) hash64 %= triedBucketsPerGroup var hashbuf [8]byte @@ -328,7 +328,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { data2 = append(data2, GroupKey(netAddr)...) data2 = append(data2, hashbuf[:]...) 
- hash2 := chainhash.DoubleHashB(data2) + hash2 := daghash.DoubleHashB(data2) return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount) } diff --git a/blockchain/testdata/277647.dat.bz2 b/blockchain/testdata/277647.dat.bz2 deleted file mode 100644 index 598420a65..000000000 Binary files a/blockchain/testdata/277647.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/277647.utxostore.bz2 b/blockchain/testdata/277647.utxostore.bz2 deleted file mode 100644 index c12b65e2a..000000000 Binary files a/blockchain/testdata/277647.utxostore.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_0_to_4.dat.bz2 b/blockchain/testdata/blk_0_to_4.dat.bz2 deleted file mode 100644 index 274c710d2..000000000 Binary files a/blockchain/testdata/blk_0_to_4.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_3A.dat.bz2 b/blockchain/testdata/blk_3A.dat.bz2 deleted file mode 100644 index 01266565d..000000000 Binary files a/blockchain/testdata/blk_3A.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_4A.dat.bz2 b/blockchain/testdata/blk_4A.dat.bz2 deleted file mode 100644 index 19b409e75..000000000 Binary files a/blockchain/testdata/blk_4A.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_5A.dat.bz2 b/blockchain/testdata/blk_5A.dat.bz2 deleted file mode 100644 index 47bff9038..000000000 Binary files a/blockchain/testdata/blk_5A.dat.bz2 and /dev/null differ diff --git a/blockchain/README.md b/blockdag/README.md similarity index 100% rename from blockchain/README.md rename to blockdag/README.md diff --git a/blockchain/accept.go b/blockdag/accept.go similarity index 54% rename from blockchain/accept.go rename to blockdag/accept.go index c162b0454..300afc375 100644 --- a/blockchain/accept.go +++ b/blockdag/accept.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "fmt" @@ -11,37 +11,32 @@ import ( "github.com/daglabs/btcutil" ) -// maybeAcceptBlock potentially accepts a block into the block chain and, if -// accepted, returns whether or not it is on the main chain. It performs -// several validation checks which depend on its position within the block chain -// before adding it. The block is expected to have already gone through -// ProcessBlock before calling this function with it. +// maybeAcceptBlock potentially accepts a block into the block DAG. It +// performs several validation checks which depend on its position within +// the block DAG before adding it. The block is expected to have already +// gone through ProcessBlock before calling this function with it. // -// The flags are also passed to checkBlockContext and connectBestChain. See +// The flags are also passed to checkBlockContext and connectToDAG. See // their documentation for how the flags modify their behavior. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) (bool, error) { +func (b *BlockDAG) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) error { // The height of this block is one more than the referenced previous // block. 
- prevHash := &block.MsgBlock().Header.PrevBlock - prevNode := b.index.LookupNode(prevHash) - if prevNode == nil { - str := fmt.Sprintf("previous block %s is unknown", prevHash) - return false, ruleError(ErrPreviousBlockUnknown, str) - } else if b.index.NodeStatus(prevNode).KnownInvalid() { - str := fmt.Sprintf("previous block %s is known to be invalid", prevHash) - return false, ruleError(ErrInvalidAncestorBlock, str) + parents, err := lookupPreviousNodes(block, b) + if err != nil { + return err } - blockHeight := prevNode.height + 1 + selectedParent := parents.first() + blockHeight := selectedParent.height + 1 block.SetHeight(blockHeight) // The block must pass all of the validation rules which depend on the // position of the block within the block chain. - err := b.checkBlockContext(block, prevNode, flags) + err = b.checkBlockContext(block, selectedParent, flags) if err != nil { - return false, err + return err } // Insert the block into the database if it's not already there. Even @@ -57,36 +52,56 @@ func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) return dbStoreBlock(dbTx, block) }) if err != nil { - return false, err + return err } // Create a new block node for the block and add it to the node index. Even // if the block ultimately gets connected to the main chain, it starts out // on a side chain. blockHeader := &block.MsgBlock().Header - newNode := newBlockNode(blockHeader, prevNode) + newNode := newBlockNode(blockHeader, parents) newNode.status = statusDataStored b.index.AddNode(newNode) err = b.index.flushToDB() if err != nil { - return false, err + return err } - // Connect the passed block to the chain while respecting proper chain - // selection according to the chain with the most proof of work. This - // also handles validation of the transaction scripts. - isMainChain, err := b.connectBestChain(newNode, block, flags) + // Connect the passed block to the DAG. 
This also handles validation of the + transaction scripts. + err = b.connectToDAG(newNode, parents, block, flags) if err != nil { - return false, err + return err } // Notify the caller that the new block was accepted into the block // chain. The caller would typically want to react by relaying the // inventory to other peers. - b.chainLock.Unlock() + b.dagLock.Unlock() b.sendNotification(NTBlockAccepted, block) - b.chainLock.Lock() + b.dagLock.Lock() - return isMainChain, nil + return nil +} + +func lookupPreviousNodes(block *btcutil.Block, blockDAG *BlockDAG) (blockSet, error) { + header := block.MsgBlock().Header + prevHashes := header.PrevBlocks + + nodes := newSet() + for _, prevHash := range prevHashes { + node := blockDAG.index.LookupNode(&prevHash) + if node == nil { + str := fmt.Sprintf("previous block %s is unknown", prevHash) + return nil, ruleError(ErrPreviousBlockUnknown, str) + } else if blockDAG.index.NodeStatus(node).KnownInvalid() { + str := fmt.Sprintf("previous block %s is known to be invalid", prevHash) + return nil, ruleError(ErrInvalidAncestorBlock, str) + } + + nodes.add(node) + } + + return nodes, nil } diff --git a/blockchain/bench_test.go b/blockdag/bench_test.go similarity index 97% rename from blockchain/bench_test.go rename to blockdag/bench_test.go index 7f20ad487..0856d1496 100644 --- a/blockchain/bench_test.go +++ b/blockdag/bench_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "testing" diff --git a/blockchain/blockindex.go b/blockdag/blockindex.go similarity index 84% rename from blockchain/blockindex.go rename to blockdag/blockindex.go index 65e589851..34a28222f 100644 --- a/blockchain/blockindex.go +++ b/blockdag/blockindex.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file.
-package blockchain +package blockdag import ( "math/big" @@ -10,8 +10,8 @@ import ( "sync" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" ) @@ -71,17 +71,20 @@ type blockNode struct { // hundreds of thousands of these in memory, so a few extra bytes of // padding adds up. - // parent is the parent block for this node. - parent *blockNode + // parents is the parent blocks for this node. + parents blockSet + + // selectedParent is the selected parent for this node. + selectedParent *blockNode // hash is the double sha 256 of the block. - hash chainhash.Hash + hash daghash.Hash - // workSum is the total amount of work in the chain up to and including + // workSum is the total amount of work in the DAG up to and including // this node. workSum *big.Int - // height is the position in the block chain. + // height is the position in the block DAG. height int32 // Some fields from block headers to aid in best chain selection and @@ -92,7 +95,7 @@ type blockNode struct { bits uint32 nonce uint32 timestamp int64 - merkleRoot chainhash.Hash + merkleRoot daghash.Hash // status is a bitfield representing the validation state of the block. The // status field, unlike the other fields, may be written to and so should @@ -101,13 +104,14 @@ type blockNode struct { status blockStatus } -// initBlockNode initializes a block node from the given header and parent node, -// calculating the height and workSum from the respective fields on the parent. +// initBlockNode initializes a block node from the given header and parent nodes, +// calculating the height and workSum from the respective fields on the first parent. // This function is NOT safe for concurrent access. It must only be called when // initially creating a node. 
-func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parent *blockNode) { +func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet) { *node = blockNode{ hash: blockHeader.BlockHash(), + parents: parents, workSum: CalcWork(blockHeader.Bits), version: blockHeader.Version, bits: blockHeader.Bits, @@ -115,19 +119,19 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parent *block timestamp: blockHeader.Timestamp.Unix(), merkleRoot: blockHeader.MerkleRoot, } - if parent != nil { - node.parent = parent - node.height = parent.height + 1 - node.workSum = node.workSum.Add(parent.workSum, node.workSum) + if len(parents) > 0 { + node.selectedParent = parents.first() + node.height = node.selectedParent.height + 1 + node.workSum = node.workSum.Add(node.selectedParent.workSum, node.workSum) } } // newBlockNode returns a new block node for the given block header and parent -// node, calculating the height and workSum from the respective fields on the +// nodes, calculating the height and workSum from the respective fields on the // parent. This function is NOT safe for concurrent access. -func newBlockNode(blockHeader *wire.BlockHeader, parent *blockNode) *blockNode { +func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet) *blockNode { var node blockNode - initBlockNode(&node, blockHeader, parent) + initBlockNode(&node, blockHeader, parents) return &node } @@ -136,17 +140,14 @@ func newBlockNode(blockHeader *wire.BlockHeader, parent *blockNode) *blockNode { // This function is safe for concurrent access. func (node *blockNode) Header() wire.BlockHeader { // No lock is needed because all accessed fields are immutable. 
- prevHash := &zeroHash - if node.parent != nil { - prevHash = &node.parent.hash - } return wire.BlockHeader{ - Version: node.version, - PrevBlock: *prevHash, - MerkleRoot: node.merkleRoot, - Timestamp: time.Unix(node.timestamp, 0), - Bits: node.bits, - Nonce: node.nonce, + Version: node.version, + NumPrevBlocks: byte(len(node.parents)), + PrevBlocks: node.PrevHashes(), + MerkleRoot: node.merkleRoot, + Timestamp: time.Unix(node.timestamp, 0), + Bits: node.bits, + Nonce: node.nonce, } } @@ -162,7 +163,7 @@ func (node *blockNode) Ancestor(height int32) *blockNode { } n := node - for ; n != nil && n.height != height; n = n.parent { + for ; n != nil && n.height != height; n = n.selectedParent { // Intentionally left blank } @@ -192,7 +193,7 @@ func (node *blockNode) CalcPastMedianTime() time.Time { timestamps[i] = iterNode.timestamp numNodes++ - iterNode = iterNode.parent + iterNode = iterNode.selectedParent } // Prune the slice to the actual number of available timestamps which @@ -217,6 +218,15 @@ func (node *blockNode) CalcPastMedianTime() time.Time { return time.Unix(medianTimestamp, 0) } +func (node *blockNode) PrevHashes() []daghash.Hash { + prevHashes := make([]daghash.Hash, 0, len(node.parents)) + for _, parent := range node.parents { + prevHashes = append(prevHashes, parent.hash) + } + + return prevHashes +} + // blockIndex provides facilities for keeping track of an in-memory index of the // block chain. Although the name block chain suggests a single chain of // blocks, it is actually a tree-shaped structure where any node can have @@ -226,30 +236,30 @@ type blockIndex struct { // The following fields are set when the instance is created and can't // be changed afterwards, so there is no need to protect them with a // separate mutex.
- db database.DB - chainParams *chaincfg.Params + db database.DB + dagParams *dagconfig.Params sync.RWMutex - index map[chainhash.Hash]*blockNode + index map[daghash.Hash]*blockNode dirty map[*blockNode]struct{} } // newBlockIndex returns a new empty instance of a block index. The index will // be dynamically populated as block nodes are loaded from the database and // manually added. -func newBlockIndex(db database.DB, chainParams *chaincfg.Params) *blockIndex { +func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex { return &blockIndex{ - db: db, - chainParams: chainParams, - index: make(map[chainhash.Hash]*blockNode), - dirty: make(map[*blockNode]struct{}), + db: db, + dagParams: dagParams, + index: make(map[daghash.Hash]*blockNode), + dirty: make(map[*blockNode]struct{}), } } // HaveBlock returns whether or not the block index contains the provided hash. // // This function is safe for concurrent access. -func (bi *blockIndex) HaveBlock(hash *chainhash.Hash) bool { +func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool { bi.RLock() _, hasBlock := bi.index[*hash] bi.RUnlock() @@ -260,7 +270,7 @@ func (bi *blockIndex) HaveBlock(hash *chainhash.Hash) bool { // return nil if there is no entry for the hash. // // This function is safe for concurrent access. 
-func (bi *blockIndex) LookupNode(hash *chainhash.Hash) *blockNode { +func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode { bi.RLock() node := bi.index[*hash] bi.RUnlock() diff --git a/blockdag/blockset.go b/blockdag/blockset.go new file mode 100644 index 000000000..2b614f052 --- /dev/null +++ b/blockdag/blockset.go @@ -0,0 +1,140 @@ +package blockdag + +import ( + "strings" + + "github.com/daglabs/btcd/dagconfig/daghash" +) + +// blockSet implements a basic unsorted set of blocks +type blockSet map[daghash.Hash]*blockNode + +// newSet creates a new, empty BlockSet +func newSet() blockSet { + return map[daghash.Hash]*blockNode{} +} + +// setFromSlice converts a slice of blocks into an unordered set represented as map +func setFromSlice(blocks ...*blockNode) blockSet { + set := newSet() + for _, block := range blocks { + set[block.hash] = block + } + return set +} + +// toSlice converts a set of blocks into a slice +func (bs blockSet) toSlice() []*blockNode { + slice := []*blockNode{} + + for _, block := range bs { + slice = append(slice, block) + } + + return slice +} + +// add adds a block to this BlockSet +func (bs blockSet) add(block *blockNode) { + bs[block.hash] = block +} + +// remove removes a block from this BlockSet, if exists +// Does nothing if this set does not contain the block +func (bs blockSet) remove(block *blockNode) { + delete(bs, block.hash) +} + +// clone clones thie block set +func (bs blockSet) clone() blockSet { + clone := newSet() + for _, block := range bs { + clone.add(block) + } + return clone +} + +// subtract returns the difference between the BlockSet and another BlockSet +func (bs blockSet) subtract(other blockSet) blockSet { + diff := newSet() + for _, block := range bs { + if !other.contains(block) { + diff.add(block) + } + } + return diff +} + +// addSet adds all blocks in other set to this set +func (bs blockSet) addSet(other blockSet) { + for _, block := range other { + bs.add(block) + } +} + +// addSlice adds 
provided slice to this set +func (bs blockSet) addSlice(slice []*blockNode) { + for _, block := range slice { + bs.add(block) + } +} + +// union returns a BlockSet that contains all blocks included in this set, +// the other set, or both +func (bs blockSet) union(other blockSet) blockSet { + union := bs.clone() + + union.addSet(other) + + return union +} + +// contains returns true iff this set contains block +func (bs blockSet) contains(block *blockNode) bool { + _, ok := bs[block.hash] + return ok +} + +// hashesEqual returns true if the given hashes are equal to the hashes +// of the blocks in this set. +// NOTE: The given hash slice must not contain duplicates. +func (bs blockSet) hashesEqual(hashes []daghash.Hash) bool { + if len(hashes) != len(bs) { + return false + } + + for _, hash := range hashes { + if _, wasFound := bs[hash]; !wasFound { + return false + } + } + + return true +} + +// hashes returns the hashes of the blocks in this set. +func (bs blockSet) hashes() []daghash.Hash { + hashes := make([]daghash.Hash, 0, len(bs)) + for hash := range bs { + hashes = append(hashes, hash) + } + + return hashes +} + +// first returns the first block in this set or nil if this set is empty. +func (bs blockSet) first() *blockNode { + for _, block := range bs { + return block + } + + return nil +} + +func (bs blockSet) String() string { + ids := []string{} + for hash := range bs { + ids = append(ids, hash.String()) + } + return strings.Join(ids, ",") +} diff --git a/blockchain/checkpoints.go b/blockdag/checkpoints.go similarity index 87% rename from blockchain/checkpoints.go rename to blockdag/checkpoints.go index 4447dd3fd..eb3a94c63 100644 --- a/blockchain/checkpoints.go +++ b/blockdag/checkpoints.go @@ -2,14 +2,14 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "fmt" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcutil" ) @@ -19,11 +19,11 @@ import ( const CheckpointConfirmations = 2016 // newHashFromStr converts the passed big-endian hex string into a -// chainhash.Hash. It only differs from the one available in chainhash in that +// daghash.Hash. It only differs from the one available in daghash in that // it ignores the error since it will only (and must only) be called with // hard-coded, and therefore known good, hashes. -func newHashFromStr(hexStr string) *chainhash.Hash { - hash, _ := chainhash.NewHashFromStr(hexStr) +func newHashFromStr(hexStr string) *daghash.Hash { + hash, _ := daghash.NewHashFromStr(hexStr) return hash } @@ -32,14 +32,14 @@ func newHashFromStr(hexStr string) *chainhash.Hash { // nil. // // This function is safe for concurrent access. -func (b *BlockChain) Checkpoints() []chaincfg.Checkpoint { +func (b *BlockDAG) Checkpoints() []dagconfig.Checkpoint { return b.checkpoints } -// HasCheckpoints returns whether this BlockChain has checkpoints defined. +// HasCheckpoints returns whether this BlockDAG has checkpoints defined. // // This function is safe for concurrent access. -func (b *BlockChain) HasCheckpoints() bool { +func (b *BlockDAG) HasCheckpoints() bool { return len(b.checkpoints) > 0 } @@ -48,7 +48,7 @@ func (b *BlockChain) HasCheckpoints() bool { // instance, it will return nil. // // This function is safe for concurrent access. 
-func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint { +func (b *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint { if !b.HasCheckpoints() { return nil } @@ -58,7 +58,7 @@ func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint { // verifyCheckpoint returns whether the passed block height and hash combination // match the checkpoint data. It also returns true if there is no checkpoint // data for the passed block height. -func (b *BlockChain) verifyCheckpoint(height int32, hash *chainhash.Hash) bool { +func (b *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool { if !b.HasCheckpoints() { return true } @@ -84,7 +84,7 @@ func (b *BlockChain) verifyCheckpoint(height int32, hash *chainhash.Hash) bool { // should really only happen for blocks before the first checkpoint). // // This function MUST be called with the chain lock held (for reads). -func (b *BlockChain) findPreviousCheckpoint() (*blockNode, error) { +func (b *BlockDAG) findPreviousCheckpoint() (*blockNode, error) { if !b.HasCheckpoints() { return nil, nil } @@ -99,7 +99,7 @@ func (b *BlockChain) findPreviousCheckpoint() (*blockNode, error) { // that is already available. for i := numCheckpoints - 1; i >= 0; i-- { node := b.index.LookupNode(checkpoints[i].Hash) - if node == nil || !b.bestChain.Contains(node) { + if node == nil || !b.dag.Contains(node) { continue } @@ -130,7 +130,7 @@ func (b *BlockChain) findPreviousCheckpoint() (*blockNode, error) { // When there is a next checkpoint and the height of the current best // chain does not exceed it, the current checkpoint lockin is still // the latest known checkpoint. - if b.bestChain.Tip().height < b.nextCheckpoint.Height { + if b.dag.SelectedTip().height < b.nextCheckpoint.Height { return b.checkpointNode, nil } @@ -197,13 +197,13 @@ func isNonstandardTransaction(tx *btcutil.Tx) bool { // decision and then manually added to the list of checkpoints for a network. // // This function is safe for concurrent access. 
-func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { - b.chainLock.RLock() - defer b.chainLock.RUnlock() +func (b *BlockDAG) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { + b.dagLock.RLock() + defer b.dagLock.RUnlock() // A checkpoint must be in the main chain. node := b.index.LookupNode(block.Hash()) - if node == nil || !b.bestChain.Contains(node) { + if node == nil || !b.dag.Contains(node) { return false, nil } @@ -218,7 +218,7 @@ func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { // A checkpoint must be at least CheckpointConfirmations blocks // before the end of the main chain. - mainChainHeight := b.bestChain.Tip().height + mainChainHeight := b.dag.SelectedTip().height if node.height > (mainChainHeight - CheckpointConfirmations) { return false, nil } @@ -228,20 +228,20 @@ func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { // This should always succeed since the check above already made sure it // is CheckpointConfirmations back, but be safe in case the constant // changes. - nextNode := b.bestChain.Next(node) + nextNode := b.dag.Next(node) if nextNode == nil { return false, nil } // A checkpoint must be have at least one block before it. - if node.parent == nil { + if node.selectedParent == nil { return false, nil } // A checkpoint must have timestamps for the block and the blocks on // either side of it in order (due to the median time allowance this is // not always the case).
- prevTime := time.Unix(node.parent.timestamp, 0) + prevTime := time.Unix(node.selectedParent.timestamp, 0) curTime := block.MsgBlock().Header.Timestamp nextTime := time.Unix(nextNode.timestamp, 0) if prevTime.After(curTime) || nextTime.Before(curTime) { diff --git a/blockchain/common_test.go b/blockdag/common_test.go similarity index 89% rename from blockchain/common_test.go rename to blockdag/common_test.go index e947129cb..bfd3f90b2 100644 --- a/blockchain/common_test.go +++ b/blockdag/common_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "compress/bzip2" @@ -14,8 +14,8 @@ import ( "strings" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcd/txscript" @@ -118,7 +118,7 @@ func loadBlocks(filename string) (blocks []*btcutil.Block, err error) { // chainSetup is used to create a new db and chain instance with the genesis // block already inserted. In addition to the new chain instance, it returns // a teardown function the caller should invoke when done testing to clean up. -func chainSetup(dbName string, params *chaincfg.Params) (*BlockChain, func(), error) { +func chainSetup(dbName string, params *dagconfig.Params) (*BlockDAG, func(), error) { if !isSupportedDbType(testDbType) { return nil, nil, fmt.Errorf("unsupported db type %v", testDbType) } @@ -174,7 +174,7 @@ func chainSetup(dbName string, params *chaincfg.Params) (*BlockChain, func(), er // Create the main chain instance. 
chain, err := New(&Config{ DB: db, - ChainParams: ¶msCopy, + DAGParams: ¶msCopy, Checkpoints: nil, TimeSource: NewMedianTime(), SigCache: txscript.NewSigCache(1000), @@ -193,7 +193,7 @@ func loadUtxoView(filename string) (*UtxoViewpoint, error) { // // // The output index and serialized utxo len are little endian uint32s - // and the serialized utxo uses the format described in chainio.go. + // and the serialized utxo uses the format described in dagio.go. filename = filepath.Join("testdata", filename) fi, err := os.Open(filename) @@ -213,7 +213,7 @@ func loadUtxoView(filename string) (*UtxoViewpoint, error) { view := NewUtxoViewpoint() for { // Hash of the utxo entry. - var hash chainhash.Hash + var hash daghash.Hash _, err := io.ReadAtLeast(r, hash[:], len(hash[:])) if err != nil { // Expected EOF at the right offset. @@ -269,7 +269,7 @@ func convertUtxoStore(r io.Reader, w io.Writer) error { littleEndian := binary.LittleEndian for { // Hash of the utxo entry. - var hash chainhash.Hash + var hash daghash.Hash _, err := io.ReadAtLeast(r, hash[:], len(hash[:])) if err != nil { // Expected EOF at the right offset. @@ -339,34 +339,34 @@ func convertUtxoStore(r io.Reader, w io.Writer) error { // TstSetCoinbaseMaturity makes the ability to set the coinbase maturity // available when running tests. -func (b *BlockChain) TstSetCoinbaseMaturity(maturity uint16) { - b.chainParams.CoinbaseMaturity = maturity +func (b *BlockDAG) TstSetCoinbaseMaturity(maturity uint16) { + b.dagParams.CoinbaseMaturity = maturity } -// newFakeChain returns a chain that is usable for syntetic tests. It is +// newFakeDag returns a chain that is usable for syntetic tests. It is // important to note that this chain has no database associated with it, so // it is not usable with all functions and the tests must take care when making // use of it. 
-func newFakeChain(params *chaincfg.Params) *BlockChain { +func newFakeDag(params *dagconfig.Params) *BlockDAG { // Create a genesis block node and block index index populated with it // for use when creating the fake chain below. - node := newBlockNode(¶ms.GenesisBlock.Header, nil) + node := newBlockNode(¶ms.GenesisBlock.Header, newSet()) index := newBlockIndex(nil, params) index.AddNode(node) targetTimespan := int64(params.TargetTimespan / time.Second) targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second) adjustmentFactor := params.RetargetAdjustmentFactor - return &BlockChain{ - chainParams: params, + return &BlockDAG{ + dagParams: params, timeSource: NewMedianTime(), minRetargetTimespan: targetTimespan / adjustmentFactor, maxRetargetTimespan: targetTimespan * adjustmentFactor, blocksPerRetarget: int32(targetTimespan / targetTimePerBlock), index: index, - bestChain: newChainView(node), + dag: newDAGView(node), warningCaches: newThresholdCaches(vbNumBits), - deploymentCaches: newThresholdCaches(chaincfg.DefinedDeployments), + deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments), } } @@ -375,10 +375,10 @@ func newFakeChain(params *chaincfg.Params) *BlockChain { func newFakeNode(parent *blockNode, blockVersion int32, bits uint32, timestamp time.Time) *blockNode { // Make up a header and create a block node from it. header := &wire.BlockHeader{ - Version: blockVersion, - PrevBlock: parent.hash, - Bits: bits, - Timestamp: timestamp, + Version: blockVersion, + PrevBlocks: []daghash.Hash{parent.hash}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation. + Bits: bits, + Timestamp: timestamp, } - return newBlockNode(header, parent) + return newBlockNode(header, setFromSlice(parent)) // TODO: (Stas) This is wrong. Modified only to satisfy compilation. 
} diff --git a/blockchain/compress.go b/blockdag/compress.go similarity index 99% rename from blockchain/compress.go rename to blockdag/compress.go index 2804e1a8c..62e4e7f0c 100644 --- a/blockchain/compress.go +++ b/blockdag/compress.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "github.com/daglabs/btcd/btcec" diff --git a/blockchain/compress_test.go b/blockdag/compress_test.go similarity index 99% rename from blockchain/compress_test.go rename to blockdag/compress_test.go index b1a6ff274..bfa261036 100644 --- a/blockchain/compress_test.go +++ b/blockdag/compress_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "bytes" diff --git a/blockchain/chain.go b/blockdag/dag.go similarity index 58% rename from blockchain/chain.go rename to blockdag/dag.go index 97a4fcbe8..919d17492 100644 --- a/blockchain/chain.go +++ b/blockdag/dag.go @@ -2,16 +2,15 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( - "container/list" "fmt" "sync" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -38,7 +37,7 @@ const ( // // The block locator for block 17a would be the hashes of blocks: // [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis] -type BlockLocator []*chainhash.Hash +type BlockLocator []*daghash.Hash // orphanBlock represents a block that we don't yet have the parent for. 
It // is a normal block plus an expiration time to prevent caching the orphan @@ -48,52 +47,59 @@ type orphanBlock struct { expiration time.Time } -// BestState houses information about the current best block and other info -// related to the state of the main chain as it exists from the point of view of -// the current best block. +// DAGState houses information about the current tips and other info +// related to the state of the DAG. // -// The BestSnapshot method can be used to obtain access to this information +// The GetDAGState method can be used to obtain access to this information // in a concurrent safe manner and the data will not be changed out from under // the caller when chain state changes occur as the function name implies. // However, the returned snapshot must be treated as immutable since it is // shared by all callers. -type BestState struct { - Hash chainhash.Hash // The hash of the block. - Height int32 // The height of the block. - Bits uint32 // The difficulty bits of the block. - BlockSize uint64 // The size of the block. - NumTxns uint64 // The number of txns in the block. - TotalTxns uint64 // The total number of txns in the chain. - MedianTime time.Time // Median time as per CalcPastMedianTime. +type DAGState struct { + SelectedTip SelectedTipState // State of the selected tip + TipHashes []daghash.Hash // The hashes of the tips + TotalTxs uint64 // The total number of transactions in the DAG. } -// newBestState returns a new best stats instance for the given parameters. -func newBestState(node *blockNode, blockSize, numTxns, - totalTxns uint64, medianTime time.Time) *BestState { +type SelectedTipState struct { + Hash daghash.Hash // The hash of the tip. + Height int32 // The height of the tip. + Bits uint32 // The difficulty bits of the tip. + BlockSize uint64 // The size of the tip. + NumTxs uint64 // The number of transactions in the tip. + MedianTime time.Time // Median time as per CalcPastMedianTime. 
+} - return &BestState{ - Hash: node.hash, - Height: node.height, - Bits: node.bits, - BlockSize: blockSize, - NumTxns: numTxns, - TotalTxns: totalTxns, - MedianTime: medianTime, +// newDAGState returns a new state instance for the given parameters. +func newDAGState(tipHashes []daghash.Hash, node *blockNode, blockSize, numTxs, + totalTxs uint64, medianTime time.Time) *DAGState { + + return &DAGState{ + SelectedTip: SelectedTipState{ + Hash: node.hash, + Height: node.height, + Bits: node.bits, + BlockSize: blockSize, + NumTxs: numTxs, + MedianTime: medianTime, + }, + TipHashes: tipHashes, + TotalTxs: totalTxs, } } -// BlockChain provides functions for working with the bitcoin block chain. +// BlockDAG provides functions for working with the bitcoin block chain. // It includes functionality such as rejecting duplicate blocks, ensuring blocks // follow all rules, orphan handling, checkpoint handling, and best chain // selection with reorganization. -type BlockChain struct { +type BlockDAG struct { // The following fields are set when the instance is created and can't // be changed afterwards, so there is no need to protect them with a // separate mutex. - checkpoints []chaincfg.Checkpoint - checkpointsByHeight map[int32]*chaincfg.Checkpoint + checkpoints []dagconfig.Checkpoint + checkpointsByHeight map[int32]*dagconfig.Checkpoint db database.DB - chainParams *chaincfg.Params + dagParams *dagconfig.Params timeSource MedianTimeSource sigCache *txscript.SigCache indexManager IndexManager @@ -106,9 +112,9 @@ type BlockChain struct { maxRetargetTimespan int64 // target timespan * adjustment factor blocksPerRetarget int32 // target timespan / target time per block - // chainLock protects concurrent access to the vast majority of the + // dagLock protects concurrent access to the vast majority of the // fields in this struct below this point. - chainLock sync.RWMutex + dagLock sync.RWMutex // These fields are related to the memory block index. 
They both have // their own locks, however they are often also protected by the chain @@ -117,36 +123,36 @@ type BlockChain struct { // index houses the entire block index in memory. The block index is // a tree-shaped structure. // - // bestChain tracks the current active chain by making use of an + // dag tracks the current active chain by making use of an // efficient chain view into the block index. - index *blockIndex - bestChain *chainView + index *blockIndex + dag *dagView // These fields are related to handling of orphan blocks. They are // protected by a combination of the chain lock and the orphan lock. orphanLock sync.RWMutex - orphans map[chainhash.Hash]*orphanBlock - prevOrphans map[chainhash.Hash][]*orphanBlock + orphans map[daghash.Hash]*orphanBlock + prevOrphans map[daghash.Hash][]*orphanBlock oldestOrphan *orphanBlock // These fields are related to checkpoint handling. They are protected // by the chain lock. - nextCheckpoint *chaincfg.Checkpoint + nextCheckpoint *dagconfig.Checkpoint checkpointNode *blockNode // The state is used as a fairly efficient way to cache information - // about the current best chain state that is returned to callers when + // about the current DAG state that is returned to callers when // requested. It operates on the principle of MVCC such that any time a // new block becomes the best block, the state pointer is replaced with // a new struct and the old state is left untouched. In this way, - // multiple callers can be pointing to different best chain states. + // multiple callers can be pointing to different DAG states. // This is acceptable for most callers because the state is only being // queried at a specific point in time. // // In addition, some of the fields are stored in the database so the - // chain state can be quickly reconstructed on load. - stateLock sync.RWMutex - stateSnapshot *BestState + // DAG state can be quickly reconstructed on load. 
+ stateLock sync.RWMutex + dagState *DAGState // The following caches are used to efficiently keep track of the // current deployment threshold state of each rule change deployment. @@ -187,7 +193,7 @@ type BlockChain struct { // be like part of the main chain, on a side chain, or in the orphan pool. // // This function is safe for concurrent access. -func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) { +func (b *BlockDAG) HaveBlock(hash *daghash.Hash) (bool, error) { exists, err := b.blockExists(hash) if err != nil { return false, err @@ -205,7 +211,7 @@ func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) { // duplicate orphans and react accordingly. // // This function is safe for concurrent access. -func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool { +func (b *BlockDAG) IsKnownOrphan(hash *daghash.Hash) bool { // Protect concurrent access. Using a read lock only so multiple // readers can query without blocking each other. b.orphanLock.RLock() @@ -219,7 +225,7 @@ func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool { // map of orphan blocks. // // This function is safe for concurrent access. -func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash { +func (b *BlockDAG) GetOrphanRoot(hash *daghash.Hash) *daghash.Hash { // Protect concurrent access. Using a read lock only so multiple // readers can query without blocking each other. b.orphanLock.RLock() @@ -235,7 +241,7 @@ func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash { break } orphanRoot = prevHash - prevHash = &orphan.block.MsgBlock().Header.PrevBlock + prevHash = orphan.block.MsgBlock().Header.SelectedPrevBlock() } return orphanRoot @@ -243,7 +249,7 @@ func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash { // removeOrphanBlock removes the passed orphan block from the orphan pool and // previous orphan index. 
-func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) { +func (b *BlockDAG) removeOrphanBlock(orphan *orphanBlock) { // Protect concurrent access. b.orphanLock.Lock() defer b.orphanLock.Unlock() @@ -256,7 +262,7 @@ func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) { // for loop is intentionally used over a range here as range does not // reevaluate the slice on each iteration nor does it adjust the index // for the modified slice. - prevHash := &orphan.block.MsgBlock().Header.PrevBlock + prevHash := orphan.block.MsgBlock().Header.SelectedPrevBlock() orphans := b.prevOrphans[*prevHash] for i := 0; i < len(orphans); i++ { hash := orphans[i].block.Hash() @@ -282,7 +288,7 @@ func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) { // It also imposes a maximum limit on the number of outstanding orphan // blocks and will remove the oldest received orphan block if the limit is // exceeded. -func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { +func (b *BlockDAG) addOrphanBlock(block *btcutil.Block) { // Remove expired orphan blocks. for _, oBlock := range b.orphans { if time.Now().After(oBlock.expiration) { @@ -320,7 +326,7 @@ func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { b.orphans[*block.Hash()] = oBlock // Add to previous hash lookup index for faster dependency lookups. - prevHash := &block.MsgBlock().Header.PrevBlock + prevHash := block.MsgBlock().Header.SelectedPrevBlock() b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock) } @@ -344,18 +350,18 @@ type SequenceLock struct { // the candidate transaction to be included in a block. // // This function is safe for concurrent access. 
-func (b *BlockChain) CalcSequenceLock(tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) { - b.chainLock.Lock() - defer b.chainLock.Unlock() +func (b *BlockDAG) CalcSequenceLock(tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) { + b.dagLock.Lock() + defer b.dagLock.Unlock() - return b.calcSequenceLock(b.bestChain.Tip(), tx, utxoView, mempool) + return b.calcSequenceLock(b.dag.SelectedTip(), tx, utxoView, mempool) } // calcSequenceLock computes the relative lock-times for the passed // transaction. See the exported version, CalcSequenceLock for further details. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) { +func (b *BlockDAG) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) { // A value of -1 for each relative lock type represents a relative time // lock value that will allow a transaction to be included in a block // at any given height or time. @@ -462,67 +468,7 @@ func LockTimeToSequence(isSeconds bool, locktime uint64) uint64 { locktime>>wire.SequenceLockTimeGranularity } -// getReorganizeNodes finds the fork point between the main chain and the passed -// node and returns a list of block nodes that would need to be detached from -// the main chain and a list of block nodes that would need to be attached to -// the fork point (which will be the end of the main chain after detaching the -// returned list of block nodes) in order to reorganize the chain such that the -// passed node is the new end of the main chain. The lists will be empty if the -// passed node is not on a side chain. -// -// This function may modify node statuses in the block index without flushing. -// -// This function MUST be called with the chain state lock held (for reads). 
-func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) { - attachNodes := list.New() - detachNodes := list.New() - - // Do not reorganize to a known invalid chain. Ancestors deeper than the - // direct parent are checked below but this is a quick check before doing - // more unnecessary work. - if b.index.NodeStatus(node.parent).KnownInvalid() { - b.index.SetStatusFlags(node, statusInvalidAncestor) - return detachNodes, attachNodes - } - - // Find the fork point (if any) adding each block to the list of nodes - // to attach to the main tree. Push them onto the list in reverse order - // so they are attached in the appropriate order when iterating the list - // later. - forkNode := b.bestChain.FindFork(node) - invalidChain := false - for n := node; n != nil && n != forkNode; n = n.parent { - if b.index.NodeStatus(n).KnownInvalid() { - invalidChain = true - break - } - attachNodes.PushFront(n) - } - - // If any of the node's ancestors are invalid, unwind attachNodes, marking - // each one as invalid for future reference. - if invalidChain { - var next *list.Element - for e := attachNodes.Front(); e != nil; e = next { - next = e.Next() - n := attachNodes.Remove(e).(*blockNode) - b.index.SetStatusFlags(n, statusInvalidAncestor) - } - return detachNodes, attachNodes - } - - // Start from the end of the main chain and work backwards until the - // common ancestor adding each block to the list of nodes to detach from - // the main chain. - for n := b.bestChain.Tip(); n != nil && n != forkNode; n = n.parent { - detachNodes.PushBack(n) - } - - return detachNodes, attachNodes -} - -// connectBlock handles connecting the passed node/block to the end of the main -// (best) chain. +// connectBlock handles connecting the passed node/block to the DAG. // // This passed utxo view must have all referenced txos the block spends marked // as spent and all of the new txos the block creates added to it. 
In addition, @@ -532,14 +478,7 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List // it would be inefficient to repeat it. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint, stxos []spentTxOut) error { - // Make sure it's extending the end of the best chain. - prevHash := &block.MsgBlock().Header.PrevBlock - if !prevHash.IsEqual(&b.bestChain.Tip().hash) { - return AssertError("connectBlock must be called with a block " + - "that extends the main chain") - } - +func (b *BlockDAG) connectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint, stxos []spentTxOut) error { // Sanity check the correct number of stxos are provided. if len(stxos) != countSpentOutputs(block) { return AssertError("connectBlock called with inconsistent " + @@ -568,20 +507,20 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U return err } - // Generate a new best state snapshot that will be used to update the + // Generate a new state snapshot that will be used to update the // database and later memory if all database updates are successful. b.stateLock.RLock() - curTotalTxns := b.stateSnapshot.TotalTxns + currentTotalTxs := b.dagState.TotalTxs b.stateLock.RUnlock() - numTxns := uint64(len(block.MsgBlock().Transactions)) + numTxs := uint64(len(block.MsgBlock().Transactions)) blockSize := uint64(block.MsgBlock().SerializeSize()) - state := newBestState(node, blockSize, numTxns, - curTotalTxns+numTxns, node.CalcPastMedianTime()) + state := newDAGState(view.tips.hashes(), node, blockSize, numTxs, + currentTotalTxs+numTxs, node.CalcPastMedianTime()) // Atomically insert info into the database. err = b.db.Update(func(dbTx database.Tx) error { // Update best block state. 
- err := dbPutBestState(dbTx, state, node.workSum) + err := dbPutDAGState(dbTx, state) if err != nil { return err } @@ -629,134 +568,21 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U view.commit() // This node is now the end of the best chain. - b.bestChain.SetTip(node) + b.dag.SetTip(node) // Update the state for the best block. Notice how this replaces the // entire struct instead of updating the existing one. This effectively // allows the old version to act as a snapshot which callers can use // freely without needing to hold a lock for the duration. See the // comments on the state variable for more details. - b.stateLock.Lock() - b.stateSnapshot = state - b.stateLock.Unlock() + b.setDAGState(state) // Notify the caller that the block was connected to the main chain. // The caller would typically want to react with actions such as // updating wallets. - b.chainLock.Unlock() + b.dagLock.Unlock() b.sendNotification(NTBlockConnected, block) - b.chainLock.Lock() - - return nil -} - -// disconnectBlock handles disconnecting the passed node/block from the end of -// the main (best) chain. -// -// This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { - // Make sure the node being disconnected is the end of the best chain. - if !node.hash.IsEqual(&b.bestChain.Tip().hash) { - return AssertError("disconnectBlock must be called with the " + - "block at the end of the main chain") - } - - // Load the previous block since some details for it are needed below. - prevNode := node.parent - var prevBlock *btcutil.Block - err := b.db.View(func(dbTx database.Tx) error { - var err error - prevBlock, err = dbFetchBlockByNode(dbTx, prevNode) - return err - }) - if err != nil { - return err - } - - // Write any block status changes to DB before updating best state. 
- err = b.index.flushToDB() - if err != nil { - return err - } - - // Generate a new best state snapshot that will be used to update the - // database and later memory if all database updates are successful. - b.stateLock.RLock() - curTotalTxns := b.stateSnapshot.TotalTxns - b.stateLock.RUnlock() - numTxns := uint64(len(prevBlock.MsgBlock().Transactions)) - blockSize := uint64(prevBlock.MsgBlock().SerializeSize()) - newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions)) - state := newBestState(prevNode, blockSize, numTxns, - newTotalTxns, prevNode.CalcPastMedianTime()) - - err = b.db.Update(func(dbTx database.Tx) error { - // Update best block state. - err := dbPutBestState(dbTx, state, node.workSum) - if err != nil { - return err - } - - // Remove the block hash and height from the block index which - // tracks the main chain. - err = dbRemoveBlockIndex(dbTx, block.Hash(), node.height) - if err != nil { - return err - } - - // Update the utxo set using the state of the utxo view. This - // entails restoring all of the utxos spent and removing the new - // ones created by the block. - err = dbPutUtxoView(dbTx, view) - if err != nil { - return err - } - - // Update the transaction spend journal by removing the record - // that contains all txos spent by the block . - err = dbRemoveSpendJournalEntry(dbTx, block.Hash()) - if err != nil { - return err - } - - // Allow the index manager to call each of the currently active - // optional indexes with the block being disconnected so they - // can update themselves accordingly. - if b.indexManager != nil { - err := b.indexManager.DisconnectBlock(dbTx, block, view) - if err != nil { - return err - } - } - - return nil - }) - if err != nil { - return err - } - - // Prune fully spent entries and mark all entries in the view unmodified - // now that the modifications have been committed to the database. - view.commit() - - // This node's parent is now the end of the best chain. 
- b.bestChain.SetTip(node.parent) - - // Update the state for the best block. Notice how this replaces the - // entire struct instead of updating the existing one. This effectively - // allows the old version to act as a snapshot which callers can use - // freely without needing to hold a lock for the duration. See the - // comments on the state variable for more details. - b.stateLock.Lock() - b.stateSnapshot = state - b.stateLock.Unlock() - - // Notify the caller that the block was disconnected from the main - // chain. The caller would typically want to react with actions such as - // updating wallets. - b.chainLock.Unlock() - b.sendNotification(NTBlockDisconnected, block) - b.chainLock.Lock() + b.dagLock.Lock() return nil } @@ -771,345 +597,69 @@ func countSpentOutputs(block *btcutil.Block) int { return numSpent } -// reorganizeChain reorganizes the block chain by disconnecting the nodes in the -// detachNodes list and connecting the nodes in the attach list. It expects -// that the lists are already in the correct order and are in sync with the -// end of the current best chain. Specifically, nodes that are being -// disconnected must be in reverse order (think of popping them off the end of -// the chain) and nodes the are being attached must be in forwards order -// (think pushing them onto the end of the chain). -// -// This function may modify node statuses in the block index without flushing. -// -// This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error { - // All of the blocks to detach and related spend journal entries needed - // to unspend transaction outputs in the blocks being disconnected must - // be loaded from the database during the reorg check phase below and - // then they are needed again when doing the actual database updates. - // Rather than doing two loads, cache the loaded data into these slices. 
- detachBlocks := make([]*btcutil.Block, 0, detachNodes.Len()) - detachSpentTxOuts := make([][]spentTxOut, 0, detachNodes.Len()) - attachBlocks := make([]*btcutil.Block, 0, attachNodes.Len()) - - // Disconnect all of the blocks back to the point of the fork. This - // entails loading the blocks and their associated spent txos from the - // database and using that information to unspend all of the spent txos - // and remove the utxos created by the blocks. - view := NewUtxoViewpoint() - view.SetBestHash(&b.bestChain.Tip().hash) - for e := detachNodes.Front(); e != nil; e = e.Next() { - n := e.Value.(*blockNode) - var block *btcutil.Block - err := b.db.View(func(dbTx database.Tx) error { - var err error - block, err = dbFetchBlockByNode(dbTx, n) - return err - }) - if err != nil { - return err - } - - // Load all of the utxos referenced by the block that aren't - // already in the view. - err = view.fetchInputUtxos(b.db, block) - if err != nil { - return err - } - - // Load all of the spent txos for the block from the spend - // journal. - var stxos []spentTxOut - err = b.db.View(func(dbTx database.Tx) error { - stxos, err = dbFetchSpendJournalEntry(dbTx, block) - return err - }) - if err != nil { - return err - } - - // Store the loaded block and spend journal entry for later. - detachBlocks = append(detachBlocks, block) - detachSpentTxOuts = append(detachSpentTxOuts, stxos) - - err = view.disconnectTransactions(b.db, block, stxos) - if err != nil { - return err - } - } - - // Perform several checks to verify each block that needs to be attached - // to the main chain can be connected without violating any rules and - // without actually connecting the block. 
- // - // NOTE: These checks could be done directly when connecting a block, - // however the downside to that approach is that if any of these checks - // fail after disconnecting some blocks or attaching others, all of the - // operations have to be rolled back to get the chain back into the - // state it was before the rule violation (or other failure). There are - // at least a couple of ways accomplish that rollback, but both involve - // tweaking the chain and/or database. This approach catches these - // issues before ever modifying the chain. - var validationError error - for e := attachNodes.Front(); e != nil; e = e.Next() { - n := e.Value.(*blockNode) - - // If any previous nodes in attachNodes failed validation, - // mark this one as having an invalid ancestor. - if validationError != nil { - b.index.SetStatusFlags(n, statusInvalidAncestor) - continue - } - - var block *btcutil.Block - err := b.db.View(func(dbTx database.Tx) error { - var err error - block, err = dbFetchBlockByNode(dbTx, n) - return err - }) - if err != nil { - return err - } - - // Store the loaded block for later. - attachBlocks = append(attachBlocks, block) - - // Skip checks if node has already been fully validated. Although - // checkConnectBlock gets skipped, we still need to update the UTXO - // view. - if b.index.NodeStatus(n).KnownValid() { - err = view.fetchInputUtxos(b.db, block) - if err != nil { - return err - } - err = view.connectTransactions(block, nil) - if err != nil { - return err - } - continue - } - - // Notice the spent txout details are not requested here and - // thus will not be generated. This is done because the state - // is not being immediately written to the database, so it is - // not needed. - err = b.checkConnectBlock(n, block, view, nil) - if err != nil { - // If the block failed validation mark it as invalid, then - // continue to loop through remaining nodes, marking them as - // having an invalid ancestor. 
- if _, ok := err.(RuleError); ok { - b.index.SetStatusFlags(n, statusValidateFailed) - validationError = err - continue - } - return err - } - b.index.SetStatusFlags(n, statusValid) - } - - if validationError != nil { - return validationError - } - - // Reset the view for the actual connection code below. This is - // required because the view was previously modified when checking if - // the reorg would be successful and the connection code requires the - // view to be valid from the viewpoint of each block being connected or - // disconnected. - view = NewUtxoViewpoint() - view.SetBestHash(&b.bestChain.Tip().hash) - - // Disconnect blocks from the main chain. - for i, e := 0, detachNodes.Front(); e != nil; i, e = i+1, e.Next() { - n := e.Value.(*blockNode) - block := detachBlocks[i] - - // Load all of the utxos referenced by the block that aren't - // already in the view. - err := view.fetchInputUtxos(b.db, block) - if err != nil { - return err - } - - // Update the view to unspend all of the spent txos and remove - // the utxos created by the block. - err = view.disconnectTransactions(b.db, block, - detachSpentTxOuts[i]) - if err != nil { - return err - } - - // Update the database and chain state. - err = b.disconnectBlock(n, block, view) - if err != nil { - return err - } - } - - // Connect the new best chain blocks. - for i, e := 0, attachNodes.Front(); e != nil; i, e = i+1, e.Next() { - n := e.Value.(*blockNode) - block := attachBlocks[i] - - // Load all of the utxos referenced by the block that aren't - // already in the view. - err := view.fetchInputUtxos(b.db, block) - if err != nil { - return err - } - - // Update the view to mark all utxos referenced by the block - // as spent and add all transactions being created by this block - // to it. Also, provide an stxo slice so the spent txout - // details are generated. 
- stxos := make([]spentTxOut, 0, countSpentOutputs(block)) - err = view.connectTransactions(block, &stxos) - if err != nil { - return err - } - - // Update the database and chain state. - err = b.connectBlock(n, block, view, stxos) - if err != nil { - return err - } - } - - // Log the point where the chain forked and old and new best chain - // heads. - firstAttachNode := attachNodes.Front().Value.(*blockNode) - firstDetachNode := detachNodes.Front().Value.(*blockNode) - lastAttachNode := attachNodes.Back().Value.(*blockNode) - log.Infof("REORGANIZE: Chain forks at %v", firstAttachNode.parent.hash) - log.Infof("REORGANIZE: Old best chain head was %v", firstDetachNode.hash) - log.Infof("REORGANIZE: New best chain head is %v", lastAttachNode.hash) - - return nil -} - -// connectBestChain handles connecting the passed block to the chain while -// respecting proper chain selection according to the chain with the most -// proof of work. In the typical case, the new block simply extends the main -// chain. However, it may also be extending (or creating) a side chain (fork) -// which may or may not end up becoming the main chain depending on which fork -// cumulatively has the most proof of work. It returns whether or not the block -// ended up on the main chain (either due to extending the main chain or causing -// a reorganization to become the main chain). +// connectToDAG handles connecting the passed block to the DAG. // // The flags modify the behavior of this function as follows: // - BFFastAdd: Avoids several expensive transaction validation operations. // This is useful when using checkpoints. // // This function MUST be called with the chain state lock held (for writes). 
-func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, flags BehaviorFlags) (bool, error) { - fastAdd := flags&BFFastAdd == BFFastAdd +func (b *BlockDAG) connectToDAG(node *blockNode, parentNodes blockSet, block *btcutil.Block, flags BehaviorFlags) error { + // Skip checks if node has already been fully validated. + fastAdd := flags&BFFastAdd == BFFastAdd || b.index.NodeStatus(node).KnownValid() - // We are extending the main (best) chain with a new block. This is the - // most common case. - parentHash := &block.MsgBlock().Header.PrevBlock - if parentHash.IsEqual(&b.bestChain.Tip().hash) { - // Skip checks if node has already been fully validated. - fastAdd = fastAdd || b.index.NodeStatus(node).KnownValid() - - // Perform several checks to verify the block can be connected - // to the main chain without violating any rules and without - // actually connecting the block. - view := NewUtxoViewpoint() - view.SetBestHash(parentHash) - stxos := make([]spentTxOut, 0, countSpentOutputs(block)) - if !fastAdd { - err := b.checkConnectBlock(node, block, view, &stxos) - if err == nil { - b.index.SetStatusFlags(node, statusValid) - } else if _, ok := err.(RuleError); ok { - b.index.SetStatusFlags(node, statusValidateFailed) - } else { - return false, err - } - - // Intentionally ignore errors writing updated node status to DB. If - // it fails to write, it's not the end of the world. If the block is - // valid, we flush in connectBlock and if the block is invalid, the - // worst that can happen is we revalidate the block after a restart. - if writeErr := b.index.flushToDB(); writeErr != nil { - log.Warnf("Error flushing block index changes to disk: %v", - writeErr) - } - - if err != nil { - return false, err - } - } - - // In the fast add case the code to check the block connection - // was skipped, so the utxo view needs to load the referenced - // utxos, spend them, and add the new utxos being created by - // this block. 
- if fastAdd { - err := view.fetchInputUtxos(b.db, block) - if err != nil { - return false, err - } - err = view.connectTransactions(block, &stxos) - if err != nil { - return false, err - } - } - - // Connect the block to the main chain. - err := b.connectBlock(node, block, view, stxos) - if err != nil { - return false, err - } - - return true, nil - } - if fastAdd { - log.Warnf("fastAdd set in the side chain case? %v\n", - block.Hash()) - } - - // We're extending (or creating) a side chain, but the cumulative - // work for this new side chain is not enough to make it the new chain. - if node.workSum.Cmp(b.bestChain.Tip().workSum) <= 0 { - // Log information about how the block is forking the chain. - fork := b.bestChain.FindFork(node) - if fork.hash.IsEqual(parentHash) { - log.Infof("FORK: Block %v forks the chain at height %d"+ - "/block %v, but does not cause a reorganize", - node.hash, fork.height, fork.hash) + // Perform several checks to verify the block can be connected + // to the DAG without violating any rules and without actually + // connecting the block. + view := NewUtxoViewpoint() + view.SetTips(parentNodes) + stxos := make([]spentTxOut, 0, countSpentOutputs(block)) + if !fastAdd { + err := b.checkConnectBlock(node, block, view, &stxos) + if err == nil { + b.index.SetStatusFlags(node, statusValid) + } else if _, ok := err.(RuleError); ok { + b.index.SetStatusFlags(node, statusValidateFailed) } else { - log.Infof("EXTEND FORK: Block %v extends a side chain "+ - "which forks the chain at height %d/block %v", - node.hash, fork.height, fork.hash) + return err } - return false, nil + // Intentionally ignore errors writing updated node status to DB. If + // it fails to write, it's not the end of the world. If the block is + // valid, we flush in connectBlock and if the block is invalid, the + // worst that can happen is we revalidate the block after a restart. 
+ if writeErr := b.index.flushToDB(); writeErr != nil { + log.Warnf("Error flushing block index changes to disk: %v", + writeErr) + } + + if err != nil { + return err + } } - // We're extending (or creating) a side chain and the cumulative work - // for this new side chain is more than the old best chain, so this side - // chain needs to become the main chain. In order to accomplish that, - // find the common ancestor of both sides of the fork, disconnect the - // blocks that form the (now) old fork from the main chain, and attach - // the blocks that form the new chain to the main chain starting at the - // common ancenstor (the point where the chain forked). - detachNodes, attachNodes := b.getReorganizeNodes(node) - - // Reorganize the chain. - log.Infof("REORGANIZE: Block %v is causing a reorganize.", node.hash) - err := b.reorganizeChain(detachNodes, attachNodes) - - // Either getReorganizeNodes or reorganizeChain could have made unsaved - // changes to the block index, so flush regardless of whether there was an - // error. The index would only be dirty if the block failed to connect, so - // we can ignore any errors writing. - if writeErr := b.index.flushToDB(); writeErr != nil { - log.Warnf("Error flushing block index changes to disk: %v", writeErr) + // In the fast add case the code to check the block connection + // was skipped, so the utxo view needs to load the referenced + // utxos, spend them, and add the new utxos being created by + // this block. + if fastAdd { + err := view.fetchInputUtxos(b.db, block) + if err != nil { + return err + } + err = view.connectTransactions(node, block.Transactions(), &stxos) + if err != nil { + return err + } } - return err == nil, err + // Connect the block to the DAG. + err := b.connectBlock(node, block, view, stxos) + if err != nil { + return err + } + + return nil } // isCurrent returns whether or not the chain believes it is current. 
Several @@ -1119,11 +669,11 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla // - Latest block has a timestamp newer than 24 hours ago // // This function MUST be called with the chain state lock held (for reads). -func (b *BlockChain) isCurrent() bool { +func (b *BlockDAG) isCurrent() bool { // Not current if the latest main (best) chain height is before the // latest known good checkpoint (when checkpoints are enabled). checkpoint := b.LatestCheckpoint() - if checkpoint != nil && b.bestChain.Tip().height < checkpoint.Height { + if checkpoint != nil && b.dag.SelectedTip().height < checkpoint.Height { return false } @@ -1133,7 +683,7 @@ func (b *BlockChain) isCurrent() bool { // The chain appears to be current if none of the checks reported // otherwise. minus24Hours := b.timeSource.AdjustedTime().Add(-24 * time.Hour).Unix() - return b.bestChain.Tip().timestamp >= minus24Hours + return b.dag.SelectedTip().timestamp >= minus24Hours } // IsCurrent returns whether or not the chain believes it is current. Several @@ -1143,28 +693,43 @@ func (b *BlockChain) isCurrent() bool { // - Latest block has a timestamp newer than 24 hours ago // // This function is safe for concurrent access. -func (b *BlockChain) IsCurrent() bool { - b.chainLock.RLock() - defer b.chainLock.RUnlock() +func (b *BlockDAG) IsCurrent() bool { + b.dagLock.RLock() + defer b.dagLock.RUnlock() return b.isCurrent() } -// BestSnapshot returns information about the current best chain block and -// related state as of the current point in time. The returned instance must be -// treated as immutable since it is shared by all callers. +// GetDAGState returns information about the DAG and related state as of the +// current point in time. The returned instance must be treated as immutable +// since it is shared by all callers. // // This function is safe for concurrent access. 
-func (b *BlockChain) BestSnapshot() *BestState { +func (b *BlockDAG) GetDAGState() *DAGState { b.stateLock.RLock() - snapshot := b.stateSnapshot - b.stateLock.RUnlock() - return snapshot + defer func() { + b.stateLock.RUnlock() + }() + + return b.dagState +} + +// setDAGState sets information about the DAG and related state as of the +// current point in time. +// +// This function is safe for concurrent access. +func (b *BlockDAG) setDAGState(dagState *DAGState) { + b.stateLock.Lock() + defer func() { + b.stateLock.Unlock() + }() + + b.dagState = dagState } // FetchHeader returns the block header identified by the given hash or an error // if it doesn't exist. -func (b *BlockChain) FetchHeader(hash *chainhash.Hash) (wire.BlockHeader, error) { +func (b *BlockDAG) FetchHeader(hash *daghash.Hash) (wire.BlockHeader, error) { // Reconstruct the header from the block index if possible. if node := b.index.LookupNode(hash); node != nil { return node.Header(), nil @@ -1187,9 +752,9 @@ func (b *BlockChain) FetchHeader(hash *chainhash.Hash) (wire.BlockHeader, error) // the main chain. // // This function is safe for concurrent access. -func (b *BlockChain) MainChainHasBlock(hash *chainhash.Hash) bool { +func (b *BlockDAG) MainChainHasBlock(hash *daghash.Hash) bool { node := b.index.LookupNode(hash) - return node != nil && b.bestChain.Contains(node) + return node != nil && b.dag.Contains(node) } // BlockLocatorFromHash returns a block locator for the passed block hash. @@ -1200,11 +765,11 @@ func (b *BlockChain) MainChainHasBlock(hash *chainhash.Hash) bool { // the passed hash is not currently known. // // This function is safe for concurrent access. 
-func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator { - b.chainLock.RLock() +func (b *BlockDAG) BlockLocatorFromHash(hash *daghash.Hash) BlockLocator { + b.dagLock.RLock() node := b.index.LookupNode(hash) - locator := b.bestChain.blockLocator(node) - b.chainLock.RUnlock() + locator := b.dag.blockLocator(node) + b.dagLock.RUnlock() return locator } @@ -1212,10 +777,10 @@ func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator { // main (best) chain. // // This function is safe for concurrent access. -func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) { - b.chainLock.RLock() - locator := b.bestChain.BlockLocator(nil) - b.chainLock.RUnlock() +func (b *BlockDAG) LatestBlockLocator() (BlockLocator, error) { + b.dagLock.RLock() + locator := b.dag.BlockLocator(nil) + b.dagLock.RUnlock() return locator, nil } @@ -1223,9 +788,9 @@ func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) { // main chain. // // This function is safe for concurrent access. -func (b *BlockChain) BlockHeightByHash(hash *chainhash.Hash) (int32, error) { +func (b *BlockDAG) BlockHeightByHash(hash *daghash.Hash) (int32, error) { node := b.index.LookupNode(hash) - if node == nil || !b.bestChain.Contains(node) { + if node == nil || !b.dag.Contains(node) { str := fmt.Sprintf("block %s is not in the main chain", hash) return 0, errNotInMainChain(str) } @@ -1237,8 +802,8 @@ func (b *BlockChain) BlockHeightByHash(hash *chainhash.Hash) (int32, error) { // main chain. // // This function is safe for concurrent access. 
-func (b *BlockChain) BlockHashByHeight(blockHeight int32) (*chainhash.Hash, error) { - node := b.bestChain.NodeByHeight(blockHeight) +func (b *BlockDAG) BlockHashByHeight(blockHeight int32) (*daghash.Hash, error) { + node := b.dag.NodeByHeight(blockHeight) if node == nil { str := fmt.Sprintf("no block at height %d exists", blockHeight) return nil, errNotInMainChain(str) @@ -1253,7 +818,7 @@ func (b *BlockChain) BlockHashByHeight(blockHeight int32) (*chainhash.Hash, erro // height. The end height will be limited to the current main chain height. // // This function is safe for concurrent access. -func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash, error) { +func (b *BlockDAG) HeightRange(startHeight, endHeight int32) ([]daghash.Hash, error) { // Ensure requested heights are sane. if startHeight < 0 { return nil, fmt.Errorf("start height of fetch range must not "+ @@ -1273,12 +838,12 @@ func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash // Grab a lock on the chain view to prevent it from changing due to a // reorg while building the hashes. - b.bestChain.mtx.Lock() - defer b.bestChain.mtx.Unlock() + b.dag.mtx.Lock() + defer b.dag.mtx.Unlock() // When the requested start height is after the most recent best chain // height, there is nothing to do. - latestHeight := b.bestChain.tip().height + latestHeight := b.dag.tip().height if startHeight > latestHeight { return nil, nil } @@ -1289,9 +854,9 @@ func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash } // Fetch as many as are available within the specified range. 
- hashes := make([]chainhash.Hash, 0, endHeight-startHeight) + hashes := make([]daghash.Hash, 0, endHeight-startHeight) for i := startHeight; i < endHeight; i++ { - hashes = append(hashes, b.bestChain.nodeByHeight(i).hash) + hashes = append(hashes, b.dag.nodeByHeight(i).hash) } return hashes, nil } @@ -1302,8 +867,8 @@ func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash // end hash must belong to a block that is known to be valid. // // This function is safe for concurrent access. -func (b *BlockChain) HeightToHashRange(startHeight int32, - endHash *chainhash.Hash, maxResults int) ([]chainhash.Hash, error) { +func (b *BlockDAG) HeightToHashRange(startHeight int32, + endHash *daghash.Hash, maxResults int) ([]daghash.Hash, error) { endNode := b.index.LookupNode(endHash) if endNode == nil { @@ -1330,10 +895,10 @@ func (b *BlockChain) HeightToHashRange(startHeight int32, // Walk backwards from endHeight to startHeight, collecting block hashes. node := endNode - hashes := make([]chainhash.Hash, resultsLength) + hashes := make([]daghash.Hash, resultsLength) for i := resultsLength - 1; i >= 0; i-- { hashes[i] = node.hash - node = node.parent + node = node.selectedParent } return hashes, nil } @@ -1342,8 +907,8 @@ func (b *BlockChain) HeightToHashRange(startHeight int32, // endHash where the block height is a positive multiple of interval. // // This function is safe for concurrent access. 
-func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int, -) ([]chainhash.Hash, error) { +func (b *BlockDAG) IntervalBlockHashes(endHash *daghash.Hash, interval int, +) ([]daghash.Hash, error) { endNode := b.index.LookupNode(endHash) if endNode == nil { @@ -1355,18 +920,18 @@ func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int, endHeight := endNode.height resultsLength := int(endHeight) / interval - hashes := make([]chainhash.Hash, resultsLength) + hashes := make([]daghash.Hash, resultsLength) - b.bestChain.mtx.Lock() - defer b.bestChain.mtx.Unlock() + b.dag.mtx.Lock() + defer b.dag.mtx.Unlock() blockNode := endNode for index := int(endHeight) / interval; index > 0; index-- { - // Use the bestChain chainView for faster lookups once lookup intersects + // Use the bestChain dagView for faster lookups once lookup intersects // the best chain. blockHeight := int32(index * interval) - if b.bestChain.contains(blockNode) { - blockNode = b.bestChain.nodeByHeight(blockHeight) + if b.dag.contains(blockNode) { + blockNode = b.dag.nodeByHeight(blockHeight) } else { blockNode = blockNode.Ancestor(blockHeight) } @@ -1393,7 +958,7 @@ func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int, // functions. // // This function MUST be called with the chain state lock held (for reads). -func (b *BlockChain) locateInventory(locator BlockLocator, hashStop *chainhash.Hash, maxEntries uint32) (*blockNode, uint32) { +func (b *BlockDAG) locateInventory(locator BlockLocator, hashStop *daghash.Hash, maxEntries uint32) (*blockNode, uint32) { // There are no block locators so a specific block is being requested // as identified by the stop hash. stopNode := b.index.LookupNode(hashStop) @@ -1409,10 +974,10 @@ func (b *BlockChain) locateInventory(locator BlockLocator, hashStop *chainhash.H // Find the most recent locator block hash in the main chain. 
In the // case none of the hashes in the locator are in the main chain, fall // back to the genesis block. - startNode := b.bestChain.Genesis() + startNode := b.dag.Genesis() for _, hash := range locator { node := b.index.LookupNode(hash) - if node != nil && b.bestChain.Contains(node) { + if node != nil && b.dag.Contains(node) { startNode = node break } @@ -1421,14 +986,14 @@ func (b *BlockChain) locateInventory(locator BlockLocator, hashStop *chainhash.H // Start at the block after the most recently known block. When there // is no next block it means the most recently known block is the tip of // the best chain, so there is nothing more to do. - startNode = b.bestChain.Next(startNode) + startNode = b.dag.Next(startNode) if startNode == nil { return nil, 0 } // Calculate how many entries are needed. - total := uint32((b.bestChain.Tip().height - startNode.height) + 1) - if stopNode != nil && b.bestChain.Contains(stopNode) && + total := uint32((b.dag.SelectedTip().height - startNode.height) + 1) + if stopNode != nil && b.dag.Contains(stopNode) && stopNode.height >= startNode.height { total = uint32((stopNode.height - startNode.height) + 1) @@ -1447,7 +1012,7 @@ func (b *BlockChain) locateInventory(locator BlockLocator, hashStop *chainhash.H // See the comment on the exported function for more details on special cases. // // This function MUST be called with the chain state lock held (for reads). -func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash { +func (b *BlockDAG) locateBlocks(locator BlockLocator, hashStop *daghash.Hash, maxHashes uint32) []daghash.Hash { // Find the node after the first known block in the locator and the // total number of nodes after it needed while respecting the stop hash // and max entries. @@ -1457,10 +1022,10 @@ func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash } // Populate and return the found hashes. 
- hashes := make([]chainhash.Hash, 0, total) + hashes := make([]daghash.Hash, 0, total) for i := uint32(0); i < total; i++ { hashes = append(hashes, node.hash) - node = b.bestChain.Next(node) + node = b.dag.Next(node) } return hashes } @@ -1478,10 +1043,10 @@ func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash // after the genesis block will be returned // // This function is safe for concurrent access. -func (b *BlockChain) LocateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash { - b.chainLock.RLock() +func (b *BlockDAG) LocateBlocks(locator BlockLocator, hashStop *daghash.Hash, maxHashes uint32) []daghash.Hash { + b.dagLock.RLock() hashes := b.locateBlocks(locator, hashStop, maxHashes) - b.chainLock.RUnlock() + b.dagLock.RUnlock() return hashes } @@ -1492,7 +1057,7 @@ func (b *BlockChain) LocateBlocks(locator BlockLocator, hashStop *chainhash.Hash // See the comment on the exported function for more details on special cases. // // This function MUST be called with the chain state lock held (for reads). -func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Hash, maxHeaders uint32) []wire.BlockHeader { +func (b *BlockDAG) locateHeaders(locator BlockLocator, hashStop *daghash.Hash, maxHeaders uint32) []wire.BlockHeader { // Find the node after the first known block in the locator and the // total number of nodes after it needed while respecting the stop hash // and max entries. @@ -1505,7 +1070,7 @@ func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Has headers := make([]wire.BlockHeader, 0, total) for i := uint32(0); i < total; i++ { headers = append(headers, node.Header()) - node = b.bestChain.Next(node) + node = b.dag.Next(node) } return headers } @@ -1523,10 +1088,10 @@ func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Has // after the genesis block will be returned // // This function is safe for concurrent access. 
-func (b *BlockChain) LocateHeaders(locator BlockLocator, hashStop *chainhash.Hash) []wire.BlockHeader { - b.chainLock.RLock() +func (b *BlockDAG) LocateHeaders(locator BlockLocator, hashStop *daghash.Hash) []wire.BlockHeader { + b.dagLock.RLock() headers := b.locateHeaders(locator, hashStop, wire.MaxBlockHeadersPerMsg) - b.chainLock.RUnlock() + b.dagLock.RUnlock() return headers } @@ -1539,7 +1104,7 @@ type IndexManager interface { // channel parameter specifies a channel the caller can close to signal // that the process should be interrupted. It can be nil if that // behavior is not desired. - Init(*BlockChain, <-chan struct{}) error + Init(*BlockDAG, <-chan struct{}) error // ConnectBlock is invoked when a new block has been connected to the // main chain. @@ -1565,19 +1130,19 @@ type Config struct { // This field can be nil if the caller does not desire the behavior. Interrupt <-chan struct{} - // ChainParams identifies which chain parameters the chain is associated + // DAGParams identifies which chain parameters the chain is associated // with. // // This field is required. - ChainParams *chaincfg.Params + DAGParams *dagconfig.Params // Checkpoints hold caller-defined checkpoints that should be added to - // the default checkpoints in ChainParams. Checkpoints must be sorted + // the default checkpoints in DAGParams. Checkpoints must be sorted // by height. // // This field can be nil if the caller does not wish to specify any // checkpoints. - Checkpoints []chaincfg.Checkpoint + Checkpoints []dagconfig.Checkpoint // TimeSource defines the median time source to use for things such as // block processing and determining whether or not the chain is current. @@ -1604,13 +1169,13 @@ type Config struct { IndexManager IndexManager } -// New returns a BlockChain instance using the provided configuration details. -func New(config *Config) (*BlockChain, error) { +// New returns a BlockDAG instance using the provided configuration details. 
+func New(config *Config) (*BlockDAG, error) { // Enforce required config fields. if config.DB == nil { return nil, AssertError("blockchain.New database is nil") } - if config.ChainParams == nil { + if config.DAGParams == nil { return nil, AssertError("blockchain.New chain parameters nil") } if config.TimeSource == nil { @@ -1619,10 +1184,10 @@ func New(config *Config) (*BlockChain, error) { // Generate a checkpoint by height map from the provided checkpoints // and assert the provided checkpoints are sorted by height as required. - var checkpointsByHeight map[int32]*chaincfg.Checkpoint + var checkpointsByHeight map[int32]*dagconfig.Checkpoint var prevCheckpointHeight int32 if len(config.Checkpoints) > 0 { - checkpointsByHeight = make(map[int32]*chaincfg.Checkpoint) + checkpointsByHeight = make(map[int32]*dagconfig.Checkpoint) for i := range config.Checkpoints { checkpoint := &config.Checkpoints[i] if checkpoint.Height <= prevCheckpointHeight { @@ -1635,15 +1200,15 @@ func New(config *Config) (*BlockChain, error) { } } - params := config.ChainParams + params := config.DAGParams targetTimespan := int64(params.TargetTimespan / time.Second) targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second) adjustmentFactor := params.RetargetAdjustmentFactor - b := BlockChain{ + b := BlockDAG{ checkpoints: config.Checkpoints, checkpointsByHeight: checkpointsByHeight, db: config.DB, - chainParams: params, + dagParams: params, timeSource: config.TimeSource, sigCache: config.SigCache, indexManager: config.IndexManager, @@ -1651,17 +1216,17 @@ func New(config *Config) (*BlockChain, error) { maxRetargetTimespan: targetTimespan * adjustmentFactor, blocksPerRetarget: int32(targetTimespan / targetTimePerBlock), index: newBlockIndex(config.DB, params), - bestChain: newChainView(nil), - orphans: make(map[chainhash.Hash]*orphanBlock), - prevOrphans: make(map[chainhash.Hash][]*orphanBlock), + dag: newDAGView(nil), + orphans: make(map[daghash.Hash]*orphanBlock), + prevOrphans: 
make(map[daghash.Hash][]*orphanBlock), warningCaches: newThresholdCaches(vbNumBits), - deploymentCaches: newThresholdCaches(chaincfg.DefinedDeployments), + deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments), } // Initialize the chain state from the passed database. When the db // does not yet contain any chain state, both it and the chain state // will be initialized to contain only the genesis block. - if err := b.initChainState(); err != nil { + if err := b.initDAGState(); err != nil { return nil, err } @@ -1684,10 +1249,10 @@ func New(config *Config) (*BlockChain, error) { return nil, err } - bestNode := b.bestChain.Tip() - log.Infof("Chain state (height %d, hash %v, totaltx %d, work %v)", - bestNode.height, bestNode.hash, b.stateSnapshot.TotalTxns, - bestNode.workSum) + selectedTip := b.dag.SelectedTip() + log.Infof("DAG state (height %d, hash %v, totaltx %d, work %v)", + selectedTip.height, selectedTip.hash, b.dagState.TotalTxs, + selectedTip.workSum) return &b, nil } diff --git a/blockchain/chain_test.go b/blockdag/dag_test.go similarity index 90% rename from blockchain/chain_test.go rename to blockdag/dag_test.go index 6ecb10e49..1c2b58b02 100644 --- a/blockchain/chain_test.go +++ b/blockdag/dag_test.go @@ -2,27 +2,27 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "reflect" "testing" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) // TestHaveBlock tests the HaveBlock API to ensure proper functionality. func TestHaveBlock(t *testing.T) { - // Load up blocks such that there is a side chain. + // Load up blocks such that there is a fork in the DAG. 
// (genesis block) -> 1 -> 2 -> 3 -> 4 - // \-> 3a + // \-> 3b testFiles := []string{ - "blk_0_to_4.dat.bz2", - "blk_3A.dat.bz2", + "blk_0_to_4.dat", + "blk_3B.dat", } var blocks []*btcutil.Block @@ -37,7 +37,7 @@ func TestHaveBlock(t *testing.T) { // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("haveblock", - &chaincfg.MainNetParams) + &dagconfig.MainNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return @@ -49,7 +49,7 @@ func TestHaveBlock(t *testing.T) { chain.TstSetCoinbaseMaturity(1) for i := 1; i < len(blocks); i++ { - _, isOrphan, err := chain.ProcessBlock(blocks[i], BFNone) + isOrphan, err := chain.ProcessBlock(blocks[i], BFNone) if err != nil { t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) return @@ -62,7 +62,7 @@ func TestHaveBlock(t *testing.T) { } // Insert an orphan block. - _, isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000), + isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000), BFNone) if err != nil { t.Errorf("Unable to process block: %v", err) @@ -78,21 +78,21 @@ func TestHaveBlock(t *testing.T) { hash string want bool }{ - // Genesis block should be present (in the main chain). - {hash: chaincfg.MainNetParams.GenesisHash.String(), want: true}, + // Genesis block should be present. + {hash: dagconfig.MainNetParams.GenesisHash.String(), want: true}, - // Block 3a should be present (on a side chain). - {hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true}, + // Block 3b should be present (as a second child of Block 2). + {hash: "00000033119c0f74eff8b4711fce3769ea33e8a69670d9c0366179a9a5b8aec3", want: true}, // Block 100000 should be present (as an orphan). - {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true}, + {hash: "000000824dca82828b9a18ac09ed7c93292f6a042fae580c1192c8cd086fd990", want: true}, // Random hashes should not be available. 
{hash: "123", want: false}, } for i, test := range tests { - hash, err := chainhash.NewHashFromStr(test.hash) + hash, err := daghash.NewHashFromStr(test.hash) if err != nil { t.Errorf("NewHashFromStr: %v", err) continue @@ -116,20 +116,20 @@ func TestHaveBlock(t *testing.T) { // combinations of inputs to the CalcSequenceLock function in order to ensure // the returned SequenceLocks are correct for each test instance. func TestCalcSequenceLock(t *testing.T) { - netParams := &chaincfg.SimNetParams + netParams := &dagconfig.SimNetParams blockVersion := int32(0x20000000) - // Generate enough synthetic blocks for the rest of the test - chain := newFakeChain(netParams) - node := chain.bestChain.Tip() + // Generate enough synthetic blocks to activate CSV. + chain := newFakeDag(netParams) + node := chain.dag.SelectedTip() blockTime := node.Header().Timestamp numBlocksToGenerate := uint32(5) for i := uint32(0); i < numBlocksToGenerate; i++ { blockTime = blockTime.Add(time.Second) node = newFakeNode(node, blockVersion, 0, blockTime) chain.index.AddNode(node) - chain.bestChain.SetTip(node) + chain.dag.SetTip(node) } // Create a utxo view with a fake utxo for the inputs used in the @@ -143,7 +143,7 @@ func TestCalcSequenceLock(t *testing.T) { }) utxoView := NewUtxoViewpoint() utxoView.AddTxOuts(targetTx, int32(numBlocksToGenerate)-4) - utxoView.SetBestHash(&node.hash) + utxoView.SetTips(setFromSlice(node)) // Create a utxo that spends the fake utxo created above for use in the // transactions created in the tests. It has an age of 4 blocks. Note @@ -421,8 +421,8 @@ func TestCalcSequenceLock(t *testing.T) { // nodeHashes is a convenience function that returns the hashes for all of the // passed indexes of the provided nodes. It is used to construct expected hash // slices in the tests. 
-func nodeHashes(nodes []*blockNode, indexes ...int) []chainhash.Hash { - hashes := make([]chainhash.Hash, 0, len(indexes)) +func nodeHashes(nodes []*blockNode, indexes ...int) []daghash.Hash { + hashes := make([]daghash.Hash, 0, len(indexes)) for _, idx := range indexes { hashes = append(hashes, nodes[idx].hash) } @@ -443,46 +443,46 @@ func nodeHeaders(nodes []*blockNode, indexes ...int) []wire.BlockHeader { // TestLocateInventory ensures that locating inventory via the LocateHeaders and // LocateBlocks functions behaves as expected. func TestLocateInventory(t *testing.T) { - // Construct a synthetic block chain with a block index consisting of + // Construct a synthetic block DAG with a block index consisting of // the following structure. // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 // \-> 16a -> 17a tip := tstTip - chain := newFakeChain(&chaincfg.MainNetParams) - branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18) - branch1Nodes := chainedNodes(branch0Nodes[14], 2) + dag := newFakeDag(&dagconfig.MainNetParams) + branch0Nodes := chainedNodes(setFromSlice(dag.dag.Genesis()), 18) + branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 2) for _, node := range branch0Nodes { - chain.index.AddNode(node) + dag.index.AddNode(node) } for _, node := range branch1Nodes { - chain.index.AddNode(node) + dag.index.AddNode(node) } - chain.bestChain.SetTip(tip(branch0Nodes)) + dag.dag.SetTip(tip(branch0Nodes)) // Create chain views for different branches of the overall chain to // simulate a local and remote node on different parts of the chain. - localView := newChainView(tip(branch0Nodes)) - remoteView := newChainView(tip(branch1Nodes)) + localView := newDAGView(tip(branch0Nodes)) + remoteView := newDAGView(tip(branch1Nodes)) // Create a chain view for a completely unrelated block chain to // simulate a remote node on a totally different chain. 
- unrelatedBranchNodes := chainedNodes(nil, 5) - unrelatedView := newChainView(tip(unrelatedBranchNodes)) + unrelatedBranchNodes := chainedNodes(newSet(), 5) + unrelatedView := newDAGView(tip(unrelatedBranchNodes)) tests := []struct { name string locator BlockLocator // locator for requested inventory - hashStop chainhash.Hash // stop hash for locator + hashStop daghash.Hash // stop hash for locator maxAllowed uint32 // max to locate, 0 = wire const headers []wire.BlockHeader // expected located headers - hashes []chainhash.Hash // expected located hashes + hashes []daghash.Hash // expected located hashes }{ { // Empty block locators and unknown stop hash. No // inventory should be located. name: "no locators, no stop", locator: nil, - hashStop: chainhash.Hash{}, + hashStop: daghash.Hash{}, headers: nil, hashes: nil, }, @@ -511,7 +511,7 @@ func TestLocateInventory(t *testing.T) { // the main chain and the stop hash has no effect. name: "remote side chain, unknown stop", locator: remoteView.BlockLocator(nil), - hashStop: chainhash.Hash{0x01}, + hashStop: daghash.Hash{0x01}, headers: nodeHeaders(branch0Nodes, 15, 16, 17), hashes: nodeHashes(branch0Nodes, 15, 16, 17), }, @@ -581,7 +581,7 @@ func TestLocateInventory(t *testing.T) { // effect. name: "remote main chain past, unknown stop", locator: localView.BlockLocator(branch0Nodes[12]), - hashStop: chainhash.Hash{0x01}, + hashStop: daghash.Hash{0x01}, headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17), hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17), }, @@ -651,7 +651,7 @@ func TestLocateInventory(t *testing.T) { // located inventory. name: "remote main chain same, unknown stop", locator: localView.BlockLocator(nil), - hashStop: chainhash.Hash{0x01}, + hashStop: daghash.Hash{0x01}, headers: nil, hashes: nil, }, @@ -675,7 +675,7 @@ func TestLocateInventory(t *testing.T) { // block. 
name: "remote unrelated chain", locator: unrelatedView.BlockLocator(nil), - hashStop: chainhash.Hash{}, + hashStop: daghash.Hash{}, headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, @@ -688,7 +688,7 @@ func TestLocateInventory(t *testing.T) { // block limited by the max. name: "remote genesis", locator: locatorHashes(branch0Nodes, 0), - hashStop: chainhash.Hash{}, + hashStop: daghash.Hash{}, maxAllowed: 3, headers: nodeHeaders(branch0Nodes, 1, 2, 3), hashes: nodeHashes(branch0Nodes, 1, 2, 3), @@ -704,7 +704,7 @@ func TestLocateInventory(t *testing.T) { // the fork point. name: "weak locator, single known side block", locator: locatorHashes(branch1Nodes, 1), - hashStop: chainhash.Hash{}, + hashStop: daghash.Hash{}, headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, @@ -721,7 +721,7 @@ func TestLocateInventory(t *testing.T) { // there are no more locators to find the fork point. name: "weak locator, multiple known side blocks", locator: locatorHashes(branch1Nodes, 1), - hashStop: chainhash.Hash{}, + hashStop: daghash.Hash{}, headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, @@ -751,12 +751,12 @@ func TestLocateInventory(t *testing.T) { if test.maxAllowed != 0 { // Need to use the unexported function to override the // max allowed for headers. 
- chain.chainLock.RLock() - headers = chain.locateHeaders(test.locator, + dag.dagLock.RLock() + headers = dag.locateHeaders(test.locator, &test.hashStop, test.maxAllowed) - chain.chainLock.RUnlock() + dag.dagLock.RUnlock() } else { - headers = chain.LocateHeaders(test.locator, + headers = dag.LocateHeaders(test.locator, &test.hashStop) } if !reflect.DeepEqual(headers, test.headers) { @@ -770,7 +770,7 @@ func TestLocateInventory(t *testing.T) { if test.maxAllowed != 0 { maxAllowed = test.maxAllowed } - hashes := chain.LocateBlocks(test.locator, &test.hashStop, + hashes := dag.LocateBlocks(test.locator, &test.hashStop, maxAllowed) if !reflect.DeepEqual(hashes, test.hashes) { t.Errorf("%s: unxpected hashes -- got %v, want %v", @@ -788,9 +788,9 @@ func TestHeightToHashRange(t *testing.T) { // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 // \-> 16a -> 17a -> 18a (unvalidated) tip := tstTip - chain := newFakeChain(&chaincfg.MainNetParams) - branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18) - branch1Nodes := chainedNodes(branch0Nodes[14], 3) + chain := newFakeDag(&dagconfig.MainNetParams) + branch0Nodes := chainedNodes(setFromSlice(chain.dag.Genesis()), 18) + branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 3) for _, node := range branch0Nodes { chain.index.SetStatusFlags(node, statusValid) chain.index.AddNode(node) @@ -801,14 +801,14 @@ func TestHeightToHashRange(t *testing.T) { } chain.index.AddNode(node) } - chain.bestChain.SetTip(tip(branch0Nodes)) + chain.dag.SetTip(tip(branch0Nodes)) tests := []struct { name string - startHeight int32 // locator for requested inventory - endHash chainhash.Hash // stop hash for locator - maxResults int // max to locate, 0 = wire const - hashes []chainhash.Hash // expected located hashes + startHeight int32 // locator for requested inventory + endHash daghash.Hash // stop hash for locator + maxResults int // max to locate, 0 = wire const + hashes []daghash.Hash // expected located hashes expectError bool }{ { 
@@ -880,9 +880,9 @@ func TestIntervalBlockHashes(t *testing.T) { // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 // \-> 16a -> 17a -> 18a (unvalidated) tip := tstTip - chain := newFakeChain(&chaincfg.MainNetParams) - branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18) - branch1Nodes := chainedNodes(branch0Nodes[14], 3) + chain := newFakeDag(&dagconfig.MainNetParams) + branch0Nodes := chainedNodes(setFromSlice(chain.dag.Genesis()), 18) + branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 3) for _, node := range branch0Nodes { chain.index.SetStatusFlags(node, statusValid) chain.index.AddNode(node) @@ -893,13 +893,13 @@ func TestIntervalBlockHashes(t *testing.T) { } chain.index.AddNode(node) } - chain.bestChain.SetTip(tip(branch0Nodes)) + chain.dag.SetTip(tip(branch0Nodes)) tests := []struct { name string - endHash chainhash.Hash + endHash daghash.Hash interval int - hashes []chainhash.Hash + hashes []daghash.Hash expectError bool }{ { @@ -919,7 +919,7 @@ func TestIntervalBlockHashes(t *testing.T) { name: "no results", endHash: branch0Nodes[17].hash, interval: 20, - hashes: []chainhash.Hash{}, + hashes: []daghash.Hash{}, }, { name: "unvalidated block", diff --git a/blockchain/chainio.go b/blockdag/dagio.go similarity index 76% rename from blockchain/chainio.go rename to blockdag/dagio.go index e56ff0edd..ed4fc9ae3 100644 --- a/blockchain/chainio.go +++ b/blockdag/dagio.go @@ -2,20 +2,20 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "bytes" "encoding/binary" "fmt" - "math/big" "sync" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" + "encoding/json" ) const ( @@ -47,9 +47,9 @@ var ( // the block height -> block hash index. 
heightIndexBucketName = []byte("heightidx") - // chainStateKeyName is the name of the db key used to store the best - // chain state. - chainStateKeyName = []byte("chainstate") + // dagStateKeyName is the name of the db key used to store the DAG + // state. + dagStateKeyName = []byte("dagstate") // spendJournalVersionKeyName is the name of the db key used to store // the version of the spend journal currently in the database. @@ -104,13 +104,6 @@ func isDeserializeErr(err error) bool { return ok } -// isDbBucketNotFoundErr returns whether or not the passed error is a -// database.Error with an error code of database.ErrBucketNotFound. -func isDbBucketNotFoundErr(err error) bool { - dbErr, ok := err.(database.Error) - return ok && dbErr.ErrorCode == database.ErrBucketNotFound -} - // dbFetchVersion fetches an individual version with the given key from the // metadata bucket. It is primarily used to track versions on entities such as // buckets. It returns zero if the provided key does not exist. @@ -343,10 +336,10 @@ func decodeSpentTxOut(serialized []byte, stxo *spentTxOut) (int, error) { // Since the serialization format is not self describing, as noted in the // format comments, this function also requires the transactions that spend the // txouts. -func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]spentTxOut, error) { +func deserializeSpendJournalEntry(serialized []byte, txs []*wire.MsgTx) ([]spentTxOut, error) { // Calculate the total number of stxos. 
var numStxos int - for _, tx := range txns { + for _, tx := range txs { numStxos += len(tx.TxIn) } @@ -369,8 +362,8 @@ func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]spen stxoIdx := numStxos - 1 offset := 0 stxos := make([]spentTxOut, numStxos) - for txIdx := len(txns) - 1; txIdx > -1; txIdx-- { - tx := txns[txIdx] + for txIdx := len(txs) - 1; txIdx > -1; txIdx-- { + tx := txs[txIdx] // Loop backwards through all of the transaction inputs and read // the associated stxo. @@ -416,53 +409,16 @@ func serializeSpendJournalEntry(stxos []spentTxOut) []byte { return serialized } -// dbFetchSpendJournalEntry fetches the spend journal entry for the passed block -// and deserializes it into a slice of spent txout entries. -// -// NOTE: Legacy entries will not have the coinbase flag or height set unless it -// was the final output spend in the containing transaction. It is up to the -// caller to handle this properly by looking the information up in the utxo set. -func dbFetchSpendJournalEntry(dbTx database.Tx, block *btcutil.Block) ([]spentTxOut, error) { - // Exclude the coinbase transaction since it can't spend anything. - spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) - serialized := spendBucket.Get(block.Hash()[:]) - blockTxns := block.MsgBlock().Transactions[1:] - stxos, err := deserializeSpendJournalEntry(serialized, blockTxns) - if err != nil { - // Ensure any deserialization errors are returned as database - // corruption errors. - if isDeserializeErr(err) { - return nil, database.Error{ - ErrorCode: database.ErrCorruption, - Description: fmt.Sprintf("corrupt spend "+ - "information for %v: %v", block.Hash(), - err), - } - } - - return nil, err - } - - return stxos, nil -} - // dbPutSpendJournalEntry uses an existing database transaction to update the // spend journal entry for the given block hash using the provided slice of // spent txouts. 
The spent txouts slice must contain an entry for every txout // the transactions in the block spend in the order they are spent. -func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []spentTxOut) error { +func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *daghash.Hash, stxos []spentTxOut) error { spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) serialized := serializeSpendJournalEntry(stxos) return spendBucket.Put(blockHash[:], serialized) } -// dbRemoveSpendJournalEntry uses an existing database transaction to remove the -// spend journal entry for the passed block hash. -func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error { - spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) - return spendBucket.Delete(blockHash[:]) -} - // ----------------------------------------------------------------------------- // The unspent transaction output (utxo) set consists of an entry for each // unspent output using a format that is optimized to reduce space using domain @@ -478,7 +434,7 @@ func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) erro // // // Field Type Size -// hash chainhash.Hash chainhash.HashSize +// hash daghash.Hash daghash.HashSize // output index VLQ variable // // The serialized value format is: @@ -549,7 +505,7 @@ var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1) // provide temporary buffers for outpoint database keys. var outpointKeyPool = sync.Pool{ New: func() interface{} { - b := make([]byte, chainhash.HashSize+maxUint32VLQSerializeSize) + b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize) return &b // Pointer to slice to avoid boxing alloc. }, } @@ -566,9 +522,9 @@ func outpointKey(outpoint wire.OutPoint) *[]byte { // doing byte-wise comparisons will produce them in order. 
key := outpointKeyPool.Get().(*[]byte) idx := uint64(outpoint.Index) - *key = (*key)[:chainhash.HashSize+serializeSizeVLQ(idx)] + *key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)] copy(*key, outpoint.Hash[:]) - putVLQ((*key)[chainhash.HashSize:], idx) + putVLQ((*key)[daghash.HashSize:], idx) return key } @@ -666,7 +622,7 @@ func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) { // // When there are no entries for the provided hash, nil will be returned for the // both the entry and the error. -func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) { +func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *daghash.Hash) (*UtxoEntry, error) { // Attempt to find an entry by seeking for the hash along with a zero // index. Due to the fact the keys are serialized as , // where the index uses an MSB encoding, if there are any entries for @@ -683,10 +639,10 @@ func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, // highest hash after the requested one, so make sure the hashes // actually match. cursorKey := cursor.Key() - if len(cursorKey) < chainhash.HashSize { + if len(cursorKey) < daghash.HashSize { return nil, nil } - if !bytes.Equal(hash[:], cursorKey[:chainhash.HashSize]) { + if !bytes.Equal(hash[:], cursorKey[:daghash.HashSize]) { return nil, nil } @@ -793,13 +749,13 @@ func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error { // // // Field Type Size -// hash chainhash.Hash chainhash.HashSize +// hash daghash.Hash daghash.HashSize // ----------------------------------------------------------------------------- // dbPutBlockIndex uses an existing database transaction to update or add the // block index entries for the hash to height and height to hash mappings for // the provided values. 
-func dbPutBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error { +func dbPutBlockIndex(dbTx database.Tx, hash *daghash.Hash, height int32) error { // Serialize the height for use in the index entries. var serializedHeight [4]byte byteOrder.PutUint32(serializedHeight[:], uint32(height)) @@ -816,27 +772,9 @@ func dbPutBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error return heightIndex.Put(serializedHeight[:], hash[:]) } -// dbRemoveBlockIndex uses an existing database transaction remove block index -// entries from the hash to height and height to hash mappings for the provided -// values. -func dbRemoveBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error { - // Remove the block hash to height mapping. - meta := dbTx.Metadata() - hashIndex := meta.Bucket(hashIndexBucketName) - if err := hashIndex.Delete(hash[:]); err != nil { - return err - } - - // Remove the block height to hash mapping. - var serializedHeight [4]byte - byteOrder.PutUint32(serializedHeight[:], uint32(height)) - heightIndex := meta.Bucket(heightIndexBucketName) - return heightIndex.Delete(serializedHeight[:]) -} - // dbFetchHeightByHash uses an existing database transaction to retrieve the // height for the provided hash from the index. -func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) { +func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) { meta := dbTx.Metadata() hashIndex := meta.Bucket(hashIndexBucketName) serializedHeight := hashIndex.Get(hash[:]) @@ -848,147 +786,72 @@ func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) return int32(byteOrder.Uint32(serializedHeight)), nil } -// dbFetchHashByHeight uses an existing database transaction to retrieve the -// hash for the provided height from the index. 
-func dbFetchHashByHeight(dbTx database.Tx, height int32) (*chainhash.Hash, error) { - var serializedHeight [4]byte - byteOrder.PutUint32(serializedHeight[:], uint32(height)) - - meta := dbTx.Metadata() - heightIndex := meta.Bucket(heightIndexBucketName) - hashBytes := heightIndex.Get(serializedHeight[:]) - if hashBytes == nil { - str := fmt.Sprintf("no block at height %d exists", height) - return nil, errNotInMainChain(str) - } - - var hash chainhash.Hash - copy(hash[:], hashBytes) - return &hash, nil +// dbDAGState represents the data to be stored in the database for the current +// DAG state. +type dbDAGState struct { + SelectedHash daghash.Hash + TotalTxs uint64 } -// ----------------------------------------------------------------------------- -// The best chain state consists of the best block hash and height, the total -// number of transactions up to and including those in the best block, and the -// accumulated work sum up to and including the best block. -// -// The serialized format is: -// -// -// -// Field Type Size -// block hash chainhash.Hash chainhash.HashSize -// block height uint32 4 bytes -// total txns uint64 8 bytes -// work sum length uint32 4 bytes -// work sum big.Int work sum length -// ----------------------------------------------------------------------------- - -// bestChainState represents the data to be stored the database for the current -// best chain state. -type bestChainState struct { - hash chainhash.Hash - height uint32 - totalTxns uint64 - workSum *big.Int +// serializeDAGState returns the serialization of the DAG state. +// This is data to be stored in the DAG state bucket. +func serializeDAGState(state dbDAGState) ([]byte, error) { + return json.Marshal(state) } -// serializeBestChainState returns the serialization of the passed block best -// chain state. This is data to be stored in the chain state bucket. 
-func serializeBestChainState(state bestChainState) []byte { - // Calculate the full size needed to serialize the chain state. - workSumBytes := state.workSum.Bytes() - workSumBytesLen := uint32(len(workSumBytes)) - serializedLen := chainhash.HashSize + 4 + 8 + 4 + workSumBytesLen - - // Serialize the chain state. - serializedData := make([]byte, serializedLen) - copy(serializedData[0:chainhash.HashSize], state.hash[:]) - offset := uint32(chainhash.HashSize) - byteOrder.PutUint32(serializedData[offset:], state.height) - offset += 4 - byteOrder.PutUint64(serializedData[offset:], state.totalTxns) - offset += 8 - byteOrder.PutUint32(serializedData[offset:], workSumBytesLen) - offset += 4 - copy(serializedData[offset:], workSumBytes) - return serializedData[:] -} - -// deserializeBestChainState deserializes the passed serialized best chain -// state. This is data stored in the chain state bucket and is updated after -// every block is connected or disconnected form the main chain. -// block. -func deserializeBestChainState(serializedData []byte) (bestChainState, error) { - // Ensure the serialized data has enough bytes to properly deserialize - // the hash, height, total transactions, and work sum length. - if len(serializedData) < chainhash.HashSize+16 { - return bestChainState{}, database.Error{ +// deserializeDAGState deserializes the passed serialized DAG +// state. This is data stored in the DAG state bucket and is updated after +// every block is connected or disconnected from the DAG. 
+func deserializeDAGState(serializedData []byte) (*dbDAGState, error) { + var dbState dbDAGState + err := json.Unmarshal(serializedData, &dbState) + if err != nil { + return nil, database.Error{ ErrorCode: database.ErrCorruption, - Description: "corrupt best chain state", + Description: "corrupt DAG state", } } - state := bestChainState{} - copy(state.hash[:], serializedData[0:chainhash.HashSize]) - offset := uint32(chainhash.HashSize) - state.height = byteOrder.Uint32(serializedData[offset : offset+4]) - offset += 4 - state.totalTxns = byteOrder.Uint64(serializedData[offset : offset+8]) - offset += 8 - workSumBytesLen := byteOrder.Uint32(serializedData[offset : offset+4]) - offset += 4 - - // Ensure the serialized data has enough bytes to deserialize the work - // sum. - if uint32(len(serializedData[offset:])) < workSumBytesLen { - return bestChainState{}, database.Error{ - ErrorCode: database.ErrCorruption, - Description: "corrupt best chain state", - } - } - workSumBytes := serializedData[offset : offset+workSumBytesLen] - state.workSum = new(big.Int).SetBytes(workSumBytes) - - return state, nil + return &dbState, nil } -// dbPutBestState uses an existing database transaction to update the best chain +// dbPutDAGState uses an existing database transaction to update the DAG // state with the given parameters. -func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error { - // Serialize the current best chain state. - serializedData := serializeBestChainState(bestChainState{ - hash: snapshot.Hash, - height: uint32(snapshot.Height), - totalTxns: snapshot.TotalTxns, - workSum: workSum, +func dbPutDAGState(dbTx database.Tx, state *DAGState) error { + serializedData, err := serializeDAGState(dbDAGState{ + SelectedHash: state.SelectedTip.Hash, + TotalTxs: state.TotalTxs, }) - // Store the current best chain state into the database. 
- return dbTx.Metadata().Put(chainStateKeyName, serializedData) + if err != nil { + return err + } + + return dbTx.Metadata().Put(dagStateKeyName, serializedData) } -// createChainState initializes both the database and the chain state to the +// createDAGState initializes both the database and the DAG state to the // genesis block. This includes creating the necessary buckets and inserting // the genesis block, so it must only be called on an uninitialized database. -func (b *BlockChain) createChainState() error { - // Create a new node from the genesis block and set it as the best node. - genesisBlock := btcutil.NewBlock(b.chainParams.GenesisBlock) +func (b *BlockDAG) createDAGState() error { + // Create a new node from the genesis block and set it as the DAG. + genesisBlock := btcutil.NewBlock(b.dagParams.GenesisBlock) genesisBlock.SetHeight(0) header := &genesisBlock.MsgBlock().Header node := newBlockNode(header, nil) node.status = statusDataStored | statusValid - b.bestChain.SetTip(node) + b.dag.SetTip(node) // Add the new node to the index which is used for faster lookups. b.index.addNode(node) - // Initialize the state related to the best block. Since it is the - // genesis block, use its timestamp for the median time. - numTxns := uint64(len(genesisBlock.MsgBlock().Transactions)) + // Initialize the DAG state. Since it is the genesis block, use + // its timestamp for the median time. + numTxs := uint64(len(genesisBlock.MsgBlock().Transactions)) blockSize := uint64(genesisBlock.MsgBlock().SerializeSize()) - b.stateSnapshot = newBestState(node, blockSize, numTxns, - numTxns, time.Unix(node.timestamp, 0)) + dagState := newDAGState(b.dag.Tips().hashes(), node, blockSize, numTxs, + numTxs, time.Unix(node.timestamp, 0)) + b.setDAGState(dagState) // Create the initial the database chain state including creating the // necessary index buckets and inserting the genesis block. 
@@ -1054,8 +917,8 @@ func (b *BlockChain) createChainState() error { return err } - // Store the current best chain state into the database. - err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum) + // Store the current DAG state into the database. + err = dbPutDAGState(dbTx, b.dagState) if err != nil { return err } @@ -1066,15 +929,15 @@ func (b *BlockChain) createChainState() error { return err } -// initChainState attempts to load and initialize the chain state from the -// database. When the db does not yet contain any chain state, both it and the -// chain state are initialized to the genesis block. -func (b *BlockChain) initChainState() error { +// initDAGState attempts to load and initialize the DAG state from the +// database. When the db does not yet contain any DAG state, both it and the +// DAG state are initialized to the genesis block. +func (b *BlockDAG) initDAGState() error { // Determine the state of the chain database. We may need to initialize // everything from scratch or upgrade certain buckets. var initialized, hasBlockIndex bool err := b.db.View(func(dbTx database.Tx) error { - initialized = dbTx.Metadata().Get(chainStateKeyName) != nil + initialized = dbTx.Metadata().Get(dagStateKeyName) != nil hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil return nil }) @@ -1085,7 +948,7 @@ func (b *BlockChain) initChainState() error { if !initialized { // At this point the database has not already been initialized, so // initialize both it and the chain state to the genesis block. - return b.createChainState() + return b.createDAGState() } if !hasBlockIndex { @@ -1095,21 +958,21 @@ func (b *BlockChain) initChainState() error { } } - // Attempt to load the chain state from the database. + // Attempt to load the DAG state from the database. return b.db.View(func(dbTx database.Tx) error { - // Fetch the stored chain state from the database metadata. + // Fetch the stored DAG state from the database metadata. 
// When it doesn't exist, it means the database hasn't been - // initialized for use with chain yet, so break out now to allow + // initialized for use with the DAG yet, so break out now to allow // that to happen under a writable database transaction. - serializedData := dbTx.Metadata().Get(chainStateKeyName) - log.Tracef("Serialized chain state: %x", serializedData) - state, err := deserializeBestChainState(serializedData) + serializedData := dbTx.Metadata().Get(dagStateKeyName) + log.Tracef("Serialized DAG state: %x", serializedData) + state, err := deserializeDAGState(serializedData) if err != nil { return err } - // Load all of the headers from the data for the known best - // chain and construct the block index accordingly. Since the + // Load all of the headers from the data for the known DAG + // and construct the block index accordingly. Since the // number of nodes are already known, perform a single alloc // for them versus a whole bunch of little ones to reduce // pressure on the GC. @@ -1141,20 +1004,20 @@ func (b *BlockChain) initChainState() error { var parent *blockNode if lastNode == nil { blockHash := header.BlockHash() - if !blockHash.IsEqual(b.chainParams.GenesisHash) { - return AssertError(fmt.Sprintf("initChainState: Expected "+ + if !blockHash.IsEqual(b.dagParams.GenesisHash) { + return AssertError(fmt.Sprintf("initDAGState: Expected "+ "first entry in block index to be genesis block, "+ "found %s", blockHash)) } - } else if header.PrevBlock == lastNode.hash { + } else if *header.SelectedPrevBlock() == lastNode.hash { // Since we iterate block headers in order of height, if the // blocks are mostly linear there is a very good chance the // previous header processed is the parent. 
parent = lastNode } else { - parent = b.index.LookupNode(&header.PrevBlock) + parent = b.index.LookupNode(header.SelectedPrevBlock()) if parent == nil { - return AssertError(fmt.Sprintf("initChainState: Could "+ + return AssertError(fmt.Sprintf("initDAGState: Could "+ "not find parent for block %s", header.BlockHash())) } } @@ -1162,7 +1025,7 @@ func (b *BlockChain) initChainState() error { // Initialize the block node for the block, connect it, // and add it to the block index. node := &blockNodes[i] - initBlockNode(node, header, parent) + initBlockNode(node, header, setFromSlice(parent)) // TODO: (Stas) This is wrong. Modified only to satisfy compilation. node.status = status b.index.addNode(node) @@ -1170,16 +1033,16 @@ func (b *BlockChain) initChainState() error { i++ } - // Set the best chain view to the stored best state. - tip := b.index.LookupNode(&state.hash) - if tip == nil { - return AssertError(fmt.Sprintf("initChainState: cannot find "+ - "chain tip %s in block index", state.hash)) + // Set the DAG view to the stored state. + selectedTip := b.index.LookupNode(&state.SelectedHash) + if selectedTip == nil { + return AssertError(fmt.Sprintf("initDAGState: cannot find "+ + "DAG selectedTip %s in block index", state.SelectedHash)) } - b.bestChain.SetTip(tip) + b.dag.SetTip(selectedTip) - // Load the raw block bytes for the best block. - blockBytes, err := dbTx.FetchBlock(&state.hash) + // Load the raw block bytes for the selected tip. + blockBytes, err := dbTx.FetchBlock(&state.SelectedHash) if err != nil { return err } @@ -1189,10 +1052,11 @@ func (b *BlockChain) initChainState() error { return err } - // Initialize the state related to the best block. + // Initialize the DAG state. 
blockSize := uint64(len(blockBytes)) numTxns := uint64(len(block.Transactions)) - b.stateSnapshot = newBestState(tip, blockSize, numTxns, state.totalTxns, tip.CalcPastMedianTime()) + dagState := newDAGState(b.dag.Tips().hashes(), selectedTip, blockSize, numTxns, state.TotalTxs, selectedTip.CalcPastMedianTime()) + b.setDAGState(dagState) return nil }) @@ -1219,7 +1083,7 @@ func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error // dbFetchHeaderByHash uses an existing database transaction to retrieve the // block header for the provided hash. -func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) { +func dbFetchHeaderByHash(dbTx database.Tx, hash *daghash.Hash) (*wire.BlockHeader, error) { headerBytes, err := dbTx.FetchBlockHeader(hash) if err != nil { return nil, err @@ -1234,17 +1098,6 @@ func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHea return &header, nil } -// dbFetchHeaderByHeight uses an existing database transaction to retrieve the -// block header for the provided height. -func dbFetchHeaderByHeight(dbTx database.Tx, height int32) (*wire.BlockHeader, error) { - hash, err := dbFetchHashByHeight(dbTx, height) - if err != nil { - return nil, err - } - - return dbFetchHeaderByHash(dbTx, hash) -} - // dbFetchBlockByNode uses an existing database transaction to retrieve the // raw block for the provided node, deserialize it, and return a btcutil.Block // with the height set. @@ -1303,19 +1156,19 @@ func dbStoreBlock(dbTx database.Tx, block *btcutil.Block) error { // blockIndexKey generates the binary key for an entry in the block index // bucket. The key is composed of the block height encoded as a big-endian // 32-bit unsigned int followed by the 32 byte block hash. 
-func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte { - indexKey := make([]byte, chainhash.HashSize+4) +func blockIndexKey(blockHash *daghash.Hash, blockHeight uint32) []byte { + indexKey := make([]byte, daghash.HashSize+4) binary.BigEndian.PutUint32(indexKey[0:4], blockHeight) - copy(indexKey[4:chainhash.HashSize+4], blockHash[:]) + copy(indexKey[4:daghash.HashSize+4], blockHash[:]) return indexKey } // BlockByHeight returns the block at the given height in the main chain. // // This function is safe for concurrent access. -func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) { +func (b *BlockDAG) BlockByHeight(blockHeight int32) (*btcutil.Block, error) { // Lookup the block height in the best chain. - node := b.bestChain.NodeByHeight(blockHeight) + node := b.dag.NodeByHeight(blockHeight) if node == nil { str := fmt.Sprintf("no block at height %d exists", blockHeight) return nil, errNotInMainChain(str) @@ -1335,11 +1188,11 @@ func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) { // the appropriate chain height set. // // This function is safe for concurrent access. -func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*btcutil.Block, error) { +func (b *BlockDAG) BlockByHash(hash *daghash.Hash) (*btcutil.Block, error) { // Lookup the block hash in block index and ensure it is in the best // chain. node := b.index.LookupNode(hash) - if node == nil || !b.bestChain.Contains(node) { + if node == nil || !b.dag.Contains(node) { str := fmt.Sprintf("block %s is not in the main chain", hash) return nil, errNotInMainChain(str) } diff --git a/blockchain/chainio_test.go b/blockdag/dagio_test.go similarity index 90% rename from blockchain/chainio_test.go rename to blockdag/dagio_test.go index e2572d3a9..14e52fc4b 100644 --- a/blockchain/chainio_test.go +++ b/blockdag/dagio_test.go @@ -2,13 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "bytes" "errors" "math" - "math/big" "reflect" "testing" @@ -605,76 +604,70 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) { } } -// TestBestChainStateSerialization ensures serializing and deserializing the -// best chain state works as expected. -func TestBestChainStateSerialization(t *testing.T) { +// TestDAGStateSerialization ensures serializing and deserializing the +// DAG state works as expected. +func TestDAGStateSerialization(t *testing.T) { t.Parallel() - workSum := new(big.Int) tests := []struct { name string - state bestChainState + state dbDAGState serialized []byte }{ { name: "genesis", - state: bestChainState{ - hash: *newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"), - height: 0, - totalTxns: 1, - workSum: func() *big.Int { - workSum.Add(workSum, CalcWork(486604799)) - return new(big.Int).Set(workSum) - }(), // 0x0100010001 + state: dbDAGState{ + SelectedHash: *newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"), + TotalTxs: 1, }, - serialized: hexToBytes("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000000000000100000000000000050000000100010001"), + serialized: []byte("{\"SelectedHash\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"TotalTxs\":1}"), }, { name: "block 1", - state: bestChainState{ - hash: *newHashFromStr("ec85da8297525c2a2a5f3e826510ea1a48ee741e13a18b93ceeb2fb6c9848925,"), - height: 1, - totalTxns: 2, - workSum: func() *big.Int { - workSum.Add(workSum, CalcWork(486604799)) - return new(big.Int).Set(workSum) - }(), // 0x0200020002 + state: dbDAGState{ + SelectedHash: *newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048"), + TotalTxs: 2, }, - serialized: hexToBytes("4860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000010000000200000000000000050000000200020002"), + serialized: 
[]byte("{\"SelectedHash\":[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0],\"TotalTxs\":2}"), }, } for i, test := range tests { + gotBytes, err := serializeDAGState(test.state) + if err != nil { + t.Errorf("serializeDAGState #%d (%s) "+ + "unexpected error: %v", i, test.name, err) + continue + } + // Ensure the state serializes to the expected value. - gotBytes := serializeBestChainState(test.state) if !bytes.Equal(gotBytes, test.serialized) { - t.Errorf("serializeBestChainState #%d (%s): mismatched "+ - "bytes - got %x, want %x", i, test.name, - gotBytes, test.serialized) + t.Errorf("serializeDAGState #%d (%s): mismatched "+ + "bytes - got %s, want %s", i, test.name, + string(gotBytes), string(test.serialized)) continue } // Ensure the serialized bytes are decoded back to the expected // state. - state, err := deserializeBestChainState(test.serialized) + state, err := deserializeDAGState(test.serialized) if err != nil { - t.Errorf("deserializeBestChainState #%d (%s) "+ + t.Errorf("deserializeDAGState #%d (%s) "+ "unexpected error: %v", i, test.name, err) continue } - if !reflect.DeepEqual(state, test.state) { - t.Errorf("deserializeBestChainState #%d (%s) "+ + if !reflect.DeepEqual(*state, test.state) { + t.Errorf("deserializeDAGState #%d (%s) "+ "mismatched state - got %v, want %v", i, - test.name, state, test.state) + test.name, *state, test.state) continue - } } } -// TestBestChainStateDeserializeErrors performs negative tests against -// deserializing the chain state to ensure error paths work as expected. -func TestBestChainStateDeserializeErrors(t *testing.T) { +// TestDAGStateDeserializeErrors performs negative tests against +// deserializing the DAG state to ensure error paths work as expected. 
+func TestDAGStateDeserializeErrors(t *testing.T) { t.Parallel() tests := []struct { @@ -688,22 +681,17 @@ func TestBestChainStateDeserializeErrors(t *testing.T) { errType: database.Error{ErrorCode: database.ErrCorruption}, }, { - name: "short data in hash", - serialized: hexToBytes("0000"), - errType: database.Error{ErrorCode: database.ErrCorruption}, - }, - { - name: "short data in work sum", - serialized: hexToBytes("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000001000000000000000500000001000100"), + name: "corrupted data", + serialized: []byte("{\"SelectedHash\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"), errType: database.Error{ErrorCode: database.ErrCorruption}, }, } for _, test := range tests { // Ensure the expected error type and code is returned. - _, err := deserializeBestChainState(test.serialized) + _, err := deserializeDAGState(test.serialized) if reflect.TypeOf(err) != reflect.TypeOf(test.errType) { - t.Errorf("deserializeBestChainState (%s): expected "+ + t.Errorf("deserializeDAGState (%s): expected "+ "error type does not match - got %T, want %T", test.name, err, test.errType) continue @@ -711,7 +699,7 @@ func TestBestChainStateDeserializeErrors(t *testing.T) { if derr, ok := err.(database.Error); ok { tderr := test.errType.(database.Error) if derr.ErrorCode != tderr.ErrorCode { - t.Errorf("deserializeBestChainState (%s): "+ + t.Errorf("deserializeDAGState (%s): "+ "wrong error code got: %v, want: %v", test.name, derr.ErrorCode, tderr.ErrorCode) diff --git a/blockchain/chainview.go b/blockdag/dagview.go similarity index 87% rename from blockchain/chainview.go rename to blockdag/dagview.go index a4c3692cd..3f9d79ca5 100644 --- a/blockchain/chainview.go +++ b/blockdag/dagview.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "sync" @@ -31,7 +31,7 @@ func fastLog2Floor(n uint32) uint8 { return rv } -// chainView provides a flat view of a specific branch of the block chain from +// dagView provides a flat view of a specific branch of the block chain from // its tip back to the genesis block and provides various convenience functions // for comparing chains. // @@ -41,17 +41,17 @@ func fastLog2Floor(n uint32) uint8 { // // The chain view for the branch ending in 6a consists of: // genesis -> 1 -> 2 -> 3 -> 4a -> 5a -> 6a -type chainView struct { +type dagView struct { mtx sync.Mutex nodes []*blockNode } -// newChainView returns a new chain view for the given tip block node. Passing +// newDAGView returns a new chain view for the given tip block node. Passing // nil as the tip will result in a chain view that is not initialized. The tip // can be updated at any time via the setTip function. -func newChainView(tip *blockNode) *chainView { +func newDAGView(tip *blockNode) *dagView { // The mutex is intentionally not held since this is a constructor. - var c chainView + var c dagView c.setTip(tip) return &c } @@ -61,7 +61,7 @@ func newChainView(tip *blockNode) *chainView { // held. // // This function MUST be called with the view mutex locked (for reads). -func (c *chainView) genesis() *blockNode { +func (c *dagView) genesis() *blockNode { if len(c.nodes) == 0 { return nil } @@ -72,7 +72,7 @@ func (c *chainView) genesis() *blockNode { // Genesis returns the genesis block for the chain view. // // This function is safe for concurrent access. -func (c *chainView) Genesis() *blockNode { +func (c *dagView) Genesis() *blockNode { c.mtx.Lock() genesis := c.genesis() c.mtx.Unlock() @@ -84,7 +84,7 @@ func (c *chainView) Genesis() *blockNode { // it is up to the caller to ensure the lock is held. // // This function MUST be called with the view mutex locked (for reads). 
-func (c *chainView) tip() *blockNode {
+func (c *dagView) tip() *blockNode {
	if len(c.nodes) == 0 {
		return nil
	}
@@ -92,15 +92,28 @@
	return c.nodes[len(c.nodes)-1]
}

-// Tip returns the current tip block node for the chain view. It will return
-// nil if there is no tip.
+// Tips returns the current tip block nodes for the chain view. It will return
+// an empty set if there is no tip.
//
// This function is safe for concurrent access.
-func (c *chainView) Tip() *blockNode {
+func (c *dagView) Tips() blockSet {
	c.mtx.Lock()
	tip := c.tip()
	c.mtx.Unlock()
-	return tip
+
+	if tip == nil { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
+		return newSet()
+	}
+
+	return setFromSlice(tip) // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
+}
+
+// SelectedTip returns the current selected tip block node for the chain view.
+// It will return nil if there is no tip.
+//
+// This function is safe for concurrent access.
+func (c *dagView) SelectedTip() *blockNode {
+	return c.Tips().first()
}

// setTip sets the chain view to use the provided block node as the current tip
@@ -111,7 +124,7 @@ func (c *chainView) Tip() *blockNode {
// up to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for writes).
-func (c *chainView) setTip(node *blockNode) {
+func (c *dagView) setTip(node *blockNode) {
	if node == nil {
		// Keep the backing array around for potential future use.
		c.nodes = c.nodes[:0]
@@ -141,7 +154,7 @@ func (c *chainView) setTip(node *blockNode) {

	for node != nil && c.nodes[node.height] != node {
		c.nodes[node.height] = node
-		node = node.parent
+		node = node.selectedParent
	}
}

@@ -152,7 +165,7 @@ func (c *chainView) setTip(node *blockNode) {
// tips is efficient.
//
// This function is safe for concurrent access.
-func (c *chainView) SetTip(node *blockNode) { +func (c *dagView) SetTip(node *blockNode) { c.mtx.Lock() c.setTip(node) c.mtx.Unlock() @@ -164,7 +177,7 @@ func (c *chainView) SetTip(node *blockNode) { // to the caller to ensure the lock is held. // // This function MUST be called with the view mutex locked (for reads). -func (c *chainView) height() int32 { +func (c *dagView) height() int32 { return int32(len(c.nodes) - 1) } @@ -173,7 +186,7 @@ func (c *chainView) height() int32 { // initialized). // // This function is safe for concurrent access. -func (c *chainView) Height() int32 { +func (c *dagView) Height() int32 { c.mtx.Lock() height := c.height() c.mtx.Unlock() @@ -185,7 +198,7 @@ func (c *chainView) Height() int32 { // version in that it is up to the caller to ensure the lock is held. // // This function MUST be called with the view mutex locked (for reads). -func (c *chainView) nodeByHeight(height int32) *blockNode { +func (c *dagView) nodeByHeight(height int32) *blockNode { if height < 0 || height >= int32(len(c.nodes)) { return nil } @@ -197,7 +210,7 @@ func (c *chainView) nodeByHeight(height int32) *blockNode { // returned if the height does not exist. // // This function is safe for concurrent access. -func (c *chainView) NodeByHeight(height int32) *blockNode { +func (c *dagView) NodeByHeight(height int32) *blockNode { c.mtx.Lock() node := c.nodeByHeight(height) c.mtx.Unlock() @@ -208,7 +221,7 @@ func (c *chainView) NodeByHeight(height int32) *blockNode { // views (tip set to nil) are considered equal. // // This function is safe for concurrent access. -func (c *chainView) Equals(other *chainView) bool { +func (c *dagView) Equals(other *dagView) bool { c.mtx.Lock() other.mtx.Lock() equals := len(c.nodes) == len(other.nodes) && c.tip() == other.tip() @@ -222,7 +235,7 @@ func (c *chainView) Equals(other *chainView) bool { // caller to ensure the lock is held. // // This function MUST be called with the view mutex locked (for reads). 
-func (c *chainView) contains(node *blockNode) bool { +func (c *dagView) contains(node *blockNode) bool { return c.nodeByHeight(node.height) == node } @@ -230,7 +243,7 @@ func (c *chainView) contains(node *blockNode) bool { // node. // // This function is safe for concurrent access. -func (c *chainView) Contains(node *blockNode) bool { +func (c *dagView) Contains(node *blockNode) bool { c.mtx.Lock() contains := c.contains(node) c.mtx.Unlock() @@ -245,7 +258,7 @@ func (c *chainView) Contains(node *blockNode) bool { // See the comment on the exported function for more details. // // This function MUST be called with the view mutex locked (for reads). -func (c *chainView) next(node *blockNode) *blockNode { +func (c *dagView) next(node *blockNode) *blockNode { if node == nil || !c.contains(node) { return nil } @@ -270,7 +283,7 @@ func (c *chainView) next(node *blockNode) *blockNode { // of the view. // // This function is safe for concurrent access. -func (c *chainView) Next(node *blockNode) *blockNode { +func (c *dagView) Next(node *blockNode) *blockNode { c.mtx.Lock() next := c.next(node) c.mtx.Unlock() @@ -285,7 +298,7 @@ func (c *chainView) Next(node *blockNode) *blockNode { // See the exported FindFork comments for more details. // // This function MUST be called with the view mutex locked (for reads). -func (c *chainView) findFork(node *blockNode) *blockNode { +func (c *dagView) findFork(node *blockNode) *blockNode { // No fork point for node that doesn't exist. if node == nil { return nil @@ -311,7 +324,7 @@ func (c *chainView) findFork(node *blockNode) *blockNode { // contain the node or there are no more nodes in which case there is no // common node between the two. for node != nil && !c.contains(node) { - node = node.parent + node = node.selectedParent } return node @@ -333,7 +346,7 @@ func (c *chainView) findFork(node *blockNode) *blockNode { // the branch formed by the view. // // This function is safe for concurrent access. 
-func (c *chainView) FindFork(node *blockNode) *blockNode { +func (c *dagView) FindFork(node *blockNode) *blockNode { c.mtx.Lock() fork := c.findFork(node) c.mtx.Unlock() @@ -348,7 +361,7 @@ func (c *chainView) FindFork(node *blockNode) *blockNode { // See the exported BlockLocator function comments for more details. // // This function MUST be called with the view mutex locked (for reads). -func (c *chainView) blockLocator(node *blockNode) BlockLocator { +func (c *dagView) blockLocator(node *blockNode) BlockLocator { // Use the current tip if requested. if node == nil { node = c.tip() @@ -415,7 +428,7 @@ func (c *chainView) blockLocator(node *blockNode) BlockLocator { // locator. // // This function is safe for concurrent access. -func (c *chainView) BlockLocator(node *blockNode) BlockLocator { +func (c *dagView) BlockLocator(node *blockNode) BlockLocator { c.mtx.Lock() locator := c.blockLocator(node) c.mtx.Unlock() diff --git a/blockchain/chainview_test.go b/blockdag/dagview_test.go similarity index 85% rename from blockchain/chainview_test.go rename to blockdag/dagview_test.go index 7e0bacd02..2e8c81038 100644 --- a/blockchain/chainview_test.go +++ b/blockdag/dagview_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "fmt" @@ -20,18 +20,16 @@ var testNoncePrng = rand.New(rand.NewSource(0)) // chainedNodes returns the specified number of nodes constructed such that each // subsequent node points to the previous one to create a chain. The first node // will point to the passed parent which can be nil if desired. -func chainedNodes(parent *blockNode, numNodes int) []*blockNode { +func chainedNodes(parents blockSet, numNodes int) []*blockNode { nodes := make([]*blockNode, numNodes) - tip := parent + tips := parents for i := 0; i < numNodes; i++ { // This is invalid, but all that is needed is enough to get the // synthetic tests to work. 
header := wire.BlockHeader{Nonce: testNoncePrng.Uint32()} - if tip != nil { - header.PrevBlock = tip.hash - } - nodes[i] = newBlockNode(&header, tip) - tip = nodes[i] + header.PrevBlocks = tips.hashes() + nodes[i] = newBlockNode(&header, tips) + tips = setFromSlice(nodes[i]) } return nodes } @@ -78,54 +76,54 @@ func TestChainView(t *testing.T) { // \-> 2a -> 3a -> 4a -> 5a -> 6a -> 7a -> ... -> 26a // \-> 3a'-> 4a' -> 5a' branch0Nodes := chainedNodes(nil, 5) - branch1Nodes := chainedNodes(branch0Nodes[1], 25) - branch2Nodes := chainedNodes(branch1Nodes[0], 3) + branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[1]), 25) + branch2Nodes := chainedNodes(setFromSlice(branch1Nodes[0]), 3) tip := tstTip tests := []struct { name string - view *chainView // active view + view *dagView // active view genesis *blockNode // expected genesis block of active view tip *blockNode // expected tip of active view - side *chainView // side chain view + side *dagView // side chain view sideTip *blockNode // expected tip of side chain view fork *blockNode // expected fork node contains []*blockNode // expected nodes in active view noContains []*blockNode // expected nodes NOT in active view - equal *chainView // view expected equal to active view - unequal *chainView // view expected NOT equal to active + equal *dagView // view expected equal to active view + unequal *dagView // view expected NOT equal to active locator BlockLocator // expected locator for active view tip }{ { // Create a view for branch 0 as the active chain and // another view for branch 1 as the side chain. 
name: "chain0-chain1", - view: newChainView(tip(branch0Nodes)), + view: newDAGView(tip(branch0Nodes)), genesis: branch0Nodes[0], tip: tip(branch0Nodes), - side: newChainView(tip(branch1Nodes)), + side: newDAGView(tip(branch1Nodes)), sideTip: tip(branch1Nodes), fork: branch0Nodes[1], contains: branch0Nodes, noContains: branch1Nodes, - equal: newChainView(tip(branch0Nodes)), - unequal: newChainView(tip(branch1Nodes)), + equal: newDAGView(tip(branch0Nodes)), + unequal: newDAGView(tip(branch1Nodes)), locator: locatorHashes(branch0Nodes, 4, 3, 2, 1, 0), }, { // Create a view for branch 1 as the active chain and // another view for branch 2 as the side chain. name: "chain1-chain2", - view: newChainView(tip(branch1Nodes)), + view: newDAGView(tip(branch1Nodes)), genesis: branch0Nodes[0], tip: tip(branch1Nodes), - side: newChainView(tip(branch2Nodes)), + side: newDAGView(tip(branch2Nodes)), sideTip: tip(branch2Nodes), fork: branch1Nodes[0], contains: branch1Nodes, noContains: branch2Nodes, - equal: newChainView(tip(branch1Nodes)), - unequal: newChainView(tip(branch2Nodes)), + equal: newDAGView(tip(branch1Nodes)), + unequal: newDAGView(tip(branch2Nodes)), locator: zipLocators( locatorHashes(branch1Nodes, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 11, 7), @@ -135,16 +133,16 @@ func TestChainView(t *testing.T) { // Create a view for branch 2 as the active chain and // another view for branch 0 as the side chain. 
name: "chain2-chain0", - view: newChainView(tip(branch2Nodes)), + view: newDAGView(tip(branch2Nodes)), genesis: branch0Nodes[0], tip: tip(branch2Nodes), - side: newChainView(tip(branch0Nodes)), + side: newDAGView(tip(branch0Nodes)), sideTip: tip(branch0Nodes), fork: branch0Nodes[1], contains: branch2Nodes, noContains: branch0Nodes[2:], - equal: newChainView(tip(branch2Nodes)), - unequal: newChainView(tip(branch0Nodes)), + equal: newDAGView(tip(branch2Nodes)), + unequal: newDAGView(tip(branch0Nodes)), locator: zipLocators( locatorHashes(branch2Nodes, 2, 1, 0), locatorHashes(branch1Nodes, 0), @@ -184,28 +182,28 @@ testLoop: } // Ensure the active and side chain tips are the expected nodes. - if test.view.Tip() != test.tip { + if test.view.SelectedTip() != test.tip { t.Errorf("%s: unexpected active view tip -- got %v, "+ - "want %v", test.name, test.view.Tip(), test.tip) + "want %v", test.name, test.view.Tips(), test.tip) continue } - if test.side.Tip() != test.sideTip { + if test.side.SelectedTip() != test.sideTip { t.Errorf("%s: unexpected active view tip -- got %v, "+ - "want %v", test.name, test.side.Tip(), + "want %v", test.name, test.side.Tips(), test.sideTip) continue } // Ensure that regardless of the order the two chains are // compared they both return the expected fork point. - forkNode := test.view.FindFork(test.side.Tip()) + forkNode := test.view.FindFork(test.side.SelectedTip()) if forkNode != test.fork { t.Errorf("%s: unexpected fork node (view, side) -- "+ "got %v, want %v", test.name, forkNode, test.fork) continue } - forkNode = test.side.FindFork(test.view.Tip()) + forkNode = test.side.FindFork(test.view.SelectedTip()) if forkNode != test.fork { t.Errorf("%s: unexpected fork node (side, view) -- "+ "got %v, want %v", test.name, forkNode, @@ -215,11 +213,11 @@ testLoop: // Ensure that the fork point for a node that is already part // of the chain view is the node itself. 
- forkNode = test.view.FindFork(test.view.Tip()) - if forkNode != test.view.Tip() { + forkNode = test.view.FindFork(test.view.SelectedTip()) + if forkNode != test.view.SelectedTip() { t.Errorf("%s: unexpected fork node (view, tip) -- "+ "got %v, want %v", test.name, forkNode, - test.view.Tip()) + test.view.Tips()) continue } @@ -310,8 +308,8 @@ func TestChainViewForkCorners(t *testing.T) { unrelatedBranchNodes := chainedNodes(nil, 7) // Create chain views for the two unrelated histories. - view1 := newChainView(tstTip(branchNodes)) - view2 := newChainView(tstTip(unrelatedBranchNodes)) + view1 := newDAGView(tstTip(branchNodes)) + view2 := newDAGView(tstTip(unrelatedBranchNodes)) // Ensure attempting to find a fork point with a node that doesn't exist // doesn't produce a node. @@ -342,13 +340,13 @@ func TestChainViewSetTip(t *testing.T) { // structure. // 0 -> 1 -> 2 -> 3 -> 4 // \-> 2a -> 3a -> 4a -> 5a -> 6a -> 7a -> ... -> 26a - branch0Nodes := chainedNodes(nil, 5) - branch1Nodes := chainedNodes(branch0Nodes[1], 25) + branch0Nodes := chainedNodes(newSet(), 5) + branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[1]), 25) tip := tstTip tests := []struct { name string - view *chainView // active view + view *dagView // active view tips []*blockNode // tips to set contains [][]*blockNode // expected nodes in view for each tip }{ @@ -356,7 +354,7 @@ func TestChainViewSetTip(t *testing.T) { // Create an empty view and set the tip to increasingly // longer chains. name: "increasing", - view: newChainView(nil), + view: newDAGView(nil), tips: []*blockNode{tip(branch0Nodes), tip(branch1Nodes)}, contains: [][]*blockNode{branch0Nodes, branch1Nodes}, }, @@ -364,7 +362,7 @@ func TestChainViewSetTip(t *testing.T) { // Create a view with a longer chain and set the tip to // increasingly shorter chains. 
name: "decreasing", - view: newChainView(tip(branch1Nodes)), + view: newDAGView(tip(branch1Nodes)), tips: []*blockNode{tip(branch0Nodes), nil}, contains: [][]*blockNode{branch0Nodes, nil}, }, @@ -373,7 +371,7 @@ func TestChainViewSetTip(t *testing.T) { // a longer chain followed by setting it back to the // shorter chain. name: "small-large-small", - view: newChainView(tip(branch0Nodes)), + view: newDAGView(tip(branch0Nodes)), tips: []*blockNode{tip(branch1Nodes), tip(branch0Nodes)}, contains: [][]*blockNode{branch1Nodes, branch0Nodes}, }, @@ -382,7 +380,7 @@ func TestChainViewSetTip(t *testing.T) { // a smaller chain followed by setting it back to the // longer chain. name: "large-small-large", - view: newChainView(tip(branch1Nodes)), + view: newDAGView(tip(branch1Nodes)), tips: []*blockNode{tip(branch0Nodes), tip(branch1Nodes)}, contains: [][]*blockNode{branch0Nodes, branch1Nodes}, }, @@ -393,9 +391,9 @@ testLoop: for i, tip := range test.tips { // Ensure the view tip is the expected node. test.view.SetTip(tip) - if test.view.Tip() != tip { + if test.view.SelectedTip() != tip { // TODO: (Stas) This is wrong. Modified only to satisfy compilation. t.Errorf("%s: unexpected view tip -- got %v, "+ - "want %v", test.name, test.view.Tip(), + "want %v", test.name, test.view.Tips(), tip) continue testLoop } @@ -417,8 +415,8 @@ testLoop: // as expected. func TestChainViewNil(t *testing.T) { // Ensure two unininitialized views are considered equal. - view := newChainView(nil) - if !view.Equals(newChainView(nil)) { + view := newDAGView(nil) + if !view.Equals(newDAGView(nil)) { t.Fatal("uninitialized nil views unequal") } @@ -428,9 +426,9 @@ func TestChainViewNil(t *testing.T) { genesis) } - // Ensure the tip of an uninitialized view does not produce a node. - if tip := view.Tip(); tip != nil { - t.Fatalf("Tip: unexpected tip -- got %v, want nil", tip) + // Ensure the tips of an uninitialized view do not produce a node. 
+ if tips := view.Tips(); len(tips) > 0 { + t.Fatalf("Tip: unexpected tips -- got %v, want nothing", tips) } // Ensure the height of an uninitialized view is the expected value. diff --git a/blockchain/difficulty.go b/blockdag/difficulty.go similarity index 87% rename from blockchain/difficulty.go rename to blockdag/difficulty.go index 819067a03..3a9ca2e6c 100644 --- a/blockchain/difficulty.go +++ b/blockdag/difficulty.go @@ -2,13 +2,13 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "math/big" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) var ( @@ -21,9 +21,9 @@ var ( oneLsh256 = new(big.Int).Lsh(bigOne, 256) ) -// HashToBig converts a chainhash.Hash into a big.Int that can be used to +// HashToBig converts a daghash.Hash into a big.Int that can be used to // perform math comparisons. -func HashToBig(hash *chainhash.Hash) *big.Int { +func HashToBig(hash *daghash.Hash) *big.Int { // A Hash is in little-endian, but the big package wants the bytes in // big-endian, so reverse them. buf := *hash @@ -156,19 +156,19 @@ func CalcWork(bits uint32) *big.Int { // can have given starting difficulty bits and a duration. It is mainly used to // verify that claimed proof of work by a block is sane as compared to a // known good checkpoint. -func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 { +func (b *BlockDAG) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 { // Convert types used in the calculations below. durationVal := int64(duration / time.Second) - adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor) + adjustmentFactor := big.NewInt(b.dagParams.RetargetAdjustmentFactor) // The test network rules allow minimum difficulty blocks after more // than twice the desired amount of time needed to generate a block has // elapsed. 
- if b.chainParams.ReduceMinDifficulty { - reductionTime := int64(b.chainParams.MinDiffReductionTime / + if b.dagParams.ReduceMinDifficulty { + reductionTime := int64(b.dagParams.MinDiffReductionTime / time.Second) if durationVal > reductionTime { - return b.chainParams.PowLimitBits + return b.dagParams.PowLimitBits } } @@ -177,14 +177,14 @@ func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) // the number of retargets for the duration and starting difficulty // multiplied by the max adjustment factor. newTarget := CompactToBig(bits) - for durationVal > 0 && newTarget.Cmp(b.chainParams.PowLimit) < 0 { + for durationVal > 0 && newTarget.Cmp(b.dagParams.PowLimit) < 0 { newTarget.Mul(newTarget, adjustmentFactor) durationVal -= b.maxRetargetTimespan } // Limit new value to the proof of work limit. - if newTarget.Cmp(b.chainParams.PowLimit) > 0 { - newTarget.Set(b.chainParams.PowLimit) + if newTarget.Cmp(b.dagParams.PowLimit) > 0 { + newTarget.Set(b.dagParams.PowLimit) } return BigToCompact(newTarget) @@ -194,19 +194,19 @@ func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) // did not have the special testnet minimum difficulty rule applied. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) uint32 { +func (b *BlockDAG) findPrevTestNetDifficulty(startNode *blockNode) uint32 { // Search backwards through the chain for the last block without // the special rule applied. iterNode := startNode for iterNode != nil && iterNode.height%b.blocksPerRetarget != 0 && - iterNode.bits == b.chainParams.PowLimitBits { + iterNode.bits == b.dagParams.PowLimitBits { - iterNode = iterNode.parent + iterNode = iterNode.selectedParent } // Return the found difficulty or the minimum difficulty if no // appropriate block was found. 
- lastBits := b.chainParams.PowLimitBits + lastBits := b.dagParams.PowLimitBits if iterNode != nil { lastBits = iterNode.bits } @@ -218,10 +218,10 @@ func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) uint32 { // This function differs from the exported CalcNextRequiredDifficulty in that // the exported version uses the current best chain as the previous block node // while this function accepts any block node. -func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time) (uint32, error) { +func (b *BlockDAG) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time) (uint32, error) { // Genesis block. if lastNode == nil { - return b.chainParams.PowLimitBits, nil + return b.dagParams.PowLimitBits, nil } // Return the previous block's difficulty requirements if this block @@ -230,14 +230,14 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // For networks that support it, allow special reduction of the // required difficulty once too much time has elapsed without // mining a block. - if b.chainParams.ReduceMinDifficulty { + if b.dagParams.ReduceMinDifficulty { // Return minimum difficulty when more than the desired // amount of time has elapsed without mining a block. - reductionTime := int64(b.chainParams.MinDiffReductionTime / + reductionTime := int64(b.dagParams.MinDiffReductionTime / time.Second) allowMinTime := lastNode.timestamp + reductionTime if newBlockTime.Unix() > allowMinTime { - return b.chainParams.PowLimitBits, nil + return b.dagParams.PowLimitBits, nil } // The block was mined within the desired timeframe, so @@ -275,12 +275,12 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // result. 
oldTarget := CompactToBig(lastNode.bits) newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan)) - targetTimeSpan := int64(b.chainParams.TargetTimespan / time.Second) + targetTimeSpan := int64(b.dagParams.TargetTimespan / time.Second) newTarget.Div(newTarget, big.NewInt(targetTimeSpan)) // Limit new value to the proof of work limit. - if newTarget.Cmp(b.chainParams.PowLimit) > 0 { - newTarget.Set(b.chainParams.PowLimit) + if newTarget.Cmp(b.dagParams.PowLimit) > 0 { + newTarget.Set(b.dagParams.PowLimit) } // Log new target difficulty and return it. The new target logging is @@ -294,7 +294,7 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v", time.Duration(actualTimespan)*time.Second, time.Duration(adjustedTimespan)*time.Second, - b.chainParams.TargetTimespan) + b.dagParams.TargetTimespan) return newTargetBits, nil } @@ -304,9 +304,9 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // rules. // // This function is safe for concurrent access. -func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) { - b.chainLock.Lock() - difficulty, err := b.calcNextRequiredDifficulty(b.bestChain.Tip(), timestamp) - b.chainLock.Unlock() +func (b *BlockDAG) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) { + b.dagLock.Lock() + difficulty, err := b.calcNextRequiredDifficulty(b.dag.SelectedTip(), timestamp) + b.dagLock.Unlock() return difficulty, err } diff --git a/blockchain/difficulty_test.go b/blockdag/difficulty_test.go similarity index 98% rename from blockchain/difficulty_test.go rename to blockdag/difficulty_test.go index b42b7c730..3b6350b5c 100644 --- a/blockchain/difficulty_test.go +++ b/blockdag/difficulty_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "math/big" diff --git a/blockchain/doc.go b/blockdag/doc.go similarity index 99% rename from blockchain/doc.go rename to blockdag/doc.go index df99c5f5f..e85188933 100644 --- a/blockchain/doc.go +++ b/blockdag/doc.go @@ -78,4 +78,4 @@ This package includes spec changes outlined by the following BIPs: BIP0030 (https://en.bitcoin.it/wiki/BIP_0030) BIP0034 (https://en.bitcoin.it/wiki/BIP_0034) */ -package blockchain \ No newline at end of file +package blockdag \ No newline at end of file diff --git a/blockchain/error.go b/blockdag/error.go similarity index 99% rename from blockchain/error.go rename to blockdag/error.go index 77d1bb39d..c7b902dfd 100644 --- a/blockchain/error.go +++ b/blockdag/error.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "fmt" diff --git a/blockchain/error_test.go b/blockdag/error_test.go similarity index 99% rename from blockchain/error_test.go rename to blockdag/error_test.go index 90fe83f8a..079e2fc46 100644 --- a/blockchain/error_test.go +++ b/blockdag/error_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "testing" diff --git a/blockchain/example_test.go b/blockdag/example_test.go similarity index 78% rename from blockchain/example_test.go rename to blockdag/example_test.go index b9a6fe666..daac4c758 100644 --- a/blockchain/example_test.go +++ b/blockdag/example_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain_test +package blockdag_test import ( "fmt" @@ -10,8 +10,8 @@ import ( "os" "path/filepath" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcutil" @@ -30,7 +30,7 @@ func ExampleBlockChain_ProcessBlock() { // around. dbPath := filepath.Join(os.TempDir(), "exampleprocessblock") _ = os.RemoveAll(dbPath) - db, err := database.Create("ffldb", dbPath, chaincfg.MainNetParams.Net) + db, err := database.Create("ffldb", dbPath, dagconfig.MainNetParams.Net) if err != nil { fmt.Printf("Failed to create database: %v\n", err) return @@ -38,17 +38,17 @@ func ExampleBlockChain_ProcessBlock() { defer os.RemoveAll(dbPath) defer db.Close() - // Create a new BlockChain instance using the underlying database for + // Create a new BlockDAG instance using the underlying database for // the main bitcoin network. This example does not demonstrate some // of the other available configuration options such as specifying a // notification callback and signature cache. Also, the caller would // ordinarily keep a reference to the median time source and add time // values obtained from other peers on the network so the local time is // adjusted to be in agreement with other peers. - chain, err := blockchain.New(&blockchain.Config{ - DB: db, - ChainParams: &chaincfg.MainNetParams, - TimeSource: blockchain.NewMedianTime(), + chain, err := blockdag.New(&blockdag.Config{ + DB: db, + DAGParams: &dagconfig.MainNetParams, + TimeSource: blockdag.NewMedianTime(), }) if err != nil { fmt.Printf("Failed to create chain instance: %v\n", err) @@ -58,18 +58,17 @@ func ExampleBlockChain_ProcessBlock() { // Process a block. For this example, we are going to intentionally // cause an error by trying to process the genesis block which already // exists. 
- genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) - isMainChain, isOrphan, err := chain.ProcessBlock(genesisBlock, - blockchain.BFNone) + genesisBlock := btcutil.NewBlock(dagconfig.MainNetParams.GenesisBlock) + isOrphan, err := chain.ProcessBlock(genesisBlock, + blockdag.BFNone) if err != nil { fmt.Printf("Failed to process block: %v\n", err) return } - fmt.Printf("Block accepted. Is it on the main chain?: %v", isMainChain) fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan) // Output: - // Failed to process block: already have block 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f + // Failed to process block: already have block 2f0484f539fb39c0788abdea0805f799364c68cd162935504b80f3696fa99332 } // This example demonstrates how to convert the compact "bits" in a block header @@ -78,7 +77,7 @@ func ExampleBlockChain_ProcessBlock() { func ExampleCompactToBig() { // Convert the bits from block 300000 in the main block chain. bits := uint32(419465580) - targetDifficulty := blockchain.CompactToBig(bits) + targetDifficulty := blockdag.CompactToBig(bits) // Display it in hex. fmt.Printf("%064x\n", targetDifficulty.Bytes()) @@ -98,7 +97,7 @@ func ExampleBigToCompact() { fmt.Println("invalid target difficulty") return } - bits := blockchain.BigToCompact(targetDifficulty) + bits := blockdag.BigToCompact(targetDifficulty) fmt.Println(bits) diff --git a/blockchain/fullblocks_test.go b/blockdag/fullblocks_test.go similarity index 86% rename from blockchain/fullblocks_test.go rename to blockdag/fullblocks_test.go index 244046d79..758bf7793 100644 --- a/blockchain/fullblocks_test.go +++ b/blockdag/fullblocks_test.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain_test +package blockdag_test import ( "bytes" @@ -12,10 +12,10 @@ import ( "path/filepath" "testing" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/fullblocktests" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/fullblocktests" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcd/txscript" @@ -60,7 +60,7 @@ func isSupportedDbType(dbType string) bool { // chainSetup is used to create a new db and chain instance with the genesis // block already inserted. In addition to the new chain instance, it returns // a teardown function the caller should invoke when done testing to clean up. -func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain, func(), error) { +func chainSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) { if !isSupportedDbType(testDbType) { return nil, nil, fmt.Errorf("unsupported db type %v", testDbType) } @@ -114,11 +114,11 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain, paramsCopy := *params // Create the main chain instance. - chain, err := blockchain.New(&blockchain.Config{ + chain, err := blockdag.New(&blockdag.Config{ DB: db, - ChainParams: ¶msCopy, + DAGParams: ¶msCopy, Checkpoints: nil, - TimeSource: blockchain.NewMedianTime(), + TimeSource: blockdag.NewMedianTime(), SigCache: txscript.NewSigCache(1000), }) if err != nil { @@ -132,6 +132,10 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain, // TestFullBlocks ensures all tests generated by the fullblocktests package // have the expected result when processed via ProcessBlock. 
func TestFullBlocks(t *testing.T) { + // TODO: (Stas) This test was disabled for until we have implemented Phantom + // Ticket: https://daglabs.atlassian.net/browse/DEV-60 + t.SkipNow() + tests, err := fullblocktests.Generate(false) if err != nil { t.Fatalf("failed to generate tests: %v", err) @@ -139,7 +143,7 @@ func TestFullBlocks(t *testing.T) { // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("fullblocktest", - &chaincfg.RegressionNetParams) + &dagconfig.RegressionNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return @@ -156,22 +160,14 @@ func TestFullBlocks(t *testing.T) { t.Logf("Testing block %s (hash %s, height %d)", item.Name, block.Hash(), blockHeight) - isMainChain, isOrphan, err := chain.ProcessBlock(block, - blockchain.BFNone) + isOrphan, err := chain.ProcessBlock(block, + blockdag.BFNone) if err != nil { t.Fatalf("block %q (hash %s, height %d) should "+ "have been accepted: %v", item.Name, block.Hash(), blockHeight, err) } - // Ensure the main chain and orphan flags match the values - // specified in the test. 
- if isMainChain != item.IsMainChain { - t.Fatalf("block %q (hash %s, height %d) unexpected main "+ - "chain flag -- got %v, want %v", item.Name, - block.Hash(), blockHeight, isMainChain, - item.IsMainChain) - } if isOrphan != item.IsOrphan { t.Fatalf("block %q (hash %s, height %d) unexpected "+ "orphan flag -- got %v, want %v", item.Name, @@ -190,7 +186,7 @@ func TestFullBlocks(t *testing.T) { t.Logf("Testing block %s (hash %s, height %d)", item.Name, block.Hash(), blockHeight) - _, _, err := chain.ProcessBlock(block, blockchain.BFNone) + _, err := chain.ProcessBlock(block, blockdag.BFNone) if err == nil { t.Fatalf("block %q (hash %s, height %d) should not "+ "have been accepted", item.Name, block.Hash(), @@ -199,7 +195,7 @@ func TestFullBlocks(t *testing.T) { // Ensure the error code is of the expected type and the reject // code matches the value specified in the test instance. - rerr, ok := err.(blockchain.RuleError) + rerr, ok := err.(blockdag.RuleError) if !ok { t.Fatalf("block %q (hash %s, height %d) returned "+ "unexpected error type -- got %T, want "+ @@ -222,7 +218,7 @@ func TestFullBlocks(t *testing.T) { if headerLen > 80 { headerLen = 80 } - blockHash := chainhash.DoubleHashH(item.RawBlock[0:headerLen]) + blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen]) blockHeight := item.Height t.Logf("Testing block %s (hash %s, height %d)", item.Name, blockHash, blockHeight) @@ -247,10 +243,10 @@ func TestFullBlocks(t *testing.T) { t.Logf("Testing block %s (hash %s, height %d)", item.Name, block.Hash(), blockHeight) - _, isOrphan, err := chain.ProcessBlock(block, blockchain.BFNone) + isOrphan, err := chain.ProcessBlock(block, blockdag.BFNone) if err != nil { // Ensure the error code is of the expected type. 
- if _, ok := err.(blockchain.RuleError); !ok { + if _, ok := err.(blockdag.RuleError); !ok { t.Fatalf("block %q (hash %s, height %d) "+ "returned unexpected error type -- "+ "got %T, want blockchain.RuleError", @@ -276,14 +272,14 @@ func TestFullBlocks(t *testing.T) { item.Name, block.Hash(), blockHeight) // Ensure hash and height match. - best := chain.BestSnapshot() - if best.Hash != item.Block.BlockHash() || - best.Height != blockHeight { + dagState := chain.GetDAGState() + if dagState.SelectedTip.Hash != item.Block.BlockHash() || + dagState.SelectedTip.Height != blockHeight { t.Fatalf("block %q (hash %s, height %d) should be "+ "the current tip -- got (hash %s, height %d)", - item.Name, block.Hash(), blockHeight, best.Hash, - best.Height) + item.Name, block.Hash(), blockHeight, dagState.SelectedTip.Hash, + dagState.SelectedTip.Height) } } diff --git a/blockchain/fullblocktests/README.md b/blockdag/fullblocktests/README.md similarity index 100% rename from blockchain/fullblocktests/README.md rename to blockdag/fullblocktests/README.md diff --git a/blockchain/fullblocktests/doc.go b/blockdag/fullblocktests/doc.go similarity index 100% rename from blockchain/fullblocktests/doc.go rename to blockdag/fullblocktests/doc.go diff --git a/blockchain/fullblocktests/generate.go b/blockdag/fullblocktests/generate.go similarity index 94% rename from blockchain/fullblocktests/generate.go rename to blockdag/fullblocktests/generate.go index 39b92b5d6..e98b9d618 100644 --- a/blockchain/fullblocktests/generate.go +++ b/blockdag/fullblocktests/generate.go @@ -18,10 +18,10 @@ import ( "runtime" "time" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -64,11 
+64,10 @@ type TestInstance interface { // the blockchain either by extending the main chain, on a side chain, or as an // orphan. type AcceptedBlock struct { - Name string - Block *wire.MsgBlock - Height int32 - IsMainChain bool - IsOrphan bool + Name string + Block *wire.MsgBlock + Height int32 + IsOrphan bool } // Ensure AcceptedBlock implements the TestInstance interface. @@ -86,7 +85,7 @@ type RejectedBlock struct { Name string Block *wire.MsgBlock Height int32 - RejectCode blockchain.ErrorCode + RejectCode blockdag.ErrorCode } // Ensure RejectedBlock implements the TestInstance interface. @@ -179,17 +178,17 @@ func makeSpendableOut(block *wire.MsgBlock, txIndex, txOutIndex uint32) spendabl // that build from one another along with housing other useful things such as // available spendable outputs used throughout the tests. type testGenerator struct { - params *chaincfg.Params + params *dagconfig.Params tip *wire.MsgBlock tipName string tipHeight int32 - blocks map[chainhash.Hash]*wire.MsgBlock + blocks map[daghash.Hash]*wire.MsgBlock blocksByName map[string]*wire.MsgBlock blockHeights map[string]int32 // Used for tracking spendable coinbase outputs. spendableOuts []spendableOut - prevCollectedHash chainhash.Hash + prevCollectedHash daghash.Hash // Common key for any tests which require signed transactions. privKey *btcec.PrivateKey @@ -197,13 +196,13 @@ type testGenerator struct { // makeTestGenerator returns a test generator instance initialized with the // genesis block as the tip. 
-func makeTestGenerator(params *chaincfg.Params) (testGenerator, error) { +func makeTestGenerator(params *dagconfig.Params) (testGenerator, error) { privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), []byte{0x01}) genesis := params.GenesisBlock genesisHash := genesis.BlockHash() return testGenerator{ params: params, - blocks: map[chainhash.Hash]*wire.MsgBlock{genesisHash: genesis}, + blocks: map[daghash.Hash]*wire.MsgBlock{genesisHash: genesis}, blocksByName: map[string]*wire.MsgBlock{"genesis": genesis}, blockHeights: map[string]int32{"genesis": 0}, tip: genesis, @@ -286,13 +285,13 @@ func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx { tx.AddTxIn(&wire.TxIn{ // Coinbase transactions have no inputs, so previous outpoint is // zero hash and max index. - PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, + PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{}, wire.MaxPrevOutIndex), Sequence: wire.MaxTxInSequenceNum, SignatureScript: coinbaseScript, }) tx.AddTxOut(&wire.TxOut{ - Value: blockchain.CalcBlockSubsidy(blockHeight, g.params), + Value: blockdag.CalcBlockSubsidy(blockHeight, g.params), PkScript: opTrueScript, }) return tx @@ -300,16 +299,16 @@ func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx { // calcMerkleRoot creates a merkle tree from the slice of transactions and // returns the root of the tree. -func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash { +func calcMerkleRoot(txns []*wire.MsgTx) daghash.Hash { if len(txns) == 0 { - return chainhash.Hash{} + return daghash.Hash{} } utilTxns := make([]*btcutil.Tx, 0, len(txns)) for _, tx := range txns { utilTxns = append(utilTxns, btcutil.NewTx(tx)) } - merkles := blockchain.BuildMerkleTreeStore(utilTxns) + merkles := blockdag.BuildMerkleTreeStore(utilTxns) return *merkles[len(merkles)-1] } @@ -330,7 +329,7 @@ func solveBlock(header *wire.BlockHeader) bool { // solver accepts a block header and a nonce range to test. It is // intended to be run as a goroutine. 
- targetDifficulty := blockchain.CompactToBig(header.Bits) + targetDifficulty := blockdag.CompactToBig(header.Bits) quit := make(chan bool) results := make(chan sbResult) solver := func(hdr wire.BlockHeader, startNonce, stopNonce uint32) { @@ -343,7 +342,7 @@ func solveBlock(header *wire.BlockHeader) bool { default: hdr.Nonce = i hash := hdr.BlockHash() - if blockchain.HashToBig(&hash).Cmp( + if blockdag.HashToBig(&hash).Cmp( targetDifficulty) <= 0 { results <- sbResult{true, i} @@ -510,12 +509,13 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers block := wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: g.tip.BlockHash(), - MerkleRoot: calcMerkleRoot(txns), - Bits: g.params.PowLimitBits, - Timestamp: ts, - Nonce: 0, // To be solved. + Version: 1, + NumPrevBlocks: 1, // TODO: (Stas) This is wrong. Modified only to satisfy compilation. + PrevBlocks: []daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation. + MerkleRoot: calcMerkleRoot(txns), + Bits: g.params.PowLimitBits, + Timestamp: ts, + Nonce: 0, // To be solved. }, Transactions: txns, } @@ -553,7 +553,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers // map references to a block via its old hash and insert new ones for the new // block hash. This is useful if the test code has to manually change a block // after 'nextBlock' has returned. -func (g *testGenerator) updateBlockState(oldBlockName string, oldBlockHash chainhash.Hash, newBlockName string, newBlock *wire.MsgBlock) { +func (g *testGenerator) updateBlockState(oldBlockName string, oldBlockHash daghash.Hash, newBlockName string, newBlock *wire.MsgBlock) { // Look up the height from the existing entries. blockHeight := g.blockHeights[oldBlockName] @@ -607,7 +607,7 @@ func (g *testGenerator) saveSpendableCoinbaseOuts() { // reaching the block that has already had the coinbase outputs // collected. 
var collectBlocks []*wire.MsgBlock - for b := g.tip; b != nil; b = g.blocks[b.Header.PrevBlock] { + for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedPrevBlock()] { if b.BlockHash() == g.prevCollectedHash { break } @@ -736,7 +736,7 @@ func (g *testGenerator) assertTipBlockNumTxns(expected int) { // assertTipBlockHash panics if the current tip block associated with the // generator does not match the specified hash. -func (g *testGenerator) assertTipBlockHash(expected chainhash.Hash) { +func (g *testGenerator) assertTipBlockHash(expected daghash.Hash) { hash := g.tip.BlockHash() if hash != expected { panic(fmt.Sprintf("block hash of block %q (height %d) is %v "+ @@ -747,7 +747,7 @@ func (g *testGenerator) assertTipBlockHash(expected chainhash.Hash) { // assertTipBlockMerkleRoot panics if the merkle root in header of the current // tip block associated with the generator does not match the specified hash. -func (g *testGenerator) assertTipBlockMerkleRoot(expected chainhash.Hash) { +func (g *testGenerator) assertTipBlockMerkleRoot(expected daghash.Hash) { hash := g.tip.Header.MerkleRoot if hash != expected { panic(fmt.Sprintf("merkle root of block %q (height %d) is %v "+ @@ -835,12 +835,11 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // // expectTipBlock creates a test instance that expects the provided // block to be the current tip of the block chain. 
- acceptBlock := func(blockName string, block *wire.MsgBlock, isMainChain, isOrphan bool) TestInstance { + acceptBlock := func(blockName string, block *wire.MsgBlock, isOrphan bool) TestInstance { blockHeight := g.blockHeights[blockName] - return AcceptedBlock{blockName, block, blockHeight, isMainChain, - isOrphan} + return AcceptedBlock{blockName, block, blockHeight, isOrphan} } - rejectBlock := func(blockName string, block *wire.MsgBlock, code blockchain.ErrorCode) TestInstance { + rejectBlock := func(blockName string, block *wire.MsgBlock, code blockdag.ErrorCode) TestInstance { blockHeight := g.blockHeights[blockName] return RejectedBlock{blockName, block, blockHeight, code} } @@ -881,16 +880,16 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // test instance for the current tip. accepted := func() { tests = append(tests, []TestInstance{ - acceptBlock(g.tipName, g.tip, true, false), + acceptBlock(g.tipName, g.tip, false), }) } acceptedToSideChainWithExpectedTip := func(tipName string) { tests = append(tests, []TestInstance{ - acceptBlock(g.tipName, g.tip, false, false), + acceptBlock(g.tipName, g.tip, false), expectTipBlock(tipName, g.blocksByName[tipName]), }) } - rejected := func(code blockchain.ErrorCode) { + rejected := func(code blockdag.ErrorCode) { tests = append(tests, []TestInstance{ rejectBlock(g.tipName, g.tip, code), }) @@ -919,7 +918,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock(blockName, nil) g.saveTipCoinbaseOut() testInstances = append(testInstances, acceptBlock(g.tipName, - g.tip, true, false)) + g.tip, false)) } tests = append(tests, testInstances) @@ -997,7 +996,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { acceptedToSideChainWithExpectedTip("b6") g.nextBlock("b8", outs[4]) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // --------------------------------------------------------------------- // Too 
much proof-of-work coinbase tests. @@ -1010,7 +1009,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b3(1) -> b4(2) g.setTip("b6") g.nextBlock("b9", outs[4], additionalCoinbase(1)) - rejected(blockchain.ErrBadCoinbaseValue) + rejected(blockdag.ErrBadCoinbaseValue) // Create a fork that ends with block that generates too much coinbase. // @@ -1022,7 +1021,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { acceptedToSideChainWithExpectedTip("b6") g.nextBlock("b11", outs[4], additionalCoinbase(1)) - rejected(blockchain.ErrBadCoinbaseValue) + rejected(blockdag.ErrBadCoinbaseValue) // Create a fork that ends with block that generates too much coinbase // as before, but with a valid fork first. @@ -1036,9 +1035,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b13 := g.nextBlock("b13", outs[4]) b14 := g.nextBlock("b14", outs[5], additionalCoinbase(1)) tests = append(tests, []TestInstance{ - acceptBlock("b13", b13, false, true), - acceptBlock("b14", b14, false, true), - rejectBlock("b12", b12, blockchain.ErrBadCoinbaseValue), + acceptBlock("b13", b13, true), + acceptBlock("b14", b14, true), + rejectBlock("b12", b12, blockdag.ErrBadCoinbaseValue), expectTipBlock("b13", b13), }) @@ -1065,7 +1064,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { tooManySigOps := repeatOpcode(txscript.OpCheckSig, maxBlockSigOps+1) g.nextBlock("b16", outs[6], replaceSpendScript(tooManySigOps)) g.assertTipBlockSigOpsCount(maxBlockSigOps + 1) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // --------------------------------------------------------------------- // Cross-fork spend tests. 
@@ -1078,7 +1077,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b3(1) -> b4(2) g.setTip("b15") g.nextBlock("b17", &b3Tx1Out) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // Create block that forks and spends a tx created on a third fork. // @@ -1090,7 +1089,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { acceptedToSideChainWithExpectedTip("b15") g.nextBlock("b19", outs[6]) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // --------------------------------------------------------------------- // Immature coinbase tests. @@ -1102,7 +1101,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b20(7) g.setTip("b15") g.nextBlock("b20", outs[7]) - rejected(blockchain.ErrImmatureSpend) + rejected(blockdag.ErrImmatureSpend) // Create block that spends immature coinbase on a fork. // @@ -1113,7 +1112,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { acceptedToSideChainWithExpectedTip("b15") g.nextBlock("b22", outs[7]) - rejected(blockchain.ErrImmatureSpend) + rejected(blockdag.ErrImmatureSpend) // --------------------------------------------------------------------- // Max block size tests. @@ -1143,7 +1142,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { replaceSpendScript(sizePadScript)(b) }) g.assertTipBlockSize(maxBlockSize + 1) - rejected(blockchain.ErrBlockTooBig) + rejected(blockdag.ErrBlockTooBig) // Parent was rejected, so this block must either be an orphan or // outright rejected due to an invalid parent. 
@@ -1163,7 +1162,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.setTip("b15") tooSmallCbScript := repeatOpcode(0x00, minCoinbaseScriptLen-1) g.nextBlock("b26", outs[6], replaceCoinbaseSigScript(tooSmallCbScript)) - rejected(blockchain.ErrBadCoinbaseScriptLen) + rejected(blockdag.ErrBadCoinbaseScriptLen) // Parent was rejected, so this block must either be an orphan or // outright rejected due to an invalid parent. @@ -1179,7 +1178,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.setTip("b15") tooLargeCbScript := repeatOpcode(0x00, maxCoinbaseScriptLen+1) g.nextBlock("b28", outs[6], replaceCoinbaseSigScript(tooLargeCbScript)) - rejected(blockchain.ErrBadCoinbaseScriptLen) + rejected(blockdag.ErrBadCoinbaseScriptLen) // Parent was rejected, so this block must either be an orphan or // outright rejected due to an invalid parent. @@ -1219,7 +1218,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { tooManySigOps = append(manySigOps, txscript.OpCheckSig) g.nextBlock("b32", outs[9], replaceSpendScript(tooManySigOps)) g.assertTipBlockSigOpsCount(maxBlockSigOps + 1) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // Create block with max signature operations as OP_CHECKMULTISIGVERIFY. // @@ -1240,7 +1239,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { tooManySigOps = append(manySigOps, txscript.OpCheckSig) g.nextBlock("b34", outs[10], replaceSpendScript(tooManySigOps)) g.assertTipBlockSigOpsCount(maxBlockSigOps + 1) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // Create block with max signature operations as OP_CHECKSIGVERIFY. 
// @@ -1261,7 +1260,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { tooManySigOps = repeatOpcode(txscript.OpCheckSigVerify, maxBlockSigOps+1) g.nextBlock("b36", outs[11], replaceSpendScript(tooManySigOps)) g.assertTipBlockSigOpsCount(maxBlockSigOps + 1) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // --------------------------------------------------------------------- // Spending of tx outputs in block that failed to connect tests. @@ -1278,11 +1277,11 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { doubleSpendTx := createSpendTx(outs[11], lowFee) g.nextBlock("b37", outs[11], additionalTx(doubleSpendTx)) b37Tx1Out := makeSpendableOut(g.tip, 1, 0) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) g.setTip("b35") g.nextBlock("b38", &b37Tx1Out) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // --------------------------------------------------------------------- // Pay-to-script-hash signature operation count tests. @@ -1354,7 +1353,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { tx.TxOut[0].PkScript = repeatOpcode(txscript.OpCheckSig, fill) b.AddTransaction(tx) }) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // Create a block with the max allowed signature operations where the // majority of them are in pay-to-script-hash scripts. @@ -1417,7 +1416,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { nonCoinbaseTx := createSpendTx(outs[14], lowFee) b.Transactions[0] = nonCoinbaseTx }) - rejected(blockchain.ErrFirstTxNotCoinbase) + rejected(blockdag.ErrFirstTxNotCoinbase) // Create block with no transactions. 
// @@ -1427,7 +1426,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock("b45", nil, func(b *wire.MsgBlock) { b.Transactions = nil }) - rejected(blockchain.ErrNoTransactions) + rejected(blockdag.ErrNoTransactions) // Create block with invalid proof of work. // @@ -1445,14 +1444,14 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // a uint256 is higher than the limit. b46.Header.Nonce++ blockHash := b46.BlockHash() - hashNum := blockchain.HashToBig(&blockHash) + hashNum := blockdag.HashToBig(&blockHash) if hashNum.Cmp(g.params.PowLimit) >= 0 { break } } g.updateBlockState("b46", origHash, "b46", b46) } - rejected(blockchain.ErrHighHash) + rejected(blockdag.ErrHighHash) // Create block with a timestamp too far in the future. // @@ -1464,7 +1463,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { nowPlus3Hours := time.Now().Add(time.Hour * 3) b.Header.Timestamp = time.Unix(nowPlus3Hours.Unix(), 0) }) - rejected(blockchain.ErrTimeTooNew) + rejected(blockdag.ErrTimeTooNew) // Create block with an invalid merkle root. // @@ -1472,9 +1471,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b48(14) g.setTip("b43") g.nextBlock("b48", outs[14], func(b *wire.MsgBlock) { - b.Header.MerkleRoot = chainhash.Hash{} + b.Header.MerkleRoot = daghash.Hash{} }) - rejected(blockchain.ErrBadMerkleRoot) + rejected(blockdag.ErrBadMerkleRoot) // Create block with an invalid proof-of-work limit. // @@ -1484,7 +1483,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock("b49", outs[14], func(b *wire.MsgBlock) { b.Header.Bits-- }) - rejected(blockchain.ErrUnexpectedDifficulty) + rejected(blockdag.ErrUnexpectedDifficulty) // Create block with an invalid negative proof-of-work limit. 
// @@ -1500,7 +1499,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b49a.Header.Bits = 0x01810000 // -1 in compact form. g.updateBlockState("b49a", origHash, "b49a", b49a) } - rejected(blockchain.ErrUnexpectedDifficulty) + rejected(blockdag.ErrUnexpectedDifficulty) // Create block with two coinbase transactions. // @@ -1509,7 +1508,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.setTip("b43") coinbaseTx := g.createCoinbaseTx(g.tipHeight + 1) g.nextBlock("b50", outs[14], additionalTx(coinbaseTx)) - rejected(blockchain.ErrMultipleCoinbases) + rejected(blockdag.ErrMultipleCoinbases) // Create block with duplicate transactions. // @@ -1523,7 +1522,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.AddTransaction(b.Transactions[1]) }) g.assertTipBlockNumTxns(3) - rejected(blockchain.ErrDuplicateTx) + rejected(blockdag.ErrDuplicateTx) // Create a block that spends a transaction that does not exist. // @@ -1536,7 +1535,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.Transactions[1].TxIn[0].PreviousOutPoint.Hash = *hash b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 0 }) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // --------------------------------------------------------------------- // Block header median time tests. @@ -1555,13 +1554,13 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // ... 
-> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) // \-> b54(15) g.nextBlock("b54", outs[15], func(b *wire.MsgBlock) { - medianBlock := g.blocks[b.Header.PrevBlock] + medianBlock := g.blocks[*b.Header.SelectedPrevBlock()] for i := 0; i < medianTimeBlocks/2; i++ { - medianBlock = g.blocks[medianBlock.Header.PrevBlock] + medianBlock = g.blocks[*medianBlock.Header.SelectedPrevBlock()] } b.Header.Timestamp = medianBlock.Header.Timestamp }) - rejected(blockchain.ErrTimeTooOld) + rejected(blockdag.ErrTimeTooOld) // Create a block with a timestamp that is one second after the median // time. The block must be accepted. @@ -1569,9 +1568,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) -> b55(15) g.setTip("b53") g.nextBlock("b55", outs[15], func(b *wire.MsgBlock) { - medianBlock := g.blocks[b.Header.PrevBlock] + medianBlock := g.blocks[*b.Header.SelectedPrevBlock()] for i := 0; i < medianTimeBlocks/2; i++ { - medianBlock = g.blocks[medianBlock.Header.PrevBlock] + medianBlock = g.blocks[*medianBlock.Header.SelectedPrevBlock()] } medianBlockTime := medianBlock.Header.Timestamp b.Header.Timestamp = medianBlockTime.Add(time.Second) @@ -1632,7 +1631,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.assertTipBlockNumTxns(4) g.assertTipBlockHash(b57.BlockHash()) g.assertTipBlockMerkleRoot(b57.Header.MerkleRoot) - rejected(blockchain.ErrDuplicateTx) + rejected(blockdag.ErrDuplicateTx) // Since the two blocks have the same hash and the generator state now // has b56 associated with the hash, manually remove b56, replace it @@ -1673,7 +1672,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.AddTransaction(b.Transactions[3]) }) g.assertTipBlockNumTxns(8) - rejected(blockchain.ErrDuplicateTx) + rejected(blockdag.ErrDuplicateTx) // --------------------------------------------------------------------- // 
Invalid transaction type tests. @@ -1688,7 +1687,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock("b58", outs[17], func(b *wire.MsgBlock) { b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 42 }) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // Create block with transaction that pays more than its inputs. // @@ -1698,7 +1697,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock("b59", outs[17], func(b *wire.MsgBlock) { b.Transactions[1].TxOut[0].Value = int64(outs[17].amount) + 1 }) - rejected(blockchain.ErrSpendTooHigh) + rejected(blockdag.ErrSpendTooHigh) // --------------------------------------------------------------------- // BIP0030 tests. @@ -1719,10 +1718,10 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock("b61", outs[18], func(b *wire.MsgBlock) { // Duplicate the coinbase of the parent block to force the // condition. - parent := g.blocks[b.Header.PrevBlock] + parent := g.blocks[*b.Header.SelectedPrevBlock()] b.Transactions[0] = parent.Transactions[0] }) - rejected(blockchain.ErrOverwriteTx) + rejected(blockdag.ErrOverwriteTx) // --------------------------------------------------------------------- // Blocks with non-final transaction tests. @@ -1740,7 +1739,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.Transactions[1].LockTime = 0xffffffff b.Transactions[1].TxIn[0].Sequence = 0 }) - rejected(blockchain.ErrUnfinalizedTx) + rejected(blockdag.ErrUnfinalizedTx) // Create block that contains a non-final coinbase transaction. 
// @@ -1754,7 +1753,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.Transactions[0].LockTime = 0xffffffff b.Transactions[0].TxIn[0].Sequence = 0 }) - rejected(blockchain.ErrUnfinalizedTx) + rejected(blockdag.ErrUnfinalizedTx) // --------------------------------------------------------------------- // Non-canonical variable-length integer tests. @@ -1816,7 +1815,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.AddTransaction(tx3) b.AddTransaction(tx2) }) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // Create block that double spends a transaction created in the same // block. @@ -1831,7 +1830,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { b.AddTransaction(tx3) b.AddTransaction(tx4) }) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // --------------------------------------------------------------------- // Extra subsidy tests. @@ -1844,7 +1843,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b68(20) g.setTip("b65") g.nextBlock("b68", outs[20], additionalCoinbase(10), additionalSpendFee(9)) - rejected(blockchain.ErrBadCoinbaseValue) + rejected(blockdag.ErrBadCoinbaseValue) // Create block that pays 10 extra to the coinbase and a tx that pays // the extra 10 fee. 
@@ -1892,7 +1891,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { maxScriptElementSize+1) g.nextBlock("b70", outs[21], replaceSpendScript(tooManySigOps)) g.assertTipBlockSigOpsCount(maxBlockSigOps + 1) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // Create block with more than max allowed signature operations such // that the signature operation that pushes it over the limit is before @@ -1908,7 +1907,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { binary.LittleEndian.PutUint32(tooManySigOps[maxBlockSigOps+2:], 0xffffffff) g.nextBlock("b71", outs[21], replaceSpendScript(tooManySigOps)) g.assertTipBlockSigOpsCount(maxBlockSigOps + 1) - rejected(blockchain.ErrTooManySigOps) + rejected(blockdag.ErrTooManySigOps) // Create block with the max allowed signature operations such that all // counted signature operations are before an invalid push data that @@ -2022,7 +2021,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // to effective negate that behavior. b75OpReturnOut.amount++ g.nextBlock("b80", &b75OpReturnOut) - rejected(blockchain.ErrMissingTxOut) + rejected(blockdag.ErrMissingTxOut) // Create a block that has a transaction with multiple OP_RETURNs. Even // though it's not considered a standard transaction, it is still valid @@ -2080,7 +2079,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.assertTipBlockSize(maxBlockSize) g.saveTipCoinbaseOut() testInstances = append(testInstances, acceptBlock(g.tipName, - g.tip, true, false)) + g.tip, false)) // Use the next available spendable output. 
First use up any // remaining spendable outputs that were already popped into the @@ -2104,7 +2103,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { chain2TipName = fmt.Sprintf("bralt%d", i) g.nextBlock(chain2TipName, nil) testInstances = append(testInstances, acceptBlock(g.tipName, - g.tip, false, false)) + g.tip, false)) } testInstances = append(testInstances, expectTipBlock(chain1TipName, g.blocksByName[chain1TipName])) diff --git a/blockchain/fullblocktests/params.go b/blockdag/fullblocktests/params.go similarity index 80% rename from blockchain/fullblocktests/params.go rename to blockdag/fullblocktests/params.go index 2206f3302..da52c7478 100644 --- a/blockchain/fullblocktests/params.go +++ b/blockdag/fullblocktests/params.go @@ -10,17 +10,17 @@ import ( "math/big" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) // newHashFromStr converts the passed big-endian hex string into a -// wire.Hash. It only differs from the one available in chainhash in that +// wire.Hash. It only differs from the one available in daghash in that // it panics on an error since it will only (and must only) be called with // hard-coded, and therefore known good, hashes. -func newHashFromStr(hexStr string) *chainhash.Hash { - hash, err := chainhash.NewHashFromStr(hexStr) +func newHashFromStr(hexStr string) *daghash.Hash { + hash, err := daghash.NewHashFromStr(hexStr) if err != nil { panic(err) } @@ -52,18 +52,19 @@ var ( // as the public transaction ledger for the regression test network. 
regTestGenesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: *newHashFromStr("0000000000000000000000000000000000000000000000000000000000000000"), - MerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"), - Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC - Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] - Nonce: 2, + Version: 1, + NumPrevBlocks: 0, + PrevBlocks: []daghash.Hash{}, + MerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"), + Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC + Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] + Nonce: 1, }, Transactions: []*wire.MsgTx{{ Version: 1, TxIn: []*wire.TxIn{{ PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: fromHex("04ffff001d010445" + @@ -90,10 +91,10 @@ var ( // network. // // NOTE: The test generator intentionally does not use the existing definitions -// in the chaincfg package since the intent is to be able to generate known -// good tests which exercise that code. Using the chaincfg parameters would +// in the dagconfig package since the intent is to be able to generate known +// good tests which exercise that code. Using the dagconfig parameters would // allow them to change out from under the tests potentially invalidating them. 
-var regressionNetParams = &chaincfg.Params{ +var regressionNetParams = &dagconfig.Params{ Name: "regtest", Net: wire.TestNet, DefaultPort: "18444", diff --git a/blockchain/indexers/README.md b/blockdag/indexers/README.md similarity index 100% rename from blockchain/indexers/README.md rename to blockdag/indexers/README.md diff --git a/blockchain/indexers/addrindex.go b/blockdag/indexers/addrindex.go similarity index 96% rename from blockchain/indexers/addrindex.go rename to blockdag/indexers/addrindex.go index 9358e357e..5b12bdae7 100644 --- a/blockchain/indexers/addrindex.go +++ b/blockdag/indexers/addrindex.go @@ -9,9 +9,9 @@ import ( "fmt" "sync" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -126,7 +126,7 @@ var ( // fetchBlockHashFunc defines a callback function to use in order to convert a // serialized block ID to an associated block hash. -type fetchBlockHashFunc func(serializedID []byte) (*chainhash.Hash, error) +type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error) // serializeAddrIndexEntry serializes the provided block id and transaction // location according to the format described in detail above. @@ -552,8 +552,8 @@ type AddrIndex struct { // The following fields are set when the instance is created and can't // be changed afterwards, so there is no need to protect them with a // separate mutex. 
- db database.DB - chainParams *chaincfg.Params + db database.DB + dagParams *dagconfig.Params // The following fields are used to quickly link transactions and // addresses that have not been included into a block yet when an @@ -569,8 +569,8 @@ type AddrIndex struct { // This allows fairly efficient updates when transactions are removed // once they are included into a block. unconfirmedLock sync.RWMutex - txnsByAddr map[[addrKeySize]byte]map[chainhash.Hash]*btcutil.Tx - addrsByTx map[chainhash.Hash]map[[addrKeySize]byte]struct{} + txnsByAddr map[[addrKeySize]byte]map[daghash.Hash]*btcutil.Tx + addrsByTx map[daghash.Hash]map[[addrKeySize]byte]struct{} } // Ensure the AddrIndex type implements the Indexer interface. @@ -633,7 +633,7 @@ func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx // Nothing to index if the script is non-standard or otherwise doesn't // contain any addresses. _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, - idx.chainParams) + idx.dagParams) if err != nil || len(addrs) == 0 { return } @@ -662,7 +662,7 @@ func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx // indexBlock extract all of the standard addresses from all of the transactions // in the passed block and maps each of them to the associated transaction using // the passed map. -func (idx *AddrIndex) indexBlock(data writeIndexData, block *btcutil.Block, view *blockchain.UtxoViewpoint) { +func (idx *AddrIndex) indexBlock(data writeIndexData, block *btcutil.Block, view *blockdag.UtxoViewpoint) { for txIdx, tx := range block.Transactions() { // Coinbases do not reference any inputs. Since the block is // required to have already gone through full validation, it has @@ -693,7 +693,7 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *btcutil.Block, view // the transactions in the block involve. // // This is part of the Indexer interface. 
-func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // The offset and length of the transactions within the serialized // block. txLocs, err := block.TxLoc() @@ -731,7 +731,7 @@ func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view // each transaction in the block involve. // // This is part of the Indexer interface. -func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Build all of the address to transaction mappings in a local map. addrsToTxns := make(writeIndexData) idx.indexBlock(addrsToTxns, block, view) @@ -770,7 +770,7 @@ func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr btcutil.Address err = idx.db.View(func(dbTx database.Tx) error { // Create closure to lookup the block hash given the ID using // the database transaction. - fetchBlockHash := func(id []byte) (*chainhash.Hash, error) { + fetchBlockHash := func(id []byte) (*daghash.Hash, error) { // Deserialize and populate the result. return dbFetchBlockHashBySerializedID(dbTx, id) } @@ -796,7 +796,7 @@ func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *btcutil.Tx) // script fails to parse and it was already validated before being // admitted to the mempool. _, addresses, _, _ := txscript.ExtractPkScriptAddrs(pkScript, - idx.chainParams) + idx.dagParams) for _, addr := range addresses { // Ignore unsupported address types. 
addrKey, err := addrToKey(addr) @@ -808,7 +808,7 @@ func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *btcutil.Tx) idx.unconfirmedLock.Lock() addrIndexEntry := idx.txnsByAddr[addrKey] if addrIndexEntry == nil { - addrIndexEntry = make(map[chainhash.Hash]*btcutil.Tx) + addrIndexEntry = make(map[daghash.Hash]*btcutil.Tx) idx.txnsByAddr[addrKey] = addrIndexEntry } addrIndexEntry[*tx.Hash()] = tx @@ -833,7 +833,7 @@ func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *btcutil.Tx) // addresses not being indexed. // // This function is safe for concurrent access. -func (idx *AddrIndex) AddUnconfirmedTx(tx *btcutil.Tx, utxoView *blockchain.UtxoViewpoint) { +func (idx *AddrIndex) AddUnconfirmedTx(tx *btcutil.Tx, utxoView *blockdag.UtxoViewpoint) { // Index addresses of all referenced previous transaction outputs. // // The existence checks are elided since this is only called after the @@ -860,7 +860,7 @@ func (idx *AddrIndex) AddUnconfirmedTx(tx *btcutil.Tx, utxoView *blockchain.Utxo // (memory-only) address index. // // This function is safe for concurrent access. -func (idx *AddrIndex) RemoveUnconfirmedTx(hash *chainhash.Hash) { +func (idx *AddrIndex) RemoveUnconfirmedTx(hash *daghash.Hash) { idx.unconfirmedLock.Lock() defer idx.unconfirmedLock.Unlock() @@ -914,12 +914,12 @@ func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr btcutil.Address) []*btcutil // It implements the Indexer interface which plugs into the IndexManager that in // turn is used by the blockchain package. This allows the index to be // seamlessly maintained along with the chain. 
-func NewAddrIndex(db database.DB, chainParams *chaincfg.Params) *AddrIndex { +func NewAddrIndex(db database.DB, dagParams *dagconfig.Params) *AddrIndex { return &AddrIndex{ - db: db, - chainParams: chainParams, - txnsByAddr: make(map[[addrKeySize]byte]map[chainhash.Hash]*btcutil.Tx), - addrsByTx: make(map[chainhash.Hash]map[[addrKeySize]byte]struct{}), + db: db, + dagParams: dagParams, + txnsByAddr: make(map[[addrKeySize]byte]map[daghash.Hash]*btcutil.Tx), + addrsByTx: make(map[daghash.Hash]map[[addrKeySize]byte]struct{}), } } diff --git a/blockchain/indexers/addrindex_test.go b/blockdag/indexers/addrindex_test.go similarity index 100% rename from blockchain/indexers/addrindex_test.go rename to blockdag/indexers/addrindex_test.go diff --git a/blockchain/indexers/blocklogger.go b/blockdag/indexers/blocklogger.go similarity index 100% rename from blockchain/indexers/blocklogger.go rename to blockdag/indexers/blocklogger.go diff --git a/blockchain/indexers/cfindex.go b/blockdag/indexers/cfindex.go similarity index 86% rename from blockchain/indexers/cfindex.go rename to blockdag/indexers/cfindex.go index 12138f511..91fdea6a1 100644 --- a/blockchain/indexers/cfindex.go +++ b/blockdag/indexers/cfindex.go @@ -7,9 +7,9 @@ package indexers import ( "errors" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -53,34 +53,34 @@ var ( maxFilterType = uint8(len(cfHeaderKeys) - 1) - // zeroHash is the chainhash.Hash value of all zero bytes, defined here for + // zeroHash is the daghash.Hash value of all zero bytes, defined here for // convenience. - zeroHash chainhash.Hash + zeroHash daghash.Hash ) // dbFetchFilterIdxEntry retrieves a data blob from the filter index database. 
// An entry's absence is not considered an error. -func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) { +func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) ([]byte, error) { idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key) return idx.Get(h[:]), nil } // dbStoreFilterIdxEntry stores a data blob in the filter index database. -func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error { +func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash, f []byte) error { idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key) return idx.Put(h[:], f) } // dbDeleteFilterIdxEntry deletes a data blob from the filter index database. -func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) error { +func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) error { idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key) return idx.Delete(h[:]) } // CfIndex implements a committed filter (cf) by hash index. type CfIndex struct { - db database.DB - chainParams *chaincfg.Params + db database.DB + dagParams *dagconfig.Params } // Ensure the CfIndex type implements the Indexer interface. @@ -174,8 +174,8 @@ func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter, } // Then fetch the previous block's filter header. - var prevHeader *chainhash.Hash - ph := &block.MsgBlock().Header.PrevBlock + var prevHeader *daghash.Hash + ph := block.MsgBlock().Header.SelectedPrevBlock() if ph.IsEqual(&zeroHash) { prevHeader = &zeroHash } else { @@ -185,7 +185,7 @@ func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter, } // Construct the new block's filter header, and store it. 
- prevHeader, err = chainhash.NewHash(pfh) + prevHeader, err = daghash.NewHash(pfh) if err != nil { return err } @@ -202,7 +202,7 @@ func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter, // connected to the main chain. This indexer adds a hash-to-cf mapping for // every passed block. This is part of the Indexer interface. func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, - view *blockchain.UtxoViewpoint) error { + view *blockdag.UtxoViewpoint) error { f, err := builder.BuildBasicFilter(block.MsgBlock()) if err != nil { @@ -226,7 +226,7 @@ func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, // disconnected from the main chain. This indexer removes the hash-to-cf // mapping for every passed block. This is part of the Indexer interface. func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, - view *blockchain.UtxoViewpoint) error { + view *blockdag.UtxoViewpoint) error { for _, key := range cfIndexKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) @@ -255,7 +255,7 @@ func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, // entryByBlockHash fetches a filter index entry of a particular type // (eg. filter, filter header, etc) for a filter type and block hash. func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte, - filterType wire.FilterType, h *chainhash.Hash) ([]byte, error) { + filterType wire.FilterType, h *daghash.Hash) ([]byte, error) { if uint8(filterType) > maxFilterType { return nil, errors.New("unsupported filter type") @@ -274,7 +274,7 @@ func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte, // entriesByBlockHashes batch fetches a filter index entry of a particular type // (eg. filter, filter header, etc) for a filter type and slice of block hashes. 
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte, - filterType wire.FilterType, blockHashes []*chainhash.Hash) ([][]byte, error) { + filterType wire.FilterType, blockHashes []*daghash.Hash) ([][]byte, error) { if uint8(filterType) > maxFilterType { return nil, errors.New("unsupported filter type") @@ -297,42 +297,42 @@ func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte, // FilterByBlockHash returns the serialized contents of a block's basic or // extended committed filter. -func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash, +func (idx *CfIndex) FilterByBlockHash(h *daghash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfIndexKeys, filterType, h) } // FiltersByBlockHashes returns the serialized contents of a block's basic or // extended committed filter for a set of blocks by hash. -func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*chainhash.Hash, +func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*daghash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes) } // FilterHeaderByBlockHash returns the serialized contents of a block's basic // or extended committed filter header. -func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash, +func (idx *CfIndex) FilterHeaderByBlockHash(h *daghash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfHeaderKeys, filterType, h) } // FilterHeadersByBlockHashes returns the serialized contents of a block's basic // or extended committed filter header for a set of blocks by hash. 
-func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*chainhash.Hash, +func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*daghash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes) } // FilterHashByBlockHash returns the serialized contents of a block's basic // or extended committed filter hash. -func (idx *CfIndex) FilterHashByBlockHash(h *chainhash.Hash, +func (idx *CfIndex) FilterHashByBlockHash(h *daghash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfHashKeys, filterType, h) } // FilterHashesByBlockHashes returns the serialized contents of a block's basic // or extended committed filter hash for a set of blocks by hash. -func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash, +func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes) } @@ -344,8 +344,8 @@ func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash, // It implements the Indexer interface which plugs into the IndexManager that // in turn is used by the blockchain package. This allows the index to be // seamlessly maintained along with the chain. -func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex { - return &CfIndex{db: db, chainParams: chainParams} +func NewCfIndex(db database.DB, dagParams *dagconfig.Params) *CfIndex { + return &CfIndex{db: db, dagParams: dagParams} } // DropCfIndex drops the CF index from the provided database if exists. 
diff --git a/blockchain/indexers/common.go b/blockdag/indexers/common.go similarity index 97% rename from blockchain/indexers/common.go rename to blockdag/indexers/common.go index a4783d0f3..d1dd8d2c7 100644 --- a/blockchain/indexers/common.go +++ b/blockdag/indexers/common.go @@ -11,7 +11,7 @@ import ( "encoding/binary" "errors" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/database" "github.com/daglabs/btcutil" ) @@ -52,11 +52,11 @@ type Indexer interface { // ConnectBlock is invoked when the index manager is notified that a new // block has been connected to the main chain. - ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error + ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error // DisconnectBlock is invoked when the index manager is notified that a // block has been disconnected from the main chain. - DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error + DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error } // AssertError identifies an error that indicates an internal code consistency diff --git a/blockchain/indexers/log.go b/blockdag/indexers/log.go similarity index 100% rename from blockchain/indexers/log.go rename to blockdag/indexers/log.go diff --git a/blockchain/indexers/manager.go b/blockdag/indexers/manager.go similarity index 92% rename from blockchain/indexers/manager.go rename to blockdag/indexers/manager.go index fa351280e..73b0bfb4b 100644 --- a/blockchain/indexers/manager.go +++ b/blockdag/indexers/manager.go @@ -8,8 +8,8 @@ import ( "bytes" "fmt" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -30,16 +30,16 @@ var ( // 
[],... // // Field Type Size -// block hash chainhash.Hash chainhash.HashSize +// block hash daghash.Hash daghash.HashSize // block height uint32 4 bytes // ----------------------------------------------------------------------------- // dbPutIndexerTip uses an existing database transaction to update or add the // current tip for the given index to the provided values. -func dbPutIndexerTip(dbTx database.Tx, idxKey []byte, hash *chainhash.Hash, height int32) error { - serialized := make([]byte, chainhash.HashSize+4) +func dbPutIndexerTip(dbTx database.Tx, idxKey []byte, hash *daghash.Hash, height int32) error { + serialized := make([]byte, daghash.HashSize+4) copy(serialized, hash[:]) - byteOrder.PutUint32(serialized[chainhash.HashSize:], uint32(height)) + byteOrder.PutUint32(serialized[daghash.HashSize:], uint32(height)) indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName) return indexesBucket.Put(idxKey, serialized) @@ -47,10 +47,10 @@ func dbPutIndexerTip(dbTx database.Tx, idxKey []byte, hash *chainhash.Hash, heig // dbFetchIndexerTip uses an existing database transaction to retrieve the // hash and height of the current tip for the provided index. 
-func dbFetchIndexerTip(dbTx database.Tx, idxKey []byte) (*chainhash.Hash, int32, error) { +func dbFetchIndexerTip(dbTx database.Tx, idxKey []byte) (*daghash.Hash, int32, error) { indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName) serialized := indexesBucket.Get(idxKey) - if len(serialized) < chainhash.HashSize+4 { + if len(serialized) < daghash.HashSize+4 { return nil, 0, database.Error{ ErrorCode: database.ErrCorruption, Description: fmt.Sprintf("unexpected end of data for "+ @@ -58,9 +58,9 @@ func dbFetchIndexerTip(dbTx database.Tx, idxKey []byte) (*chainhash.Hash, int32, } } - var hash chainhash.Hash - copy(hash[:], serialized[:chainhash.HashSize]) - height := int32(byteOrder.Uint32(serialized[chainhash.HashSize:])) + var hash daghash.Hash + copy(hash[:], serialized[:daghash.HashSize]) + height := int32(byteOrder.Uint32(serialized[daghash.HashSize:])) return &hash, height, nil } @@ -68,7 +68,7 @@ func dbFetchIndexerTip(dbTx database.Tx, idxKey []byte) (*chainhash.Hash, int32, // given block using the provided indexer and updates the tip of the indexer // accordingly. An error will be returned if the current tip for the indexer is // not the previous block for the passed block. -func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Assert that the block being connected properly connects to the // current tip of the index. 
idxKey := indexer.Key() @@ -76,7 +76,7 @@ func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block if err != nil { return err } - if !curTipHash.IsEqual(&block.MsgBlock().Header.PrevBlock) { + if !curTipHash.IsEqual(block.MsgBlock().Header.SelectedPrevBlock()) { return AssertError(fmt.Sprintf("dbIndexConnectBlock must be "+ "called with a block that extends the current index "+ "tip (%s, tip %s, block %s)", indexer.Name(), @@ -96,7 +96,7 @@ func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block // given block using the provided indexer and updates the tip of the indexer // accordingly. An error will be returned if the current tip for the indexer is // not the passed block. -func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Assert that the block being disconnected is the current tip of the // index. idxKey := indexer.Key() @@ -118,7 +118,7 @@ func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Bl } // Update the current index tip. - prevHash := &block.MsgBlock().Header.PrevBlock + prevHash := block.MsgBlock().Header.SelectedPrevBlock() return dbPutIndexerTip(dbTx, idxKey, prevHash, block.Height()-1) } @@ -131,7 +131,7 @@ type Manager struct { } // Ensure the Manager type implements the blockchain.IndexManager interface. -var _ blockchain.IndexManager = (*Manager)(nil) +var _ blockdag.IndexManager = (*Manager)(nil) // indexDropKey returns the key for an index which indicates it is in the // process of being dropped. @@ -212,7 +212,7 @@ func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error { // Set the tip for the index to values which represent an // uninitialized index. 
- err := dbPutIndexerTip(dbTx, idxKey, &chainhash.Hash{}, -1) + err := dbPutIndexerTip(dbTx, idxKey, &daghash.Hash{}, -1) if err != nil { return err } @@ -229,7 +229,7 @@ func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error { // catch up due to the I/O contention. // // This is part of the blockchain.IndexManager interface. -func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) error { +func (m *Manager) Init(chain *blockdag.BlockDAG, interrupt <-chan struct{}) error { // Nothing to do when no indexes are enabled. if len(m.enabledIndexes) == 0 { return nil @@ -275,7 +275,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) // Fetch the current tip for the index. var height int32 - var hash *chainhash.Hash + var hash *daghash.Hash err := m.db.View(func(dbTx database.Tx) error { idxKey := indexer.Key() hash, height, err = dbFetchIndexerTip(dbTx, idxKey) @@ -313,7 +313,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) // When the index requires all of the referenced // txouts they need to be retrieved from the // transaction index. - var view *blockchain.UtxoViewpoint + var view *blockdag.UtxoViewpoint if indexNeedsInputs(indexer) { var err error view, err = makeUtxoView(dbTx, block, @@ -332,7 +332,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) } // Update the tip to the previous block. - hash = &block.MsgBlock().Header.PrevBlock + hash = block.MsgBlock().Header.SelectedPrevBlock() height-- return nil @@ -357,8 +357,8 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) // lowest one so the catchup code only needs to start at the earliest // block and is able to skip connecting the block for the indexes that // don't need it. 
- bestHeight := chain.BestSnapshot().Height - lowestHeight := bestHeight + dagHeight := chain.GetDAGState().SelectedTip.Height + lowestHeight := dagHeight indexerHeights := make([]int32, len(m.enabledIndexes)) err = m.db.View(func(dbTx database.Tx) error { for i, indexer := range m.enabledIndexes { @@ -382,7 +382,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) } // Nothing to index if all of the indexes are caught up. - if lowestHeight == bestHeight { + if lowestHeight == dagHeight { return nil } @@ -393,8 +393,8 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) // tip and need to be caught up, so log the details and loop through // each block that needs to be indexed. log.Infof("Catching up indexes from height %d to %d", lowestHeight, - bestHeight) - for height := lowestHeight + 1; height <= bestHeight; height++ { + dagHeight) + for height := lowestHeight + 1; height <= dagHeight; height++ { // Load the block for the height since it is required to index // it. block, err := chain.BlockByHeight(height) @@ -407,7 +407,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) } // Connect the block for all indexes that need it. - var view *blockchain.UtxoViewpoint + var view *blockdag.UtxoViewpoint for i, indexer := range m.enabledIndexes { // Skip indexes that don't need to be updated with this // block. @@ -445,7 +445,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain, interrupt <-chan struct{}) } } - log.Infof("Indexes caught up to height %d", bestHeight) + log.Infof("Indexes caught up to height %d", dagHeight) return nil } @@ -461,7 +461,7 @@ func indexNeedsInputs(index Indexer) bool { // dbFetchTx looks up the passed transaction hash in the transaction index and // loads it from the database. 
-func dbFetchTx(dbTx database.Tx, hash *chainhash.Hash) (*wire.MsgTx, error) { +func dbFetchTx(dbTx database.Tx, hash *daghash.Hash) (*wire.MsgTx, error) { // Look up the location of the transaction. blockRegion, err := dbFetchTxIndexEntry(dbTx, hash) if err != nil { @@ -492,8 +492,8 @@ func dbFetchTx(dbTx database.Tx, hash *chainhash.Hash) (*wire.MsgTx, error) { // transactions in the block. This is sometimes needed when catching indexes up // because many of the txouts could actually already be spent however the // associated scripts are still required to index them. -func makeUtxoView(dbTx database.Tx, block *btcutil.Block, interrupt <-chan struct{}) (*blockchain.UtxoViewpoint, error) { - view := blockchain.NewUtxoViewpoint() +func makeUtxoView(dbTx database.Tx, block *btcutil.Block, interrupt <-chan struct{}) (*blockdag.UtxoViewpoint, error) { + view := blockdag.NewUtxoViewpoint() for txIdx, tx := range block.Transactions() { // Coinbases do not reference any inputs. Since the block is // required to have already gone through full validation, it has @@ -528,7 +528,7 @@ func makeUtxoView(dbTx database.Tx, block *btcutil.Block, interrupt <-chan struc // checks, and invokes each indexer. // // This is part of the blockchain.IndexManager interface. -func (m *Manager) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func (m *Manager) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Call each of the currently active optional indexes with the block // being connected so they can update accordingly. for _, index := range m.enabledIndexes { @@ -546,7 +546,7 @@ func (m *Manager) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blo // the index entries associated with the block. // // This is part of the blockchain.IndexManager interface. 
-func (m *Manager) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func (m *Manager) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Call each of the currently active optional indexes with the block // being disconnected so they can update accordingly. for _, index := range m.enabledIndexes { diff --git a/blockchain/indexers/txindex.go b/blockdag/indexers/txindex.go similarity index 93% rename from blockchain/indexers/txindex.go rename to blockdag/indexers/txindex.go index b43a41904..a7d80edb3 100644 --- a/blockchain/indexers/txindex.go +++ b/blockdag/indexers/txindex.go @@ -8,8 +8,8 @@ import ( "errors" "fmt" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -64,7 +64,7 @@ var ( // = // // Field Type Size -// hash chainhash.Hash 32 bytes +// hash daghash.Hash 32 bytes // ID uint32 4 bytes // ----- // Total: 36 bytes @@ -74,7 +74,7 @@ var ( // // Field Type Size // ID uint32 4 bytes -// hash chainhash.Hash 32 bytes +// hash daghash.Hash 32 bytes // ----- // Total: 36 bytes // @@ -83,7 +83,7 @@ var ( // = // // Field Type Size -// txhash chainhash.Hash 32 bytes +// txhash daghash.Hash 32 bytes // block id uint32 4 bytes // start offset uint32 4 bytes // tx length uint32 4 bytes @@ -94,7 +94,7 @@ var ( // dbPutBlockIDIndexEntry uses an existing database transaction to update or add // the index entries for the hash to id and id to hash mappings for the provided // values. -func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *chainhash.Hash, id uint32) error { +func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, id uint32) error { // Serialize the height for use in the index entries. 
var serializedID [4]byte byteOrder.PutUint32(serializedID[:], id) @@ -113,7 +113,7 @@ func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *chainhash.Hash, id uint32) e // dbRemoveBlockIDIndexEntry uses an existing database transaction remove index // entries from the hash to id and id to hash mappings for the provided hash. -func dbRemoveBlockIDIndexEntry(dbTx database.Tx, hash *chainhash.Hash) error { +func dbRemoveBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash) error { // Remove the block hash to ID mapping. meta := dbTx.Metadata() hashIndex := meta.Bucket(idByHashIndexBucketName) @@ -132,7 +132,7 @@ func dbRemoveBlockIDIndexEntry(dbTx database.Tx, hash *chainhash.Hash) error { // dbFetchBlockIDByHash uses an existing database transaction to retrieve the // block id for the provided hash from the index. -func dbFetchBlockIDByHash(dbTx database.Tx, hash *chainhash.Hash) (uint32, error) { +func dbFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint32, error) { hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName) serializedID := hashIndex.Get(hash[:]) if serializedID == nil { @@ -144,21 +144,21 @@ func dbFetchBlockIDByHash(dbTx database.Tx, hash *chainhash.Hash) (uint32, error // dbFetchBlockHashBySerializedID uses an existing database transaction to // retrieve the hash for the provided serialized block id from the index. -func dbFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*chainhash.Hash, error) { +func dbFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) { idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName) hashBytes := idIndex.Get(serializedID) if hashBytes == nil { return nil, errNoBlockIDEntry } - var hash chainhash.Hash + var hash daghash.Hash copy(hash[:], hashBytes) return &hash, nil } // dbFetchBlockHashByID uses an existing database transaction to retrieve the // hash for the provided block id from the index. 
-func dbFetchBlockHashByID(dbTx database.Tx, id uint32) (*chainhash.Hash, error) { +func dbFetchBlockHashByID(dbTx database.Tx, id uint32) (*daghash.Hash, error) { var serializedID [4]byte byteOrder.PutUint32(serializedID[:], id) return dbFetchBlockHashBySerializedID(dbTx, serializedID[:]) @@ -177,7 +177,7 @@ func putTxIndexEntry(target []byte, blockID uint32, txLoc wire.TxLoc) { // dbPutTxIndexEntry uses an existing database transaction to update the // transaction index given the provided serialized data that is expected to have // been serialized putTxIndexEntry. -func dbPutTxIndexEntry(dbTx database.Tx, txHash *chainhash.Hash, serializedData []byte) error { +func dbPutTxIndexEntry(dbTx database.Tx, txHash *daghash.Hash, serializedData []byte) error { txIndex := dbTx.Metadata().Bucket(txIndexKey) return txIndex.Put(txHash[:], serializedData) } @@ -186,7 +186,7 @@ func dbPutTxIndexEntry(dbTx database.Tx, txHash *chainhash.Hash, serializedData // region for the provided transaction hash from the transaction index. When // there is no entry for the provided hash, nil will be returned for the both // the region and the error. -func dbFetchTxIndexEntry(dbTx database.Tx, txHash *chainhash.Hash) (*database.BlockRegion, error) { +func dbFetchTxIndexEntry(dbTx database.Tx, txHash *daghash.Hash) (*database.BlockRegion, error) { // Load the record from the database and return now if it doesn't exist. txIndex := dbTx.Metadata().Bucket(txIndexKey) serializedData := txIndex.Get(txHash[:]) @@ -214,7 +214,7 @@ func dbFetchTxIndexEntry(dbTx database.Tx, txHash *chainhash.Hash) (*database.Bl } // Deserialize the final entry. 
- region := database.BlockRegion{Hash: &chainhash.Hash{}} + region := database.BlockRegion{Hash: &daghash.Hash{}} copy(region.Hash[:], hash[:]) region.Offset = byteOrder.Uint32(serializedData[4:8]) region.Len = byteOrder.Uint32(serializedData[8:12]) @@ -255,7 +255,7 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *btcutil.Block, blockID uint32) // dbRemoveTxIndexEntry uses an existing database transaction to remove the most // recent transaction index entry for the given hash. -func dbRemoveTxIndexEntry(dbTx database.Tx, txHash *chainhash.Hash) error { +func dbRemoveTxIndexEntry(dbTx database.Tx, txHash *daghash.Hash) error { txIndex := dbTx.Metadata().Bucket(txIndexKey) serializedData := txIndex.Get(txHash[:]) if len(serializedData) == 0 { @@ -388,7 +388,7 @@ func (idx *TxIndex) Create(dbTx database.Tx) error { // for every transaction in the passed block. // // This is part of the Indexer interface. -func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Increment the internal block ID to use for the block being connected // and add all of the transactions in the block to the index. newBlockID := idx.curBlockID + 1 @@ -411,7 +411,7 @@ func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *b // hash-to-transaction mapping for every transaction in the block. // // This is part of the Indexer interface. -func (idx *TxIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { +func (idx *TxIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockdag.UtxoViewpoint) error { // Remove all of the transactions in the block from the index. 
if err := dbRemoveTxIndexEntries(dbTx, block); err != nil { return err @@ -432,7 +432,7 @@ func (idx *TxIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view // will be returned for the both the entry and the error. // // This function is safe for concurrent access. -func (idx *TxIndex) TxBlockRegion(hash *chainhash.Hash) (*database.BlockRegion, error) { +func (idx *TxIndex) TxBlockRegion(hash *daghash.Hash) (*database.BlockRegion, error) { var region *database.BlockRegion err := idx.db.View(func(dbTx database.Tx) error { var err error diff --git a/blockchain/log.go b/blockdag/log.go similarity index 97% rename from blockchain/log.go rename to blockdag/log.go index d5fb2625c..56725b296 100644 --- a/blockchain/log.go +++ b/blockdag/log.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "github.com/btcsuite/btclog" diff --git a/blockchain/mediantime.go b/blockdag/mediantime.go similarity index 99% rename from blockchain/mediantime.go rename to blockdag/mediantime.go index ac0689e28..1b19ed80e 100644 --- a/blockchain/mediantime.go +++ b/blockdag/mediantime.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "math" diff --git a/blockchain/mediantime_test.go b/blockdag/mediantime_test.go similarity index 99% rename from blockchain/mediantime_test.go rename to blockdag/mediantime_test.go index 948d03105..4c5432dab 100644 --- a/blockchain/mediantime_test.go +++ b/blockdag/mediantime_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "strconv" diff --git a/blockchain/merkle.go b/blockdag/merkle.go similarity index 88% rename from blockchain/merkle.go rename to blockdag/merkle.go index ba670813f..319f804cd 100644 --- a/blockchain/merkle.go +++ b/blockdag/merkle.go @@ -2,12 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "math" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcutil" ) @@ -28,13 +28,13 @@ func nextPowerOfTwo(n int) int { // HashMerkleBranches takes two hashes, treated as the left and right tree // nodes, and returns the hash of their concatenation. This is a helper // function used to aid in the generation of a merkle tree. -func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.Hash { +func HashMerkleBranches(left *daghash.Hash, right *daghash.Hash) *daghash.Hash { // Concatenate the left and right nodes. - var hash [chainhash.HashSize * 2]byte - copy(hash[:chainhash.HashSize], left[:]) - copy(hash[chainhash.HashSize:], right[:]) + var hash [daghash.HashSize * 2]byte + copy(hash[:daghash.HashSize], left[:]) + copy(hash[daghash.HashSize:], right[:]) - newHash := chainhash.DoubleHashH(hash[:]) + newHash := daghash.DoubleHashH(hash[:]) return &newHash } @@ -66,12 +66,12 @@ func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash. // are calculated by concatenating the left node with itself before hashing. // Since this function uses nodes that are pointers to the hashes, empty nodes // will be nil. -func BuildMerkleTreeStore(transactions []*btcutil.Tx) []*chainhash.Hash { +func BuildMerkleTreeStore(transactions []*btcutil.Tx) []*daghash.Hash { // Calculate how many entries are required to hold the binary merkle // tree as a linear array and create an array of that size. 
nextPoT := nextPowerOfTwo(len(transactions)) arraySize := nextPoT*2 - 1 - merkles := make([]*chainhash.Hash, arraySize) + merkles := make([]*daghash.Hash, arraySize) // Create the base transaction hashes and populate the array with them. for i, tx := range transactions { diff --git a/blockchain/merkle_test.go b/blockdag/merkle_test.go similarity index 97% rename from blockchain/merkle_test.go rename to blockdag/merkle_test.go index 1c00ba659..a93fc815d 100644 --- a/blockchain/merkle_test.go +++ b/blockdag/merkle_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "testing" diff --git a/blockchain/notifications.go b/blockdag/notifications.go similarity index 94% rename from blockchain/notifications.go rename to blockdag/notifications.go index 25cc4f1f0..a8940f234 100644 --- a/blockchain/notifications.go +++ b/blockdag/notifications.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "fmt" @@ -61,7 +61,7 @@ type Notification struct { // Subscribe to block chain notifications. Registers a callback to be executed // when various events take place. See the documentation on Notification and // NotificationType for details on the types and contents of notifications. -func (b *BlockChain) Subscribe(callback NotificationCallback) { +func (b *BlockDAG) Subscribe(callback NotificationCallback) { b.notificationsLock.Lock() b.notifications = append(b.notifications, callback) b.notificationsLock.Unlock() @@ -70,7 +70,7 @@ func (b *BlockChain) Subscribe(callback NotificationCallback) { // sendNotification sends a notification with the passed type and data if the // caller requested notifications by providing a callback function in the call // to New. 
-func (b *BlockChain) sendNotification(typ NotificationType, data interface{}) { +func (b *BlockDAG) sendNotification(typ NotificationType, data interface{}) { // Generate and send the notification. n := Notification{Type: typ, Data: data} b.notificationsLock.RLock() diff --git a/blockchain/notifications_test.go b/blockdag/notifications_test.go similarity index 86% rename from blockchain/notifications_test.go rename to blockdag/notifications_test.go index 56648da28..1d82c367c 100644 --- a/blockchain/notifications_test.go +++ b/blockdag/notifications_test.go @@ -2,24 +2,24 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "testing" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" ) // TestNotifications ensures that notification callbacks are fired on events. func TestNotifications(t *testing.T) { - blocks, err := loadBlocks("blk_0_to_4.dat.bz2") + blocks, err := loadBlocks("blk_0_to_4.dat") if err != nil { t.Fatalf("Error loading file: %v\n", err) } // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("notifications", - &chaincfg.MainNetParams) + &dagconfig.MainNetParams) if err != nil { t.Fatalf("Failed to setup chain instance: %v", err) } @@ -39,7 +39,7 @@ func TestNotifications(t *testing.T) { chain.Subscribe(callback) } - _, _, err = chain.ProcessBlock(blocks[1], BFNone) + _, err = chain.ProcessBlock(blocks[1], BFNone) if err != nil { t.Fatalf("ProcessBlock fail on block 1: %v\n", err) } diff --git a/blockchain/process.go b/blockdag/process.go similarity index 82% rename from blockchain/process.go rename to blockdag/process.go index fd7d8b378..8ccbbbc80 100644 --- a/blockchain/process.go +++ b/blockdag/process.go @@ -2,13 +2,13 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "fmt" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcutil" ) @@ -37,7 +37,7 @@ const ( // the main chain or any side chains. // // This function is safe for concurrent access. -func (b *BlockChain) blockExists(hash *chainhash.Hash) (bool, error) { +func (b *BlockDAG) blockExists(hash *daghash.Hash) (bool, error) { // Check block index first (could be main chain or side chain blocks). if b.index.HaveBlock(hash) { return true, nil @@ -80,11 +80,11 @@ func (b *BlockChain) blockExists(hash *chainhash.Hash) (bool, error) { // are needed to pass along to maybeAcceptBlock. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) error { +func (b *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error { // Start with processing at least the passed hash. Leave a little room // for additional orphan blocks that need to be processed without // needing to grow the array in the common case. - processHashes := make([]*chainhash.Hash, 0, 10) + processHashes := make([]*daghash.Hash, 0, 10) processHashes = append(processHashes, hash) for len(processHashes) > 0 { // Pop the first hash to process from the slice. @@ -115,7 +115,7 @@ func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) e i-- // Potentially accept the block into the block chain. - _, err := b.maybeAcceptBlock(orphan.block, flags) + err := b.maybeAcceptBlock(orphan.block, flags) if err != nil { return err } @@ -132,16 +132,15 @@ func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) e // ProcessBlock is the main workhorse for handling insertion of new blocks into // the block chain. 
It includes functionality such as rejecting duplicate // blocks, ensuring blocks follow all rules, orphan handling, and insertion into -// the block chain along with best chain selection and reorganization. +// the block DAG. // // When no errors occurred during processing, the first return value indicates -// whether or not the block is on the main chain and the second indicates // whether or not the block is an orphan. // // This function is safe for concurrent access. -func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bool, bool, error) { - b.chainLock.Lock() - defer b.chainLock.Unlock() +func (b *BlockDAG) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bool, error) { + b.dagLock.Lock() + defer b.dagLock.Unlock() fastAdd := flags&BFFastAdd == BFFastAdd @@ -151,23 +150,23 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo // The block must not already exist in the main chain or side chains. exists, err := b.blockExists(blockHash) if err != nil { - return false, false, err + return false, err } if exists { str := fmt.Sprintf("already have block %v", blockHash) - return false, false, ruleError(ErrDuplicateBlock, str) + return false, ruleError(ErrDuplicateBlock, str) } // The block must not already exist as an orphan. if _, exists := b.orphans[*blockHash]; exists { str := fmt.Sprintf("already have block (orphan) %v", blockHash) - return false, false, ruleError(ErrDuplicateBlock, str) + return false, ruleError(ErrDuplicateBlock, str) } // Perform preliminary sanity checks on the block and its transactions. 
- err = checkBlockSanity(block, b.chainParams.PowLimit, b.timeSource, flags) + err = checkBlockSanity(block, b.dagParams.PowLimit, b.timeSource, flags) if err != nil { - return false, false, err + return false, err } // Find the previous checkpoint and perform some additional checks based @@ -179,7 +178,7 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo blockHeader := &block.MsgBlock().Header checkpointNode, err := b.findPreviousCheckpoint() if err != nil { - return false, false, err + return false, err } if checkpointNode != nil { // Ensure the block timestamp is after the checkpoint timestamp. @@ -188,7 +187,7 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo str := fmt.Sprintf("block %v has timestamp %v before "+ "last checkpoint timestamp %v", blockHash, blockHeader.Timestamp, checkpointTime) - return false, false, ruleError(ErrCheckpointTimeTooOld, str) + return false, ruleError(ErrCheckpointTimeTooOld, str) } if !fastAdd { // Even though the checks prior to now have already ensured the @@ -205,29 +204,36 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo str := fmt.Sprintf("block target difficulty of %064x "+ "is too low when compared to the previous "+ "checkpoint", currentTarget) - return false, false, ruleError(ErrDifficultyTooLow, str) + return false, ruleError(ErrDifficultyTooLow, str) } } } // Handle orphan blocks. 
- prevHash := &blockHeader.PrevBlock - prevHashExists, err := b.blockExists(prevHash) - if err != nil { - return false, false, err - } - if !prevHashExists { - log.Infof("Adding orphan block %v with parent %v", blockHash, prevHash) - b.addOrphanBlock(block) + allPrevBlocksExist := true + for _, prevBlock := range blockHeader.PrevBlocks { + prevBlockExists, err := b.blockExists(&prevBlock) + if err != nil { + return false, err + } - return false, true, nil + if !prevBlockExists { + log.Infof("Adding orphan block %v with parent %v", blockHash, prevBlock) + b.addOrphanBlock(block) + + allPrevBlocksExist = false + } + } + + if !allPrevBlocksExist { + return true, nil } // The block has passed all context independent checks and appears sane - // enough to potentially accept it into the block chain. - isMainChain, err := b.maybeAcceptBlock(block, flags) + // enough to potentially accept it into the block DAG. + err = b.maybeAcceptBlock(block, flags) if err != nil { - return false, false, err + return false, err } // Accept any orphan blocks that depend on this block (they are @@ -235,10 +241,10 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bo // there are no more. err = b.processOrphans(blockHash, flags) if err != nil { - return false, false, err + return false, err } log.Debugf("Accepted block %v", blockHash) - return isMainChain, false, nil + return false, nil } diff --git a/blockchain/scriptval.go b/blockdag/scriptval.go similarity index 99% rename from blockchain/scriptval.go rename to blockdag/scriptval.go index 855ee50c8..b88bc4199 100644 --- a/blockchain/scriptval.go +++ b/blockdag/scriptval.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "fmt" diff --git a/blockchain/scriptval_test.go b/blockdag/scriptval_test.go similarity index 88% rename from blockchain/scriptval_test.go rename to blockdag/scriptval_test.go index 6f739fe5f..ea9a312f1 100644 --- a/blockchain/scriptval_test.go +++ b/blockdag/scriptval_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "fmt" @@ -18,7 +18,7 @@ func TestCheckBlockScripts(t *testing.T) { runtime.GOMAXPROCS(runtime.NumCPU()) testBlockNum := 277647 - blockDataFile := fmt.Sprintf("%d.dat.bz2", testBlockNum) + blockDataFile := fmt.Sprintf("%d.dat", testBlockNum) blocks, err := loadBlocks(blockDataFile) if err != nil { t.Errorf("Error loading file: %v\n", err) @@ -33,7 +33,7 @@ func TestCheckBlockScripts(t *testing.T) { return } - storeDataFile := fmt.Sprintf("%d.utxostore.bz2", testBlockNum) + storeDataFile := fmt.Sprintf("%d.utxostore", testBlockNum) view, err := loadUtxoView(storeDataFile) if err != nil { t.Errorf("Error loading txstore: %v\n", err) diff --git a/blockdag/testdata/277647.dat b/blockdag/testdata/277647.dat new file mode 100644 index 000000000..4670e2d38 Binary files /dev/null and b/blockdag/testdata/277647.dat differ diff --git a/blockdag/testdata/277647.utxostore b/blockdag/testdata/277647.utxostore new file mode 100644 index 000000000..092ea6ed6 Binary files /dev/null and b/blockdag/testdata/277647.utxostore differ diff --git a/blockdag/testdata/blk_0_to_4.dat b/blockdag/testdata/blk_0_to_4.dat new file mode 100644 index 000000000..5e479d4a6 Binary files /dev/null and b/blockdag/testdata/blk_0_to_4.dat differ diff --git a/blockdag/testdata/blk_3A.dat b/blockdag/testdata/blk_3A.dat new file mode 100644 index 000000000..738583596 Binary files /dev/null and b/blockdag/testdata/blk_3A.dat differ diff --git a/blockdag/testdata/blk_3B.dat b/blockdag/testdata/blk_3B.dat new file mode 100644 index 
000000000..84a900909 Binary files /dev/null and b/blockdag/testdata/blk_3B.dat differ diff --git a/blockchain/testdata/reorgtest.hex b/blockdag/testdata/reorgtest.hex similarity index 100% rename from blockchain/testdata/reorgtest.hex rename to blockdag/testdata/reorgtest.hex diff --git a/blockchain/thresholdstate.go b/blockdag/thresholdstate.go similarity index 88% rename from blockchain/thresholdstate.go rename to blockdag/thresholdstate.go index 32fb45608..fbaf3bfda 100644 --- a/blockchain/thresholdstate.go +++ b/blockdag/thresholdstate.go @@ -2,12 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "fmt" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // ThresholdState define the various threshold states used when voting on @@ -93,19 +93,19 @@ type thresholdConditionChecker interface { // thresholdStateCache provides a type to cache the threshold states of each // threshold window for a set of IDs. type thresholdStateCache struct { - entries map[chainhash.Hash]ThresholdState + entries map[daghash.Hash]ThresholdState } // Lookup returns the threshold state associated with the given hash along with // a boolean that indicates whether or not it is valid. -func (c *thresholdStateCache) Lookup(hash *chainhash.Hash) (ThresholdState, bool) { +func (c *thresholdStateCache) Lookup(hash *daghash.Hash) (ThresholdState, bool) { state, ok := c.entries[*hash] return state, ok } // Update updates the cache to contain the provided hash to threshold state // mapping. 
-func (c *thresholdStateCache) Update(hash *chainhash.Hash, state ThresholdState) { +func (c *thresholdStateCache) Update(hash *daghash.Hash, state ThresholdState) { c.entries[*hash] = state } @@ -115,7 +115,7 @@ func newThresholdCaches(numCaches uint32) []thresholdStateCache { caches := make([]thresholdStateCache, numCaches) for i := 0; i < len(caches); i++ { caches[i] = thresholdStateCache{ - entries: make(map[chainhash.Hash]ThresholdState), + entries: make(map[daghash.Hash]ThresholdState), } } return caches @@ -126,7 +126,7 @@ func newThresholdCaches(numCaches uint32) []thresholdStateCache { // threshold states for previous windows are only calculated once. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) { +func (b *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) { // The threshold state for the window that contains the genesis block is // defined by definition. confirmationWindow := int32(checker.MinerConfirmationWindow()) @@ -230,7 +230,7 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit } // Get the previous block node. - countNode = countNode.parent + countNode = countNode.selectedParent } // The state is locked in if the number of blocks in the @@ -245,8 +245,8 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit // was locked in. state = ThresholdActive - // Nothing to do if the previous state is active or failed since - // they are both terminal states. + // Nothing to do if the previous state is active or failed since + // they are both terminal states. 
case ThresholdActive: case ThresholdFailed: } @@ -263,10 +263,10 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit // deployment ID for the block AFTER the end of the current best chain. // // This function is safe for concurrent access. -func (b *BlockChain) ThresholdState(deploymentID uint32) (ThresholdState, error) { - b.chainLock.Lock() - state, err := b.deploymentState(b.bestChain.Tip(), deploymentID) - b.chainLock.Unlock() +func (b *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) { + b.dagLock.Lock() + state, err := b.deploymentState(b.dag.SelectedTip(), deploymentID) + b.dagLock.Unlock() return state, err } @@ -275,10 +275,10 @@ func (b *BlockChain) ThresholdState(deploymentID uint32) (ThresholdState, error) // false otherwise. // // This function is safe for concurrent access. -func (b *BlockChain) IsDeploymentActive(deploymentID uint32) (bool, error) { - b.chainLock.Lock() - state, err := b.deploymentState(b.bestChain.Tip(), deploymentID) - b.chainLock.Unlock() +func (b *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) { + b.dagLock.Lock() + state, err := b.deploymentState(b.dag.SelectedTip(), deploymentID) + b.dagLock.Unlock() if err != nil { return false, err } @@ -296,12 +296,12 @@ func (b *BlockChain) IsDeploymentActive(deploymentID uint32) (bool, error) { // AFTER the passed node. // // This function MUST be called with the chain state lock held (for writes). 
-func (b *BlockChain) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) { - if deploymentID > uint32(len(b.chainParams.Deployments)) { +func (b *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) { + if deploymentID > uint32(len(b.dagParams.Deployments)) { return ThresholdFailed, DeploymentError(deploymentID) } - deployment := &b.chainParams.Deployments[deploymentID] + deployment := &b.dagParams.Deployments[deploymentID] checker := deploymentChecker{deployment: deployment, chain: b} cache := &b.deploymentCaches[deploymentID] @@ -311,12 +311,12 @@ func (b *BlockChain) deploymentState(prevNode *blockNode, deploymentID uint32) ( // initThresholdCaches initializes the threshold state caches for each warning // bit and defined deployment and provides warnings if the chain is current per // the warnUnknownVersions and warnUnknownRuleActivations functions. -func (b *BlockChain) initThresholdCaches() error { +func (b *BlockDAG) initThresholdCaches() error { // Initialize the warning and deployment caches by calculating the // threshold state for each of them. This will ensure the caches are // populated and any states that needed to be recalculated due to // definition changes is done now. 
- prevNode := b.bestChain.Tip().parent + prevNode := b.dag.SelectedTip().selectedParent for bit := uint32(0); bit < vbNumBits; bit++ { checker := bitConditionChecker{bit: bit, chain: b} cache := &b.warningCaches[bit] @@ -325,8 +325,8 @@ func (b *BlockChain) initThresholdCaches() error { return err } } - for id := 0; id < len(b.chainParams.Deployments); id++ { - deployment := &b.chainParams.Deployments[id] + for id := 0; id < len(b.dagParams.Deployments); id++ { + deployment := &b.dagParams.Deployments[id] cache := &b.deploymentCaches[id] checker := deploymentChecker{deployment: deployment, chain: b} _, err := b.thresholdState(prevNode, checker, cache) @@ -340,7 +340,7 @@ func (b *BlockChain) initThresholdCaches() error { if b.isCurrent() { // Warn if a high enough percentage of the last blocks have // unexpected versions. - bestNode := b.bestChain.Tip() + bestNode := b.dag.SelectedTip() if err := b.warnUnknownVersions(bestNode); err != nil { return err } diff --git a/blockchain/thresholdstate_test.go b/blockdag/thresholdstate_test.go similarity index 97% rename from blockchain/thresholdstate_test.go rename to blockdag/thresholdstate_test.go index 089d24c5b..f3a55b5e1 100644 --- a/blockchain/thresholdstate_test.go +++ b/blockdag/thresholdstate_test.go @@ -2,12 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // TestThresholdStateStringer tests the stringized output for the @@ -65,7 +65,7 @@ nextTest: for _, test := range tests { cache := &newThresholdCaches(1)[0] for i := 0; i < test.numEntries; i++ { - var hash chainhash.Hash + var hash daghash.Hash hash[0] = uint8(i + 1) // Ensure the hash isn't available in the cache already. 
diff --git a/blockchain/timesorter.go b/blockdag/timesorter.go similarity index 97% rename from blockchain/timesorter.go rename to blockdag/timesorter.go index d0288e1d3..5e6921b0d 100644 --- a/blockchain/timesorter.go +++ b/blockdag/timesorter.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag // timeSorter implements sort.Interface to allow a slice of timestamps to // be sorted. diff --git a/blockchain/timesorter_test.go b/blockdag/timesorter_test.go similarity index 98% rename from blockchain/timesorter_test.go rename to blockdag/timesorter_test.go index 68eb69326..1bd3e492e 100644 --- a/blockchain/timesorter_test.go +++ b/blockdag/timesorter_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "reflect" diff --git a/blockchain/upgrade.go b/blockdag/upgrade.go similarity index 95% rename from blockchain/upgrade.go rename to blockdag/upgrade.go index 40a3ca67c..7040f52fe 100644 --- a/blockchain/upgrade.go +++ b/blockdag/upgrade.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "bytes" @@ -11,7 +11,7 @@ import ( "fmt" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" ) @@ -46,8 +46,8 @@ func interruptRequested(interrupted <-chan struct{}) bool { // chain. This is used by the block index migration to track block metadata that // will be written to disk. type blockChainContext struct { - parent *chainhash.Hash - children []*chainhash.Hash + parent *daghash.Hash + children []*daghash.Hash height int32 mainChain bool } @@ -77,12 +77,12 @@ func migrateBlockIndex(db database.DB) error { } // Get tip of the main chain. 
- serializedData := dbTx.Metadata().Get(chainStateKeyName) - state, err := deserializeBestChainState(serializedData) + serializedData := dbTx.Metadata().Get(dagStateKeyName) + state, err := deserializeDAGState(serializedData) if err != nil { return err } - tip := &state.hash + tip := &state.SelectedHash // Scan the old block index bucket and construct a mapping of each block // to parent block and all child blocks. @@ -106,8 +106,8 @@ func migrateBlockIndex(db database.DB) error { endOffset := blockHdrOffset + blockHdrSize headerBytes := blockRow[blockHdrOffset:endOffset:endOffset] - var hash chainhash.Hash - copy(hash[:], hashBytes[0:chainhash.HashSize]) + var hash daghash.Hash + copy(hash[:], hashBytes[0:daghash.HashSize]) chainContext := blocksMap[hash] if chainContext.height == -1 { @@ -149,8 +149,8 @@ func migrateBlockIndex(db database.DB) error { // each block to its parent block and all child blocks. This mapping represents // the full tree of blocks. This function does not populate the height or // mainChain fields of the returned blockChainContext values. 
-func readBlockTree(v1BlockIdxBucket database.Bucket) (map[chainhash.Hash]*blockChainContext, error) { - blocksMap := make(map[chainhash.Hash]*blockChainContext) +func readBlockTree(v1BlockIdxBucket database.Bucket) (map[daghash.Hash]*blockChainContext, error) { + blocksMap := make(map[daghash.Hash]*blockChainContext) err := v1BlockIdxBucket.ForEach(func(_, blockRow []byte) error { var header wire.BlockHeader endOffset := blockHdrOffset + blockHdrSize @@ -161,7 +161,7 @@ func readBlockTree(v1BlockIdxBucket database.Bucket) (map[chainhash.Hash]*blockC } blockHash := header.BlockHash() - prevHash := header.PrevBlock + prevHash := *header.SelectedPrevBlock() if blocksMap[blockHash] == nil { blocksMap[blockHash] = &blockChainContext{height: -1} @@ -184,7 +184,7 @@ func readBlockTree(v1BlockIdxBucket database.Bucket) (map[chainhash.Hash]*blockC // breadth-first, assigning a height to every block with a path back to the // genesis block. This function modifies the height field on the blocksMap // entries. -func determineBlockHeights(blocksMap map[chainhash.Hash]*blockChainContext) error { +func determineBlockHeights(blocksMap map[daghash.Hash]*blockChainContext) error { queue := list.New() // The genesis block is included in blocksMap as a child of the zero hash @@ -201,7 +201,7 @@ func determineBlockHeights(blocksMap map[chainhash.Hash]*blockChainContext) erro for e := queue.Front(); e != nil; e = queue.Front() { queue.Remove(e) - hash := e.Value.(*chainhash.Hash) + hash := e.Value.(*daghash.Hash) height := blocksMap[*hash].height // For each block with this one as a parent, assign it a height and @@ -218,7 +218,7 @@ func determineBlockHeights(blocksMap map[chainhash.Hash]*blockChainContext) erro // determineMainChainBlocks traverses the block graph down from the tip to // determine which block hashes that are part of the main chain. This function // modifies the mainChain field on the blocksMap entries. 
-func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, tip *chainhash.Hash) { +func determineMainChainBlocks(blocksMap map[daghash.Hash]*blockChainContext, tip *daghash.Hash) { for nextHash := tip; *nextHash != zeroHash; nextHash = blocksMap[*nextHash].parent { blocksMap[*nextHash].mainChain = true } @@ -476,7 +476,7 @@ func upgradeUtxoSetToV2(db database.DB, interrupt <-chan struct{}) error { // Old key was the transaction hash. oldKey := v1Cursor.Key() - var txHash chainhash.Hash + var txHash daghash.Hash copy(txHash[:], oldKey) // Deserialize the old entry which included all utxos @@ -578,7 +578,7 @@ func upgradeUtxoSetToV2(db database.DB, interrupt <-chan struct{}) error { // // All buckets used by this package are guaranteed to be the latest version if // this function returns without error. -func (b *BlockChain) maybeUpgradeDbBuckets(interrupt <-chan struct{}) error { +func (b *BlockDAG) maybeUpgradeDbBuckets(interrupt <-chan struct{}) error { // Load or create bucket versions as needed. var utxoSetVersion uint32 err := b.db.Update(func(dbTx database.Tx) error { diff --git a/blockchain/upgrade_test.go b/blockdag/upgrade_test.go similarity index 99% rename from blockchain/upgrade_test.go rename to blockdag/upgrade_test.go index 67a10085f..25c2cab37 100644 --- a/blockchain/upgrade_test.go +++ b/blockdag/upgrade_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "reflect" diff --git a/blockchain/utxoviewpoint.go b/blockdag/utxoviewpoint.go similarity index 74% rename from blockchain/utxoviewpoint.go rename to blockdag/utxoviewpoint.go index 256103a1b..41dda9c3f 100644 --- a/blockchain/utxoviewpoint.go +++ b/blockdag/utxoviewpoint.go @@ -2,12 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package blockchain +package blockdag import ( "fmt" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -119,20 +119,32 @@ func (entry *UtxoEntry) Clone() *UtxoEntry { // The unspent outputs are needed by other transactions for things such as // script validation and double spend prevention. type UtxoViewpoint struct { - entries map[wire.OutPoint]*UtxoEntry - bestHash chainhash.Hash + entries map[wire.OutPoint]*UtxoEntry + tips blockSet } -// BestHash returns the hash of the best block in the chain the view currently -// respresents. -func (view *UtxoViewpoint) BestHash() *chainhash.Hash { - return &view.bestHash +// Tips returns the hashes of the tips in the DAG the view currently +// represents. +func (view *UtxoViewpoint) Tips() blockSet { + return view.tips } -// SetBestHash sets the hash of the best block in the chain the view currently -// respresents. -func (view *UtxoViewpoint) SetBestHash(hash *chainhash.Hash) { - view.bestHash = *hash +// SetTips sets the hashes of the tips in the DAG the view currently +// represents. +func (view *UtxoViewpoint) SetTips(tips blockSet) { + view.tips = tips +} + +// AddBlock removes all the parents of block from the tips and adds +// the given block to the tips. +func (view *UtxoViewpoint) AddBlock(block *blockNode) { + updatedTips := view.tips.clone() + for _, parent := range block.parents { + updatedTips.remove(parent) + } + + updatedTips.add(block) + view.tips = updatedTips } // LookupEntry returns information about a given transaction output according to @@ -264,24 +276,24 @@ func (view *UtxoViewpoint) connectTransaction(tx *btcutil.Tx, blockHeight int32, // spend as spent, and setting the best hash for the view to the passed block. // In addition, when the 'stxos' argument is not nil, it will be updated to // append an entry for each spent txout. 
-func (view *UtxoViewpoint) connectTransactions(block *btcutil.Block, stxos *[]spentTxOut) error { - for _, tx := range block.Transactions() { - err := view.connectTransaction(tx, block.Height(), stxos) +func (view *UtxoViewpoint) connectTransactions(block *blockNode, transactions []*btcutil.Tx, stxos *[]spentTxOut) error { + for _, tx := range transactions { + err := view.connectTransaction(tx, block.height, stxos) if err != nil { return err } } - // Update the best hash for view to include this block since all of its + // Update the tips for view to include this block since all of its // transactions have been connected. - view.SetBestHash(block.Hash()) + view.AddBlock(block) return nil } // fetchEntryByHash attempts to find any available utxo for the given hash by // searching the entire set of possible outputs for the given hash. It checks // the view first and then falls back to the database if needed. -func (view *UtxoViewpoint) fetchEntryByHash(db database.DB, hash *chainhash.Hash) (*UtxoEntry, error) { +func (view *UtxoViewpoint) fetchEntryByHash(db database.DB, hash *daghash.Hash) (*UtxoEntry, error) { // First attempt to find a utxo with the provided hash in the view. prevOut := wire.OutPoint{Hash: *hash} for idx := uint32(0); idx < MaxOutputsPerBlock; idx++ { @@ -304,140 +316,6 @@ func (view *UtxoViewpoint) fetchEntryByHash(db database.DB, hash *chainhash.Hash return entry, err } -// disconnectTransactions updates the view by removing all of the transactions -// created by the passed block, restoring all utxos the transactions spent by -// using the provided spent txo information, and setting the best hash for the -// view to the block before the passed block. -func (view *UtxoViewpoint) disconnectTransactions(db database.DB, block *btcutil.Block, stxos []spentTxOut) error { - // Sanity check the correct number of stxos are provided. 
- if len(stxos) != countSpentOutputs(block) { - return AssertError("disconnectTransactions called with bad " + - "spent transaction out information") - } - - // Loop backwards through all transactions so everything is unspent in - // reverse order. This is necessary since transactions later in a block - // can spend from previous ones. - stxoIdx := len(stxos) - 1 - transactions := block.Transactions() - for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- { - tx := transactions[txIdx] - - // All entries will need to potentially be marked as a coinbase. - var packedFlags txoFlags - isCoinBase := txIdx == 0 - if isCoinBase { - packedFlags |= tfCoinBase - } - - // Mark all of the spendable outputs originally created by the - // transaction as spent. It is instructive to note that while - // the outputs aren't actually being spent here, rather they no - // longer exist, since a pruned utxo set is used, there is no - // practical difference between a utxo that does not exist and - // one that has been spent. - // - // When the utxo does not already exist in the view, add an - // entry for it and then mark it spent. This is done because - // the code relies on its existence in the view in order to - // signal modifications have happened. - txHash := tx.Hash() - prevOut := wire.OutPoint{Hash: *txHash} - for txOutIdx, txOut := range tx.MsgTx().TxOut { - if txscript.IsUnspendable(txOut.PkScript) { - continue - } - - prevOut.Index = uint32(txOutIdx) - entry := view.entries[prevOut] - if entry == nil { - entry = &UtxoEntry{ - amount: txOut.Value, - pkScript: txOut.PkScript, - blockHeight: block.Height(), - packedFlags: packedFlags, - } - - view.entries[prevOut] = entry - } - - entry.Spend() - } - - // Loop backwards through all of the transaction inputs (except - // for the coinbase which has no inputs) and unspend the - // referenced txos. This is necessary to match the order of the - // spent txout entries. 
- if isCoinBase { - continue - } - for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- { - // Ensure the spent txout index is decremented to stay - // in sync with the transaction input. - stxo := &stxos[stxoIdx] - stxoIdx-- - - // When there is not already an entry for the referenced - // output in the view, it means it was previously spent, - // so create a new utxo entry in order to resurrect it. - originOut := &tx.MsgTx().TxIn[txInIdx].PreviousOutPoint - entry := view.entries[*originOut] - if entry == nil { - entry = new(UtxoEntry) - view.entries[*originOut] = entry - } - - // The legacy v1 spend journal format only stored the - // coinbase flag and height when the output was the last - // unspent output of the transaction. As a result, when - // the information is missing, search for it by scanning - // all possible outputs of the transaction since it must - // be in one of them. - // - // It should be noted that this is quite inefficient, - // but it realistically will almost never run since all - // new entries include the information for all outputs - // and thus the only way this will be hit is if a long - // enough reorg happens such that a block with the old - // spend data is being disconnected. The probability of - // that in practice is extremely low to begin with and - // becomes vanishingly small the more new blocks are - // connected. In the case of a fresh database that has - // only ever run with the new v2 format, this code path - // will never run. - if stxo.height == 0 { - utxo, err := view.fetchEntryByHash(db, txHash) - if err != nil { - return err - } - if utxo == nil { - return AssertError(fmt.Sprintf("unable "+ - "to resurrect legacy stxo %v", - *originOut)) - } - - stxo.height = utxo.BlockHeight() - stxo.isCoinBase = utxo.IsCoinBase() - } - - // Restore the utxo using the stxo data from the spend - // journal and mark it as modified. 
- entry.amount = stxo.amount - entry.pkScript = stxo.pkScript - entry.blockHeight = stxo.height - entry.packedFlags = tfModified - if stxo.isCoinBase { - entry.packedFlags |= tfCoinBase - } - } - } - - // Update the best hash for view to the previous block since all of the - // transactions for the current block have been disconnected. - view.SetBestHash(&block.MsgBlock().Header.PrevBlock) - return nil -} - // RemoveEntry removes the given transaction output from the current state of // the view. It will have no effect if the passed output does not exist in the // view. @@ -530,7 +408,7 @@ func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, block *btcutil.Block) // Build a map of in-flight transactions because some of the inputs in // this block could be referencing other transactions earlier in this // block which are not yet in the chain. - txInFlight := map[chainhash.Hash]int{} + txInFlight := map[daghash.Hash]int{} transactions := block.Transactions() for i, tx := range transactions { txInFlight[*tx.Hash()] = i @@ -589,7 +467,7 @@ func NewUtxoViewpoint() *UtxoViewpoint { // so the returned view can be examined for duplicate transactions. // // This function is safe for concurrent access however the returned view is NOT. -func (b *BlockChain) FetchUtxoView(tx *btcutil.Tx) (*UtxoViewpoint, error) { +func (b *BlockDAG) FetchUtxoView(tx *btcutil.Tx) (*UtxoViewpoint, error) { // Create a set of needed outputs based on those referenced by the // inputs of the passed transaction and the outputs of the transaction // itself. @@ -608,9 +486,9 @@ func (b *BlockChain) FetchUtxoView(tx *btcutil.Tx) (*UtxoViewpoint, error) { // Request the utxos from the point of view of the end of the main // chain. 
view := NewUtxoViewpoint() - b.chainLock.RLock() + b.dagLock.RLock() err := view.fetchUtxosMain(b.db, neededSet) - b.chainLock.RUnlock() + b.dagLock.RUnlock() return view, err } @@ -624,9 +502,9 @@ func (b *BlockChain) FetchUtxoView(tx *btcutil.Tx) (*UtxoViewpoint, error) { // // This function is safe for concurrent access however the returned entry (if // any) is NOT. -func (b *BlockChain) FetchUtxoEntry(outpoint wire.OutPoint) (*UtxoEntry, error) { - b.chainLock.RLock() - defer b.chainLock.RUnlock() +func (b *BlockDAG) FetchUtxoEntry(outpoint wire.OutPoint) (*UtxoEntry, error) { + b.dagLock.RLock() + defer b.dagLock.RUnlock() var entry *UtxoEntry err := b.db.View(func(dbTx database.Tx) error { diff --git a/blockchain/validate.go b/blockdag/validate.go similarity index 94% rename from blockchain/validate.go rename to blockdag/validate.go index 3be4e0d58..18a49a788 100644 --- a/blockchain/validate.go +++ b/blockdag/validate.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "encoding/binary" @@ -11,8 +11,8 @@ import ( "math/big" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -52,10 +52,10 @@ const ( ) var ( - // zeroHash is the zero value for a chainhash.Hash and is defined as + // zeroHash is the zero value for a daghash.Hash and is defined as // a package level variable to avoid the need to create a new instance // every time a check is needed. - zeroHash chainhash.Hash + zeroHash daghash.Hash // block91842Hash is one of the two nodes which violate the rules // set forth in BIP0030. 
It is defined as a package level variable to @@ -199,13 +199,13 @@ func isBIP0030Node(node *blockNode) bool { // // At the target block generation rate for the main network, this is // approximately every 4 years. -func CalcBlockSubsidy(height int32, chainParams *chaincfg.Params) int64 { - if chainParams.SubsidyReductionInterval == 0 { +func CalcBlockSubsidy(height int32, dagParams *dagconfig.Params) int64 { + if dagParams.SubsidyReductionInterval == 0 { return baseSubsidy } // Equivalent to: baseSubsidy / 2^(height/subsidyHalvingInterval) - return baseSubsidy >> uint(height/chainParams.SubsidyReductionInterval) + return baseSubsidy >> uint(height/dagParams.SubsidyReductionInterval) } // CheckTransactionSanity performs some preliminary checks on a transaction to @@ -547,7 +547,7 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median // Check for duplicate transactions. This check will be fairly quick // since the transaction hashes are already cached due to building the // merkle tree above. - existingTxHashes := make(map[chainhash.Hash]struct{}) + existingTxHashes := make(map[daghash.Hash]struct{}) for _, tx := range transactions { hash := tx.Hash() if _, exists := existingTxHashes[*hash]; exists { @@ -649,13 +649,13 @@ func checkSerializedHeight(coinbaseTx *btcutil.Tx, wantHeight int32) error { // the checkpoints are not performed. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode *blockNode, flags BehaviorFlags) error { +func (b *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, selectedParent *blockNode, flags BehaviorFlags) error { fastAdd := flags&BFFastAdd == BFFastAdd if !fastAdd { // Ensure the difficulty specified in the block header matches // the calculated difficulty based on the previous block and // difficulty retarget rules. 
- expectedDifficulty, err := b.calcNextRequiredDifficulty(prevNode, + expectedDifficulty, err := b.calcNextRequiredDifficulty(selectedParent, header.Timestamp) if err != nil { return err @@ -669,7 +669,7 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // Ensure the timestamp for the block header is after the // median time of the last several blocks (medianTimeBlocks). - medianTime := prevNode.CalcPastMedianTime() + medianTime := selectedParent.CalcPastMedianTime() if !header.Timestamp.After(medianTime) { str := "block timestamp of %v is not after expected %v" str = fmt.Sprintf(str, header.Timestamp, medianTime) @@ -679,7 +679,7 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // The height of this block is one more than the referenced previous // block. - blockHeight := prevNode.height + 1 + blockHeight := selectedParent.height + 1 // Ensure chain matches up to predetermined checkpoints. blockHash := header.BlockHash() @@ -707,7 +707,7 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // Reject outdated block versions once a majority of the network // has upgraded. These were originally voted on by BIP0034, // BIP0065, and BIP0066. - params := b.chainParams + params := b.dagParams if header.Version < 2 && blockHeight >= params.BIP0034Height || header.Version < 3 && blockHeight >= params.BIP0066Height { @@ -730,10 +730,10 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // for how the flags modify its behavior. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode, flags BehaviorFlags) error { +func (b *BlockDAG) checkBlockContext(block *btcutil.Block, selectedParent *blockNode, flags BehaviorFlags) error { // Perform all block header related validation checks. 
header := &block.MsgBlock().Header - err := b.checkBlockHeaderContext(header, prevNode, flags) + err := b.checkBlockHeaderContext(header, selectedParent, flags) if err != nil { return err } @@ -741,11 +741,11 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode fastAdd := flags&BFFastAdd == BFFastAdd if !fastAdd { - blockTime := prevNode.CalcPastMedianTime() + blockTime := selectedParent.CalcPastMedianTime() // The height of this block is one more than the referenced // previous block. - blockHeight := prevNode.height + 1 + blockHeight := selectedParent.height + 1 // Ensure all transactions in the block are finalized. for _, tx := range block.Transactions() { @@ -763,7 +763,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode // once a majority of the network has upgraded. This is part of // BIP0034. if ShouldHaveSerializedBlockHeight(header) && - blockHeight >= b.chainParams.BIP0034Height { + blockHeight >= b.dagParams.BIP0034Height { coinbaseTx := block.Transactions()[0] err := checkSerializedHeight(coinbaseTx, blockHeight) @@ -787,7 +787,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode // http://r6.ca/blog/20120206T005236Z.html. // // This function MUST be called with the chain state lock held (for reads). -func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { +func (b *BlockDAG) checkBIP0030(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { // Fetch utxos for all of the transaction ouputs in this block. // Typically, there will not be any utxos for any of the outputs. fetchSet := make(map[wire.OutPoint]struct{}) @@ -829,7 +829,7 @@ func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block, view *U // // NOTE: The transaction MUST have already been sanity checked with the // CheckTransactionSanity function prior to calling this function. 
-func CheckTransactionInputs(tx *btcutil.Tx, txHeight int32, utxoView *UtxoViewpoint, chainParams *chaincfg.Params) (int64, error) { +func CheckTransactionInputs(tx *btcutil.Tx, txHeight int32, utxoView *UtxoViewpoint, dagParams *dagconfig.Params) (int64, error) { // Coinbase transactions have no inputs. if IsCoinBase(tx) { return 0, nil @@ -853,7 +853,7 @@ func CheckTransactionInputs(tx *btcutil.Tx, txHeight int32, utxoView *UtxoViewpo if utxo.IsCoinBase() { originHeight := utxo.BlockHeight() blocksSincePrev := txHeight - originHeight - coinbaseMaturity := int32(chainParams.CoinbaseMaturity) + coinbaseMaturity := int32(dagParams.CoinbaseMaturity) if blocksSincePrev < coinbaseMaturity { str := fmt.Sprintf("tried to spend coinbase "+ "transaction output %v from height %v "+ @@ -944,7 +944,7 @@ func CheckTransactionInputs(tx *btcutil.Tx, txHeight int32, utxoView *UtxoViewpo // with that node. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint, stxos *[]spentTxOut) error { +func (b *BlockDAG) checkConnectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint, stxos *[]spentTxOut) error { // If the side chain blocks end up in the database, a call to // CheckBlockSanity should be done here in case a previous version // allowed a block that is no longer valid. However, since the @@ -953,17 +953,17 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi // The coinbase for the Genesis block is not spendable, so just return // an error now. - if node.hash.IsEqual(b.chainParams.GenesisHash) { + if node.hash.IsEqual(b.dagParams.GenesisHash) { str := "the coinbase for the genesis block is not spendable" return ruleError(ErrMissingTxOut, str) } // Ensure the view is for the node being checked. 
- parentHash := &block.MsgBlock().Header.PrevBlock - if !view.BestHash().IsEqual(parentHash) { + parentHashes := block.MsgBlock().Header.PrevBlocks + if !view.Tips().hashesEqual(parentHashes) { return AssertError(fmt.Sprintf("inconsistent view when "+ - "checking block connection: best hash is %v instead "+ - "of expected %v", view.BestHash(), parentHash)) + "checking block connection: tips are %v instead "+ + "of expected %v", view.Tips(), parentHashes)) } // BIP0030 added a rule to prevent blocks which contain duplicate @@ -982,7 +982,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi // BIP0034 is not yet active. This is a useful optimization because the // BIP0030 check is expensive since it involves a ton of cache misses in // the utxoset. - if !isBIP0030Node(node) && (node.height < b.chainParams.BIP0034Height) { + if !isBIP0030Node(node) && (node.height < b.dagParams.BIP0034Height) { err := b.checkBIP0030(node, block, view) if err != nil { return err @@ -1051,7 +1051,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi var totalFees int64 for _, tx := range transactions { txFee, err := CheckTransactionInputs(tx, node.height, view, - b.chainParams) + b.dagParams) if err != nil { return err } @@ -1075,6 +1075,10 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi } } + // Update the tips for view to include this block since all of its + // transactions have been connected. + view.AddBlock(node) + // The total output values of the coinbase transaction must not exceed // the expected subsidy value plus total transaction fees gained from // mining the block. 
It is safe to ignore overflow and out of range @@ -1084,7 +1088,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi for _, txOut := range transactions[0].MsgTx().TxOut { totalSatoshiOut += txOut.Value } - expectedSatoshiOut := CalcBlockSubsidy(node.height, b.chainParams) + + expectedSatoshiOut := CalcBlockSubsidy(node.height, b.dagParams) + totalFees if totalSatoshiOut > expectedSatoshiOut { str := fmt.Sprintf("coinbase transaction for block pays %v "+ @@ -1115,13 +1119,13 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi // Enforce DER signatures for block versions 3+ once the historical // activation threshold has been reached. This is part of BIP0066. blockHeader := &block.MsgBlock().Header - if blockHeader.Version >= 3 && node.height >= b.chainParams.BIP0066Height { + if blockHeader.Version >= 3 && node.height >= b.dagParams.BIP0066Height { scriptFlags |= txscript.ScriptVerifyDERSignatures } // We obtain the MTP of the *previous* block in order to // determine if transactions in the current block are final. - medianTime := node.parent.CalcPastMedianTime() + medianTime := node.selectedParent.CalcPastMedianTime() // We also enforce the relative sequence number based // lock-times within the inputs of all transactions in this @@ -1155,9 +1159,9 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi } } - // Update the best hash for view to include this block since all of its + // Update the view tips to include this block since all of its // transactions have been connected. - view.SetBestHash(&node.hash) + view.AddBlock(node) return nil } @@ -1167,29 +1171,30 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi // work requirement. The block must connect to the current tip of the main chain. // // This function is safe for concurrent access. 
-func (b *BlockChain) CheckConnectBlockTemplate(block *btcutil.Block) error { - b.chainLock.Lock() - defer b.chainLock.Unlock() +func (b *BlockDAG) CheckConnectBlockTemplate(block *btcutil.Block) error { + b.dagLock.Lock() + defer b.dagLock.Unlock() // Skip the proof of work check as this is just a block template. flags := BFNoPoWCheck // This only checks whether the block can be connected to the tip of the // current chain. - tip := b.bestChain.Tip() + tips := b.dag.Tips() header := block.MsgBlock().Header - if tip.hash != header.PrevBlock { - str := fmt.Sprintf("previous block must be the current chain tip %v, "+ - "instead got %v", tip.hash, header.PrevBlock) + prevHashes := header.PrevBlocks + if !tips.hashesEqual(prevHashes) { + str := fmt.Sprintf("previous blocks must be the currents tips %v, "+ + "instead got %v", tips, prevHashes) return ruleError(ErrPrevBlockNotBest, str) } - err := checkBlockSanity(block, b.chainParams.PowLimit, b.timeSource, flags) + err := checkBlockSanity(block, b.dagParams.PowLimit, b.timeSource, flags) if err != nil { return err } - err = b.checkBlockContext(block, tip, flags) + err = b.checkBlockContext(block, b.dag.SelectedTip(), flags) if err != nil { return err } @@ -1197,7 +1202,7 @@ func (b *BlockChain) CheckConnectBlockTemplate(block *btcutil.Block) error { // Leave the spent txouts entry nil in the state since the information // is not needed and thus extra work can be avoided. 
view := NewUtxoViewpoint() - view.SetBestHash(&tip.hash) - newNode := newBlockNode(&header, tip) + view.SetTips(tips) + newNode := newBlockNode(&header, b.dag.Tips()) return b.checkConnectBlock(newNode, block, view, nil) } diff --git a/blockchain/validate_test.go b/blockdag/validate_test.go similarity index 90% rename from blockchain/validate_test.go rename to blockdag/validate_test.go index ddce9e928..546f04948 100644 --- a/blockchain/validate_test.go +++ b/blockdag/validate_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "math" @@ -10,8 +10,8 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -68,7 +68,7 @@ func TestSequenceLocksActive(t *testing.T) { func TestCheckConnectBlockTemplate(t *testing.T) { // Create a new database and chain instance to run tests against. 
chain, teardownFunc, err := chainSetup("checkconnectblocktemplate", - &chaincfg.MainNetParams) + &dagconfig.MainNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return @@ -83,8 +83,8 @@ func TestCheckConnectBlockTemplate(t *testing.T) { // (genesis block) -> 1 -> 2 -> 3 -> 4 // \-> 3a testFiles := []string{ - "blk_0_to_4.dat.bz2", - "blk_3A.dat.bz2", + "blk_0_to_4.dat", + "blk_3B.dat", } var blocks []*btcutil.Block @@ -97,15 +97,11 @@ func TestCheckConnectBlockTemplate(t *testing.T) { } for i := 1; i <= 3; i++ { - isMainChain, _, err := chain.ProcessBlock(blocks[i], BFNone) + _, err := chain.ProcessBlock(blocks[i], BFNone) if err != nil { t.Fatalf("CheckConnectBlockTemplate: Received unexpected error "+ "processing block %d: %v", i, err) } - if !isMainChain { - t.Fatalf("CheckConnectBlockTemplate: Expected block %d to connect "+ - "to main chain", i) - } } // Block 3 should fail to connect since it's already inserted. @@ -151,7 +147,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) { // TestCheckBlockSanity tests the CheckBlockSanity function to ensure it works // as expected. func TestCheckBlockSanity(t *testing.T) { - powLimit := chaincfg.MainNetParams.PowLimit + powLimit := dagconfig.MainNetParams.PowLimit block := btcutil.NewBlock(&Block100000) timeSource := NewMedianTime() err := CheckBlockSanity(block, powLimit, timeSource) @@ -174,7 +170,7 @@ func TestCheckBlockSanity(t *testing.T) { // and handled properly. func TestCheckSerializedHeight(t *testing.T) { // Create an empty coinbase template to be used in the tests below. - coinbaseOutpoint := wire.NewOutPoint(&chainhash.Hash{}, math.MaxUint32) + coinbaseOutpoint := wire.NewOutPoint(&daghash.Hash{}, math.MaxUint32) coinbaseTx := wire.NewMsgTx(1) coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil)) @@ -238,22 +234,30 @@ func TestCheckSerializedHeight(t *testing.T) { // test Block operations. 
var Block100000 = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash([32]byte{ // Make go vet happy. - 0x50, 0x12, 0x01, 0x19, 0x17, 0x2a, 0x61, 0x04, - 0x21, 0xa6, 0xc3, 0x01, 0x1d, 0xd3, 0x30, 0xd9, - 0xdf, 0x07, 0xb6, 0x36, 0x16, 0xc2, 0xcc, 0x1f, - 0x1c, 0xd0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, - }), // 000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250 - MerkleRoot: chainhash.Hash([32]byte{ // Make go vet happy. - 0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0, - 0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22, - 0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85, - 0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3, + Version: 1, + NumPrevBlocks: 2, + PrevBlocks: []daghash.Hash{ + [32]byte{ // Make go vet happy. + 0x0f, 0xea, 0x33, 0x21, 0x67, 0xd0, 0x25, 0x05, + 0x89, 0x83, 0x19, 0x9f, 0x47, 0x50, 0x3a, 0x3b, + 0x91, 0xd2, 0x0a, 0xec, 0x57, 0x7a, 0x10, 0x89, + 0xd6, 0x36, 0xf8, 0x6d, 0x29, 0x00, 0x00, 0x00, + }, // MainNet genesis + [32]byte{ // Make go vet happy. + 0x42, 0xeb, 0x66, 0x1f, 0x71, 0xb2, 0xb9, 0x26, + 0x05, 0xae, 0xe7, 0xf4, 0x8a, 0x6b, 0x7c, 0x86, + 0x33, 0x54, 0x0a, 0x27, 0xe5, 0x47, 0xb9, 0xb5, + 0xee, 0x13, 0x18, 0x83, 0x46, 0x00, 0x00, 0x00, + }}, // SimNet genesis + MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy. 
+ 0xc0, 0x92, 0x53, 0x8f, 0x6f, 0xf7, 0xf5, 0x24, + 0xd5, 0x33, 0xd4, 0x8b, 0xf3, 0xc0, 0xf8, 0xf9, + 0x6f, 0xff, 0xfb, 0xb7, 0xdc, 0x39, 0x9d, 0x76, + 0x8d, 0xb0, 0xe1, 0x9c, 0x2e, 0x6d, 0x22, 0xd9, }), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766 - Timestamp: time.Unix(1293623863, 0), // 2010-12-29 11:57:43 +0000 UTC - Bits: 0x1b04864c, // 453281356 - Nonce: 0x10572b0f, // 274148111 + Timestamp: time.Unix(0x5b50aa0e, 0), // 2018-06-20 08:32:43 +0000 UTC + Bits: 0x1e00ffff, // 503382015 + Nonce: 0x800c2a77, // 714047 }, Transactions: []*wire.MsgTx{ { @@ -261,7 +265,7 @@ var Block100000 = wire.MsgBlock{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{ @@ -295,7 +299,7 @@ var Block100000 = wire.MsgBlock{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash([32]byte{ // Make go vet happy. + Hash: daghash.Hash([32]byte{ // Make go vet happy. 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, @@ -364,7 +368,7 @@ var Block100000 = wire.MsgBlock{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash([32]byte{ // Make go vet happy. + Hash: daghash.Hash([32]byte{ // Make go vet happy. 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, @@ -432,7 +436,7 @@ var Block100000 = wire.MsgBlock{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash([32]byte{ // Make go vet happy. + Hash: daghash.Hash([32]byte{ // Make go vet happy. 
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, diff --git a/blockchain/versionbits.go b/blockdag/versionbits.go similarity index 89% rename from blockchain/versionbits.go rename to blockdag/versionbits.go index 61423d098..44c324b6c 100644 --- a/blockchain/versionbits.go +++ b/blockdag/versionbits.go @@ -2,12 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package blockdag import ( "math" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" ) const ( @@ -44,7 +44,7 @@ const ( // unknown rule activations. type bitConditionChecker struct { bit uint32 - chain *BlockChain + chain *BlockDAG } // Ensure the bitConditionChecker type implements the thresholdConditionChecker @@ -82,7 +82,7 @@ func (c bitConditionChecker) EndTime() uint64 { // // This is part of the thresholdConditionChecker interface implementation. func (c bitConditionChecker) RuleChangeActivationThreshold() uint32 { - return c.chain.chainParams.RuleChangeActivationThreshold + return c.chain.dagParams.RuleChangeActivationThreshold } // MinerConfirmationWindow is the number of blocks in each threshold state @@ -93,7 +93,7 @@ func (c bitConditionChecker) RuleChangeActivationThreshold() uint32 { // // This is part of the thresholdConditionChecker interface implementation. 
func (c bitConditionChecker) MinerConfirmationWindow() uint32 { - return c.chain.chainParams.MinerConfirmationWindow + return c.chain.dagParams.MinerConfirmationWindow } // Condition returns true when the specific bit associated with the checker is @@ -113,7 +113,7 @@ func (c bitConditionChecker) Condition(node *blockNode) (bool, error) { return false, nil } - expectedVersion, err := c.chain.calcNextBlockVersion(node.parent) + expectedVersion, err := c.chain.calcNextBlockVersion(node.selectedParent) if err != nil { return false, err } @@ -124,8 +124,8 @@ func (c bitConditionChecker) Condition(node *blockNode) (bool, error) { // test a specific deployment rule. This is required for properly detecting // and activating consensus rule changes. type deploymentChecker struct { - deployment *chaincfg.ConsensusDeployment - chain *BlockChain + deployment *dagconfig.ConsensusDeployment + chain *BlockDAG } // Ensure the deploymentChecker type implements the thresholdConditionChecker @@ -163,7 +163,7 @@ func (c deploymentChecker) EndTime() uint64 { // // This is part of the thresholdConditionChecker interface implementation. func (c deploymentChecker) RuleChangeActivationThreshold() uint32 { - return c.chain.chainParams.RuleChangeActivationThreshold + return c.chain.dagParams.RuleChangeActivationThreshold } // MinerConfirmationWindow is the number of blocks in each threshold state @@ -174,7 +174,7 @@ func (c deploymentChecker) RuleChangeActivationThreshold() uint32 { // // This is part of the thresholdConditionChecker interface implementation. func (c deploymentChecker) MinerConfirmationWindow() uint32 { - return c.chain.chainParams.MinerConfirmationWindow + return c.chain.dagParams.MinerConfirmationWindow } // Condition returns true when the specific bit defined by the deployment @@ -197,13 +197,13 @@ func (c deploymentChecker) Condition(node *blockNode) (bool, error) { // while this function accepts any block node. 
// // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) calcNextBlockVersion(prevNode *blockNode) (int32, error) { +func (b *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) { // Set the appropriate bits for each actively defined rule deployment // that is either in the process of being voted on, or locked in for the // activation at the next threshold window change. expectedVersion := uint32(vbTopBits) - for id := 0; id < len(b.chainParams.Deployments); id++ { - deployment := &b.chainParams.Deployments[id] + for id := 0; id < len(b.dagParams.Deployments); id++ { + deployment := &b.dagParams.Deployments[id] cache := &b.deploymentCaches[id] checker := deploymentChecker{deployment: deployment, chain: b} state, err := b.thresholdState(prevNode, checker, cache) @@ -222,10 +222,10 @@ func (b *BlockChain) calcNextBlockVersion(prevNode *blockNode) (int32, error) { // rule change deployments. // // This function is safe for concurrent access. -func (b *BlockChain) CalcNextBlockVersion() (int32, error) { - b.chainLock.Lock() - version, err := b.calcNextBlockVersion(b.bestChain.Tip()) - b.chainLock.Unlock() +func (b *BlockDAG) CalcNextBlockVersion() (int32, error) { + b.dagLock.Lock() + version, err := b.calcNextBlockVersion(b.dag.SelectedTip()) + b.dagLock.Unlock() return version, err } @@ -235,13 +235,13 @@ func (b *BlockChain) CalcNextBlockVersion() (int32, error) { // activated. // // This function MUST be called with the chain state lock held (for writes) -func (b *BlockChain) warnUnknownRuleActivations(node *blockNode) error { +func (b *BlockDAG) warnUnknownRuleActivations(node *blockNode) error { // Warn if any unknown new rules are either about to activate or have // already been activated. 
for bit := uint32(0); bit < vbNumBits; bit++ { checker := bitConditionChecker{bit: bit, chain: b} cache := &b.warningCaches[bit] - state, err := b.thresholdState(node.parent, checker, cache) + state, err := b.thresholdState(node.selectedParent, checker, cache) if err != nil { return err } @@ -269,7 +269,7 @@ func (b *BlockChain) warnUnknownRuleActivations(node *blockNode) error { // blocks have unexpected versions. // // This function MUST be called with the chain state lock held (for writes) -func (b *BlockChain) warnUnknownVersions(node *blockNode) error { +func (b *BlockDAG) warnUnknownVersions(node *blockNode) error { // Nothing to do if already warned. if b.unknownVersionsWarned { return nil @@ -278,7 +278,7 @@ func (b *BlockChain) warnUnknownVersions(node *blockNode) error { // Warn if enough previous blocks have unexpected versions. numUpgraded := uint32(0) for i := uint32(0); i < unknownVerNumToCheck && node != nil; i++ { - expectedVersion, err := b.calcNextBlockVersion(node.parent) + expectedVersion, err := b.calcNextBlockVersion(node.selectedParent) if err != nil { return err } @@ -288,7 +288,7 @@ func (b *BlockChain) warnUnknownVersions(node *blockNode) error { numUpgraded++ } - node = node.parent + node = node.selectedParent } if numUpgraded > unknownVerWarnNum { log.Warn("Unknown block versions are being mined, so new " + diff --git a/btcd.go b/btcd.go index 7b73b8242..718bd5ec8 100644 --- a/btcd.go +++ b/btcd.go @@ -15,7 +15,7 @@ import ( "runtime/debug" "runtime/pprof" - "github.com/daglabs/btcd/blockchain/indexers" + "github.com/daglabs/btcd/blockdag/indexers" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/limits" ) diff --git a/btcec/example_test.go b/btcec/example_test.go index 92b09af82..d392d5719 100644 --- a/btcec/example_test.go +++ b/btcec/example_test.go @@ -9,7 +9,7 @@ import ( "fmt" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // This example 
demonstrates signing a message with a secp256k1 private key that @@ -26,7 +26,7 @@ func Example_signMessage() { // Sign a message using the private key. message := "test message" - messageHash := chainhash.DoubleHashB([]byte(message)) + messageHash := daghash.DoubleHashB([]byte(message)) signature, err := privKey.Sign(messageHash) if err != nil { fmt.Println(err) @@ -79,7 +79,7 @@ func Example_verifySignature() { // Verify the signature for the message using the public key. message := "test message" - messageHash := chainhash.DoubleHashB([]byte(message)) + messageHash := daghash.DoubleHashB([]byte(message)) verified := signature.Verify(messageHash, pubKey) fmt.Println("Signature Verified?", verified) diff --git a/btcjson/btcdextcmds.go b/btcjson/btcdextcmds.go index 963ccb3a3..83fd0985a 100644 --- a/btcjson/btcdextcmds.go +++ b/btcjson/btcdextcmds.go @@ -4,7 +4,7 @@ // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by -// a chain server with btcd extensions. +// a dag server with btcd extensions. package btcjson diff --git a/btcjson/chainsvrcmds.go b/btcjson/dagsvrcmds.go similarity index 95% rename from btcjson/chainsvrcmds.go rename to btcjson/dagsvrcmds.go index cb9c74236..3dbc9f2dc 100644 --- a/btcjson/chainsvrcmds.go +++ b/btcjson/dagsvrcmds.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by -// a chain server. +// a dag server. package btcjson @@ -147,13 +147,13 @@ func NewGetBlockCmd(hash string, verbose, verboseTx *bool) *GetBlockCmd { } } -// GetBlockChainInfoCmd defines the getblockchaininfo JSON-RPC command. -type GetBlockChainInfoCmd struct{} +// GetBlockDAGInfoCmd defines the getblockdaginfo JSON-RPC command. +type GetBlockDAGInfoCmd struct{} -// NewGetBlockChainInfoCmd returns a new instance which can be used to issue a -// getblockchaininfo JSON-RPC command. 
-func NewGetBlockChainInfoCmd() *GetBlockChainInfoCmd { - return &GetBlockChainInfoCmd{} +// NewGetBlockDAGInfoCmd returns a new instance which can be used to issue a +// getblockdaginfo JSON-RPC command. +func NewGetBlockDAGInfoCmd() *GetBlockDAGInfoCmd { + return &GetBlockDAGInfoCmd{} } // GetBlockCountCmd defines the getblockcount JSON-RPC command. @@ -312,13 +312,13 @@ func NewGetCFilterHeaderCmd(hash string, } } -// GetChainTipsCmd defines the getchaintips JSON-RPC command. -type GetChainTipsCmd struct{} +// GetDAGTipsCmd defines the getdagtips JSON-RPC command. +type GetDAGTipsCmd struct{} -// NewGetChainTipsCmd returns a new instance which can be used to issue a -// getchaintips JSON-RPC command. -func NewGetChainTipsCmd() *GetChainTipsCmd { - return &GetChainTipsCmd{} +// NewGetDAGTipsCmd returns a new instance which can be used to issue a +// getdagtips JSON-RPC command. +func NewGetDAGTipsCmd() *GetDAGTipsCmd { + return &GetDAGTipsCmd{} } // GetConnectionCountCmd defines the getconnectioncount JSON-RPC command. @@ -725,19 +725,19 @@ func NewValidateAddressCmd(address string) *ValidateAddressCmd { } } -// VerifyChainCmd defines the verifychain JSON-RPC command. -type VerifyChainCmd struct { +// VerifyDAGCmd defines the verifydag JSON-RPC command. +type VerifyDAGCmd struct { CheckLevel *int32 `jsonrpcdefault:"3"` CheckDepth *int32 `jsonrpcdefault:"288"` // 0 = all } -// NewVerifyChainCmd returns a new instance which can be used to issue a -// verifychain JSON-RPC command. +// NewVerifyDAGCmd returns a new instance which can be used to issue a +// verifydag JSON-RPC command. // // The parameters which are pointers indicate they are optional. Passing nil // for optional parameters will use the default value. 
-func NewVerifyChainCmd(checkLevel, checkDepth *int32) *VerifyChainCmd { - return &VerifyChainCmd{ +func NewVerifyDAGCmd(checkLevel, checkDepth *int32) *VerifyDAGCmd { + return &VerifyDAGCmd{ CheckLevel: checkLevel, CheckDepth: checkDepth, } @@ -784,14 +784,14 @@ func init() { MustRegisterCmd("getaddednodeinfo", (*GetAddedNodeInfoCmd)(nil), flags) MustRegisterCmd("getbestblockhash", (*GetBestBlockHashCmd)(nil), flags) MustRegisterCmd("getblock", (*GetBlockCmd)(nil), flags) - MustRegisterCmd("getblockchaininfo", (*GetBlockChainInfoCmd)(nil), flags) + MustRegisterCmd("getblockdaginfo", (*GetBlockDAGInfoCmd)(nil), flags) MustRegisterCmd("getblockcount", (*GetBlockCountCmd)(nil), flags) MustRegisterCmd("getblockhash", (*GetBlockHashCmd)(nil), flags) MustRegisterCmd("getblockheader", (*GetBlockHeaderCmd)(nil), flags) MustRegisterCmd("getblocktemplate", (*GetBlockTemplateCmd)(nil), flags) MustRegisterCmd("getcfilter", (*GetCFilterCmd)(nil), flags) MustRegisterCmd("getcfilterheader", (*GetCFilterHeaderCmd)(nil), flags) - MustRegisterCmd("getchaintips", (*GetChainTipsCmd)(nil), flags) + MustRegisterCmd("getdagtips", (*GetDAGTipsCmd)(nil), flags) MustRegisterCmd("getconnectioncount", (*GetConnectionCountCmd)(nil), flags) MustRegisterCmd("getdifficulty", (*GetDifficultyCmd)(nil), flags) MustRegisterCmd("getgenerate", (*GetGenerateCmd)(nil), flags) @@ -822,7 +822,7 @@ func init() { MustRegisterCmd("submitblock", (*SubmitBlockCmd)(nil), flags) MustRegisterCmd("uptime", (*UptimeCmd)(nil), flags) MustRegisterCmd("validateaddress", (*ValidateAddressCmd)(nil), flags) - MustRegisterCmd("verifychain", (*VerifyChainCmd)(nil), flags) + MustRegisterCmd("verifydag", (*VerifyDAGCmd)(nil), flags) MustRegisterCmd("verifymessage", (*VerifyMessageCmd)(nil), flags) MustRegisterCmd("verifytxoutproof", (*VerifyTxOutProofCmd)(nil), flags) } diff --git a/btcjson/chainsvrcmds_test.go b/btcjson/dagsvrcmds_test.go similarity index 96% rename from btcjson/chainsvrcmds_test.go rename to 
btcjson/dagsvrcmds_test.go index 32158706d..d80c6518c 100644 --- a/btcjson/chainsvrcmds_test.go +++ b/btcjson/dagsvrcmds_test.go @@ -15,11 +15,11 @@ import ( "github.com/daglabs/btcd/wire" ) -// TestChainSvrCmds tests all of the chain server commands marshal and unmarshal +// TestDAGSvrCmds tests all of the dag server commands marshal and unmarshal // into valid results include handling of optional fields being omitted in the // marshalled command, while optional fields with defaults have the default // assigned on unmarshalled commands. -func TestChainSvrCmds(t *testing.T) { +func TestDAGSvrCmds(t *testing.T) { t.Parallel() testID := int(1) @@ -189,15 +189,15 @@ func TestChainSvrCmds(t *testing.T) { }, }, { - name: "getblockchaininfo", + name: "getblockdaginfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblockchaininfo") + return btcjson.NewCmd("getblockdaginfo") }, staticCmd: func() interface{} { - return btcjson.NewGetBlockChainInfoCmd() + return btcjson.NewGetBlockDAGInfoCmd() }, - marshalled: `{"jsonrpc":"1.0","method":"getblockchaininfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetBlockChainInfoCmd{}, + marshalled: `{"jsonrpc":"1.0","method":"getblockdaginfo","params":[],"id":1}`, + unmarshalled: &btcjson.GetBlockDAGInfoCmd{}, }, { name: "getblockcount", @@ -351,15 +351,15 @@ func TestChainSvrCmds(t *testing.T) { }, }, { - name: "getchaintips", + name: "getdagtips", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getchaintips") + return btcjson.NewCmd("getdagtips") }, staticCmd: func() interface{} { - return btcjson.NewGetChainTipsCmd() + return btcjson.NewGetDAGTipsCmd() }, - marshalled: `{"jsonrpc":"1.0","method":"getchaintips","params":[],"id":1}`, - unmarshalled: &btcjson.GetChainTipsCmd{}, + marshalled: `{"jsonrpc":"1.0","method":"getdagtips","params":[],"id":1}`, + unmarshalled: &btcjson.GetDAGTipsCmd{}, }, { name: "getconnectioncount", @@ -1017,43 +1017,43 @@ func TestChainSvrCmds(t *testing.T) { }, }, { - 
name: "verifychain", + name: "verifydag", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifychain") + return btcjson.NewCmd("verifydag") }, staticCmd: func() interface{} { - return btcjson.NewVerifyChainCmd(nil, nil) + return btcjson.NewVerifyDAGCmd(nil, nil) }, - marshalled: `{"jsonrpc":"1.0","method":"verifychain","params":[],"id":1}`, - unmarshalled: &btcjson.VerifyChainCmd{ + marshalled: `{"jsonrpc":"1.0","method":"verifydag","params":[],"id":1}`, + unmarshalled: &btcjson.VerifyDAGCmd{ CheckLevel: btcjson.Int32(3), CheckDepth: btcjson.Int32(288), }, }, { - name: "verifychain optional1", + name: "verifydag optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifychain", 2) + return btcjson.NewCmd("verifydag", 2) }, staticCmd: func() interface{} { - return btcjson.NewVerifyChainCmd(btcjson.Int32(2), nil) + return btcjson.NewVerifyDAGCmd(btcjson.Int32(2), nil) }, - marshalled: `{"jsonrpc":"1.0","method":"verifychain","params":[2],"id":1}`, - unmarshalled: &btcjson.VerifyChainCmd{ + marshalled: `{"jsonrpc":"1.0","method":"verifydag","params":[2],"id":1}`, + unmarshalled: &btcjson.VerifyDAGCmd{ CheckLevel: btcjson.Int32(2), CheckDepth: btcjson.Int32(288), }, }, { - name: "verifychain optional2", + name: "verifydag optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifychain", 2, 500) + return btcjson.NewCmd("verifydag", 2, 500) }, staticCmd: func() interface{} { - return btcjson.NewVerifyChainCmd(btcjson.Int32(2), btcjson.Int32(500)) + return btcjson.NewVerifyDAGCmd(btcjson.Int32(2), btcjson.Int32(500)) }, - marshalled: `{"jsonrpc":"1.0","method":"verifychain","params":[2,500],"id":1}`, - unmarshalled: &btcjson.VerifyChainCmd{ + marshalled: `{"jsonrpc":"1.0","method":"verifydag","params":[2,500],"id":1}`, + unmarshalled: &btcjson.VerifyDAGCmd{ CheckLevel: btcjson.Int32(2), CheckDepth: btcjson.Int32(500), }, @@ -1156,9 +1156,9 @@ func TestChainSvrCmds(t *testing.T) { } } -// TestChainSvrCmdErrors 
ensures any errors that occur in the command during +// TestDAGSvrCmdErrors ensures any errors that occur in the command during // custom mashal and unmarshal are as expected. -func TestChainSvrCmdErrors(t *testing.T) { +func TestDAGSvrCmdErrors(t *testing.T) { t.Parallel() tests := []struct { diff --git a/btcjson/chainsvrresults.go b/btcjson/dagsvrresults.go similarity index 96% rename from btcjson/chainsvrresults.go rename to btcjson/dagsvrresults.go index 1b4e7e2e8..2dbeea4a2 100644 --- a/btcjson/chainsvrresults.go +++ b/btcjson/dagsvrresults.go @@ -95,19 +95,19 @@ type Bip9SoftForkDescription struct { Since int32 `json:"since"` } -// GetBlockChainInfoResult models the data returned from the getblockchaininfo +// GetBlockDAGInfoResult models the data returned from the getblockdaginfo // command. -type GetBlockChainInfoResult struct { - Chain string `json:"chain"` +type GetBlockDAGInfoResult struct { + DAG string `json:"dag"` Blocks int32 `json:"blocks"` Headers int32 `json:"headers"` - BestBlockHash string `json:"bestblockhash"` + TipHashes []string `json:"tiphashes"` Difficulty float64 `json:"difficulty"` MedianTime int64 `json:"mediantime"` VerificationProgress float64 `json:"verificationprogress,omitempty"` Pruned bool `json:"pruned"` PruneHeight int32 `json:"pruneheight,omitempty"` - ChainWork string `json:"chainwork,omitempty"` + DAGWork string `json:"dagwork,omitempty"` SoftForks []*SoftForkDescription `json:"softforks"` Bip9SoftForks map[string]*Bip9SoftForkDescription `json:"bip9_softforks"` } @@ -423,8 +423,8 @@ type GetWorkResult struct { Target string `json:"target"` } -// InfoChainResult models the data returned by the chain server getinfo command. -type InfoChainResult struct { +// InfoDAGResult models the data returned by the dag server getinfo command. 
+type InfoDAGResult struct { Version int32 `json:"version"` ProtocolVersion int32 `json:"protocolversion"` Blocks int32 `json:"blocks"` @@ -449,7 +449,7 @@ type TxRawResult struct { Vout []Vout `json:"vout"` BlockHash string `json:"blockhash,omitempty"` Confirmations uint64 `json:"confirmations,omitempty"` - Time int64 `json:"time,omitempty"` + Time uint64 `json:"time,omitempty"` Blocktime uint64 `json:"blocktime,omitempty"` } @@ -479,9 +479,9 @@ type TxRawDecodeResult struct { Vout []Vout `json:"vout"` } -// ValidateAddressChainResult models the data returned by the chain server +// ValidateAddressResult models the data returned by the dag server // validateaddress command. -type ValidateAddressChainResult struct { +type ValidateAddressResult struct { IsValid bool `json:"isvalid"` Address string `json:"address,omitempty"` } diff --git a/btcjson/chainsvrresults_test.go b/btcjson/dagsvrresults_test.go similarity index 94% rename from btcjson/chainsvrresults_test.go rename to btcjson/dagsvrresults_test.go index b91d0b46e..32a3b1dcb 100644 --- a/btcjson/chainsvrresults_test.go +++ b/btcjson/dagsvrresults_test.go @@ -11,10 +11,10 @@ import ( "github.com/daglabs/btcd/btcjson" ) -// TestChainSvrCustomResults ensures any results that have custom marshalling +// TestDAGSvrCustomResults ensures any results that have custom marshalling // work as inteded. // and unmarshal code of results are as expected. -func TestChainSvrCustomResults(t *testing.T) { +func TestDAGSvrCustomResults(t *testing.T) { t.Parallel() tests := []struct { diff --git a/btcjson/chainsvrwscmds.go b/btcjson/dagsvrwscmds.go similarity index 99% rename from btcjson/chainsvrwscmds.go rename to btcjson/dagsvrwscmds.go index bf973e255..81d7702a8 100644 --- a/btcjson/chainsvrwscmds.go +++ b/btcjson/dagsvrwscmds.go @@ -4,7 +4,7 @@ // license that can be found in the LICENSE file. 
// NOTE: This file is intended to house the RPC commands that are supported by -// a chain server, but are only available via websockets. +// a dag server, but are only available via websockets. package btcjson diff --git a/btcjson/chainsvrwscmds_test.go b/btcjson/dagsvrwscmds_test.go similarity index 98% rename from btcjson/chainsvrwscmds_test.go rename to btcjson/dagsvrwscmds_test.go index d5a50b0b2..dd9c5cf1a 100644 --- a/btcjson/chainsvrwscmds_test.go +++ b/btcjson/dagsvrwscmds_test.go @@ -15,11 +15,11 @@ import ( "github.com/daglabs/btcd/btcjson" ) -// TestChainSvrWsCmds tests all of the chain server websocket-specific commands +// TestDAGSvrWsCmds tests all of the dag server websocket-specific commands // marshal and unmarshal into valid results include handling of optional fields // being omitted in the marshalled command, while optional fields with defaults // have the default assigned on unmarshalled commands. -func TestChainSvrWsCmds(t *testing.T) { +func TestDAGSvrWsCmds(t *testing.T) { t.Parallel() testID := int(1) diff --git a/btcjson/chainsvrwsntfns.go b/btcjson/dagsvrwsntfns.go similarity index 92% rename from btcjson/chainsvrwsntfns.go rename to btcjson/dagsvrwsntfns.go index 1f1562343..b4fa25587 100644 --- a/btcjson/chainsvrwsntfns.go +++ b/btcjson/dagsvrwsntfns.go @@ -4,35 +4,35 @@ // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC websocket notifications that are -// supported by a chain server. +// supported by a dag server. package btcjson const ( // BlockConnectedNtfnMethod is the legacy, deprecated method used for - // notifications from the chain server that a block has been connected. + // notifications from the dag server that a block has been connected. // // NOTE: Deprecated. Use FilteredBlockConnectedNtfnMethod instead. 
BlockConnectedNtfnMethod = "blockconnected" // BlockDisconnectedNtfnMethod is the legacy, deprecated method used for - // notifications from the chain server that a block has been + // notifications from the dag server that a block has been // disconnected. // // NOTE: Deprecated. Use FilteredBlockDisconnectedNtfnMethod instead. BlockDisconnectedNtfnMethod = "blockdisconnected" // FilteredBlockConnectedNtfnMethod is the new method used for - // notifications from the chain server that a block has been connected. + // notifications from the dag server that a block has been connected. FilteredBlockConnectedNtfnMethod = "filteredblockconnected" // FilteredBlockDisconnectedNtfnMethod is the new method used for - // notifications from the chain server that a block has been + // notifications from the dag server that a block has been // disconnected. FilteredBlockDisconnectedNtfnMethod = "filteredblockdisconnected" // RecvTxNtfnMethod is the legacy, deprecated method used for - // notifications from the chain server that a transaction which pays to + // notifications from the dag server that a transaction which pays to // a registered address has been processed. // // NOTE: Deprecated. Use RelevantTxAcceptedNtfnMethod and @@ -40,7 +40,7 @@ const ( RecvTxNtfnMethod = "recvtx" // RedeemingTxNtfnMethod is the legacy, deprecated method used for - // notifications from the chain server that a transaction which spends a + // notifications from the dag server that a transaction which spends a // registered outpoint has been processed. // // NOTE: Deprecated. Use RelevantTxAcceptedNtfnMethod and @@ -48,31 +48,31 @@ const ( RedeemingTxNtfnMethod = "redeemingtx" // RescanFinishedNtfnMethod is the legacy, deprecated method used for - // notifications from the chain server that a legacy, deprecated rescan + // notifications from the dag server that a legacy, deprecated rescan // operation has finished. // // NOTE: Deprecated. Not used with rescanblocks command. 
RescanFinishedNtfnMethod = "rescanfinished" // RescanProgressNtfnMethod is the legacy, deprecated method used for - // notifications from the chain server that a legacy, deprecated rescan + // notifications from the dag server that a legacy, deprecated rescan // operation this is underway has made progress. // // NOTE: Deprecated. Not used with rescanblocks command. RescanProgressNtfnMethod = "rescanprogress" // TxAcceptedNtfnMethod is the method used for notifications from the - // chain server that a transaction has been accepted into the mempool. + // dag server that a transaction has been accepted into the mempool. TxAcceptedNtfnMethod = "txaccepted" // TxAcceptedVerboseNtfnMethod is the method used for notifications from - // the chain server that a transaction has been accepted into the + // the dag server that a transaction has been accepted into the // mempool. This differs from TxAcceptedNtfnMethod in that it provides // more details in the notification. TxAcceptedVerboseNtfnMethod = "txacceptedverbose" // RelevantTxAcceptedNtfnMethod is the new method used for notifications - // from the chain server that inform a client that a transaction that + // from the dag server that inform a client that a transaction that // matches the loaded filter was accepted by the mempool. 
RelevantTxAcceptedNtfnMethod = "relevanttxaccepted" ) diff --git a/btcjson/chainsvrwsntfns_test.go b/btcjson/dagsvrwsntfns_test.go similarity index 98% rename from btcjson/chainsvrwsntfns_test.go rename to btcjson/dagsvrwsntfns_test.go index 7c5fb324f..925816ec2 100644 --- a/btcjson/chainsvrwsntfns_test.go +++ b/btcjson/dagsvrwsntfns_test.go @@ -15,11 +15,11 @@ import ( "github.com/daglabs/btcd/btcjson" ) -// TestChainSvrWsNtfns tests all of the chain server websocket-specific +// TestDAGSvrWsNtfns tests all of the dag server websocket-specific // notifications marshal and unmarshal into valid results include handling of // optional fields being omitted in the marshalled command, while optional // fields with defaults have the default assigned on unmarshalled commands. -func TestChainSvrWsNtfns(t *testing.T) { +func TestDAGSvrWsNtfns(t *testing.T) { t.Parallel() tests := []struct { diff --git a/btcjson/chainsvrwsresults.go b/btcjson/dagsvrwsresults.go similarity index 100% rename from btcjson/chainsvrwsresults.go rename to btcjson/dagsvrwsresults.go diff --git a/btcjson/chainsvrwsresults_test.go b/btcjson/dagsvrwsresults_test.go similarity index 89% rename from btcjson/chainsvrwsresults_test.go rename to btcjson/dagsvrwsresults_test.go index f2e93b65e..d3fcf3250 100644 --- a/btcjson/chainsvrwsresults_test.go +++ b/btcjson/dagsvrwsresults_test.go @@ -12,9 +12,9 @@ import ( "github.com/daglabs/btcd/btcjson" ) -// TestChainSvrWsResults ensures any results that have custom marshalling +// TestDAGSvrWsResults ensures any results that have custom marshalling // work as inteded. -func TestChainSvrWsResults(t *testing.T) { +func TestDAGSvrWsResults(t *testing.T) { t.Parallel() tests := []struct { diff --git a/btcjson/doc.go b/btcjson/doc.go index eca45930e..d3b4d22bd 100644 --- a/btcjson/doc.go +++ b/btcjson/doc.go @@ -107,7 +107,7 @@ function. 
Command Inspection All registered commands are registered with flags that identify information such -as whether the command applies to a chain server, wallet server, or is a +as whether the command applies to a dag server, wallet server, or is a notification along with the method name to use. These flags can be obtained with the MethodUsageFlags flags, and the method can be obtained with the CmdMethod function. @@ -143,4 +143,4 @@ The second category of errors (type RPCError), on the other hand, are useful for returning errors to RPC clients. Consequently, they are used in the previously described Response type. */ -package btcjson \ No newline at end of file +package btcjson diff --git a/btcjson/walletsvrwsntfns.go b/btcjson/walletsvrwsntfns.go index 8df8ebe68..97e483092 100644 --- a/btcjson/walletsvrwsntfns.go +++ b/btcjson/walletsvrwsntfns.go @@ -13,7 +13,7 @@ const ( AccountBalanceNtfnMethod = "accountbalance" // BtcdConnectedNtfnMethod is the method used for notifications when - // a wallet server is connected to a chain server. + // a wallet server is connected to a dag server. BtcdConnectedNtfnMethod = "btcdconnected" // WalletLockStateNtfnMethod is the method used to notify the lock state diff --git a/btcjson/walletsvrwsntfns_test.go b/btcjson/walletsvrwsntfns_test.go index 32b38c240..4d9384302 100644 --- a/btcjson/walletsvrwsntfns_test.go +++ b/btcjson/walletsvrwsntfns_test.go @@ -14,7 +14,7 @@ import ( "github.com/daglabs/btcd/btcjson" ) -// TestWalletSvrWsNtfns tests all of the chain server websocket-specific +// TestWalletSvrWsNtfns tests all of the dag server websocket-specific // notifications marshal and unmarshal into valid results include handling of // optional fields being omitted in the marshalled command, while optional // fields with defaults have the default assigned on unmarshalled commands. 
diff --git a/chaincfg/genesis_test.go b/chaincfg/genesis_test.go deleted file mode 100644 index d04a72f75..000000000 --- a/chaincfg/genesis_test.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) 2014-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package chaincfg - -import ( - "bytes" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// TestGenesisBlock tests the genesis block of the main network for validity by -// checking the encoded bytes and hashes. -func TestGenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := MainNetParams.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestGenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. - if !bytes.Equal(buf.Bytes(), genesisBlockBytes) { - t.Fatalf("TestGenesisBlock: Genesis block does not appear valid - "+ - "got %v, want %v", spew.Sdump(buf.Bytes()), - spew.Sdump(genesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := MainNetParams.GenesisBlock.BlockHash() - if !MainNetParams.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestGenesisBlock: Genesis block hash does not "+ - "appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(MainNetParams.GenesisHash)) - } -} - -// TestRegTestGenesisBlock tests the genesis block of the regression test -// network for validity by checking the encoded bytes and hashes. -func TestRegTestGenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := RegressionNetParams.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestRegTestGenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. 
- if !bytes.Equal(buf.Bytes(), regTestGenesisBlockBytes) { - t.Fatalf("TestRegTestGenesisBlock: Genesis block does not "+ - "appear valid - got %v, want %v", - spew.Sdump(buf.Bytes()), - spew.Sdump(regTestGenesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := RegressionNetParams.GenesisBlock.BlockHash() - if !RegressionNetParams.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestRegTestGenesisBlock: Genesis block hash does "+ - "not appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(RegressionNetParams.GenesisHash)) - } -} - -// TestTestNet3GenesisBlock tests the genesis block of the test network (version -// 3) for validity by checking the encoded bytes and hashes. -func TestTestNet3GenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := TestNet3Params.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestTestNet3GenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. - if !bytes.Equal(buf.Bytes(), testNet3GenesisBlockBytes) { - t.Fatalf("TestTestNet3GenesisBlock: Genesis block does not "+ - "appear valid - got %v, want %v", - spew.Sdump(buf.Bytes()), - spew.Sdump(testNet3GenesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := TestNet3Params.GenesisBlock.BlockHash() - if !TestNet3Params.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestTestNet3GenesisBlock: Genesis block hash does "+ - "not appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(TestNet3Params.GenesisHash)) - } -} - -// TestSimNetGenesisBlock tests the genesis block of the simulation test network -// for validity by checking the encoded bytes and hashes. -func TestSimNetGenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. 
- var buf bytes.Buffer - err := SimNetParams.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestSimNetGenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. - if !bytes.Equal(buf.Bytes(), simNetGenesisBlockBytes) { - t.Fatalf("TestSimNetGenesisBlock: Genesis block does not "+ - "appear valid - got %v, want %v", - spew.Sdump(buf.Bytes()), - spew.Sdump(simNetGenesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := SimNetParams.GenesisBlock.BlockHash() - if !SimNetParams.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestSimNetGenesisBlock: Genesis block hash does "+ - "not appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(SimNetParams.GenesisHash)) - } -} - -// genesisBlockBytes are the wire encoded bytes for the genesis block of the -// main network as of protocol version 60002. -var genesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0x29, 0xab, 0x5f, 0x49, /* |K.^J)._I| */ - 0xff, 0xff, 0x00, 0x1d, 0x1d, 0xac, 0x2b, 0x7c, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 
0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} - -// regTestGenesisBlockBytes are the wire encoded bytes for the genesis block of -// the regression test network as of protocol version 60002. 
-var regTestGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0xda, 0xe5, 0x49, 0x4d, /* |K.^J)._I| */ - 0xff, 0xff, 0x7f, 0x20, 0x02, 0x00, 0x00, 0x00, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 
0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} - -// testNet3GenesisBlockBytes are the wire encoded bytes for the genesis block of -// the test network (version 3) as of protocol version 60002. -var testNet3GenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0xda, 0xe5, 0x49, 0x4d, /* |K.^J)._I| */ - 0xff, 0xff, 0x00, 0x1d, 0x1a, 0xa4, 0xae, 0x18, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 
0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} - -// simNetGenesisBlockBytes are the wire encoded bytes for the genesis block of -// the simulation test network as of protocol version 70002. 
-var simNetGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0x45, 0x06, 0x86, 0x53, /* |K.^J)._I| */ - 0xff, 0xff, 0x7f, 0x20, 0x02, 0x00, 0x00, 0x00, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 
0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} diff --git a/cmd/addblock/addblock.go b/cmd/addblock/addblock.go index d8b558032..cba2e3288 100644 --- a/cmd/addblock/addblock.go +++ b/cmd/addblock/addblock.go @@ -9,8 +9,8 @@ import ( "path/filepath" "runtime" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/indexers" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/indexers" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/limits" "github.com/btcsuite/btclog" @@ -73,7 +73,7 @@ func realMain() error { defer os.Stdout.Sync() log = backendLogger.Logger("MAIN") database.UseLogger(backendLogger.Logger("BCDB")) - blockchain.UseLogger(backendLogger.Logger("CHAN")) + blockdag.UseLogger(backendLogger.Logger("CHAN")) indexers.UseLogger(backendLogger.Logger("INDX")) // Load the block database. diff --git a/cmd/addblock/config.go b/cmd/addblock/config.go index ff82ffb1c..5d4dda5f4 100644 --- a/cmd/addblock/config.go +++ b/cmd/addblock/config.go @@ -9,7 +9,7 @@ import ( "os" "path/filepath" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcd/wire" @@ -27,7 +27,7 @@ var ( btcdHomeDir = btcutil.AppDataDir("btcd", false) defaultDataDir = filepath.Join(btcdHomeDir, "data") knownDbTypes = database.SupportedDrivers() - activeNetParams = &chaincfg.MainNetParams + activeNetParams = &dagconfig.MainNetParams ) // config defines the configuration options for findcheckpoint. 
@@ -69,13 +69,13 @@ func validDbType(dbType string) bool { // netName returns the name used when referring to a bitcoin network. At the // time of writing, btcd currently places blocks for testnet version 3 in the // data and log directory "testnet", which does not match the Name field of the -// chaincfg parameters. This function can be used to override this directory name +// dagconfig parameters. This function can be used to override this directory name // as "testnet" when the passed active network matches wire.TestNet3. // // A proper upgrade to move the data and log directories for this network to // "testnet3" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. -func netName(chainParams *chaincfg.Params) string { +func netName(chainParams *dagconfig.Params) string { switch chainParams.Net { case wire.TestNet3: return "testnet" @@ -111,15 +111,15 @@ func loadConfig() (*config, []string, error) { // while we're at it if cfg.TestNet3 { numNets++ - activeNetParams = &chaincfg.TestNet3Params + activeNetParams = &dagconfig.TestNet3Params } if cfg.RegressionTest { numNets++ - activeNetParams = &chaincfg.RegressionNetParams + activeNetParams = &dagconfig.RegressionNetParams } if cfg.SimNet { numNets++ - activeNetParams = &chaincfg.SimNetParams + activeNetParams = &dagconfig.SimNetParams } if numNets > 1 { str := "%s: The testnet, regtest, and simnet params can't be " + diff --git a/cmd/addblock/import.go b/cmd/addblock/import.go index 0e4ad07b7..97c07bddb 100644 --- a/cmd/addblock/import.go +++ b/cmd/addblock/import.go @@ -11,15 +11,15 @@ import ( "sync" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/indexers" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/indexers" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" 
"github.com/daglabs/btcutil" ) -var zeroHash = chainhash.Hash{} +var zeroHash = daghash.Hash{} // importResults houses the stats and result as an import operation. type importResults struct { @@ -32,7 +32,7 @@ type importResults struct { // file to the block database. type blockImporter struct { db database.DB - chain *blockchain.BlockChain + dag *blockdag.BlockDAG r io.ReadSeeker processQueue chan []byte doneChan chan bool @@ -105,7 +105,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { // Skip blocks that already exist. blockHash := block.Hash() - exists, err := bi.chain.HaveBlock(blockHash) + exists, err := bi.dag.HaveBlock(blockHash) if err != nil { return false, err } @@ -116,7 +116,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { // Don't bother trying to process orphans. prevHash := &block.MsgBlock().Header.PrevBlock if !prevHash.IsEqual(&zeroHash) { - exists, err := bi.chain.HaveBlock(prevHash) + exists, err := bi.dag.HaveBlock(prevHash) if err != nil { return false, err } @@ -129,15 +129,11 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { // Ensure the blocks follows all of the chain rules and match up to the // known checkpoints. - isMainChain, isOrphan, err := bi.chain.ProcessBlock(block, - blockchain.BFFastAdd) + isOrphan, err := bi.dag.ProcessBlock(block, + blockdag.BFFastAdd) if err != nil { return false, err } - if !isMainChain { - return false, fmt.Errorf("import file contains an block that "+ - "does not extend the main chain: %v", blockHash) - } if isOrphan { return false, fmt.Errorf("import file contains an orphan "+ "block: %v", blockHash) @@ -325,15 +321,15 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) { } // Create an index manager if any of the optional indexes are enabled. 
- var indexManager blockchain.IndexManager + var indexManager blockdag.IndexManager if len(indexes) > 0 { indexManager = indexers.NewManager(db, indexes) } - chain, err := blockchain.New(&blockchain.Config{ + dag, err := blockdag.New(&blockdag.Config{ DB: db, - ChainParams: activeNetParams, - TimeSource: blockchain.NewMedianTime(), + DAGParams: activeNetParams, + TimeSource: blockdag.NewMedianTime(), IndexManager: indexManager, }) if err != nil { @@ -347,7 +343,7 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) { doneChan: make(chan bool), errChan: make(chan error), quit: make(chan struct{}), - chain: chain, + dag: dag, lastLogTime: time.Now(), }, nil } diff --git a/cmd/findcheckpoint/config.go b/cmd/findcheckpoint/config.go index b3d3a63a0..0f97c59fd 100644 --- a/cmd/findcheckpoint/config.go +++ b/cmd/findcheckpoint/config.go @@ -9,7 +9,7 @@ import ( "os" "path/filepath" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcd/wire" @@ -28,7 +28,7 @@ var ( btcdHomeDir = btcutil.AppDataDir("btcd", false) defaultDataDir = filepath.Join(btcdHomeDir, "data") knownDbTypes = database.SupportedDrivers() - activeNetParams = &chaincfg.MainNetParams + activeNetParams = &dagconfig.MainNetParams ) // config defines the configuration options for findcheckpoint. @@ -58,13 +58,13 @@ func validDbType(dbType string) bool { // netName returns the name used when referring to a bitcoin network. At the // time of writing, btcd currently places blocks for testnet version 3 in the // data and log directory "testnet", which does not match the Name field of the -// chaincfg parameters. This function can be used to override this directory name +// dagconfig parameters. This function can be used to override this directory name // as "testnet" when the passed active network matches wire.TestNet3. 
// // A proper upgrade to move the data and log directories for this network to // "testnet3" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. -func netName(chainParams *chaincfg.Params) string { +func netName(chainParams *dagconfig.Params) string { switch chainParams.Net { case wire.TestNet3: return "testnet" @@ -99,15 +99,15 @@ func loadConfig() (*config, []string, error) { // while we're at it if cfg.TestNet3 { numNets++ - activeNetParams = &chaincfg.TestNet3Params + activeNetParams = &dagconfig.TestNet3Params } if cfg.RegressionTest { numNets++ - activeNetParams = &chaincfg.RegressionNetParams + activeNetParams = &dagconfig.RegressionNetParams } if cfg.SimNet { numNets++ - activeNetParams = &chaincfg.SimNetParams + activeNetParams = &dagconfig.SimNetParams } if numNets > 1 { str := "%s: The testnet, regtest, and simnet params can't be " + diff --git a/cmd/findcheckpoint/findcheckpoint.go b/cmd/findcheckpoint/findcheckpoint.go index 89edbce31..af3f3c0ab 100644 --- a/cmd/findcheckpoint/findcheckpoint.go +++ b/cmd/findcheckpoint/findcheckpoint.go @@ -9,9 +9,9 @@ import ( "os" "path/filepath" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" ) @@ -39,19 +39,19 @@ func loadBlockDB() (database.DB, error) { // candidates at the last checkpoint that is already hard coded into btcchain // since there is no point in finding candidates before already existing // checkpoints. -func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([]*chaincfg.Checkpoint, error) { +func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) { // Start with the latest block of the main chain. 
- block, err := chain.BlockByHash(latestHash) + block, err := dag.BlockByHash(latestHash) if err != nil { return nil, err } // Get the latest known checkpoint. - latestCheckpoint := chain.LatestCheckpoint() + latestCheckpoint := dag.LatestCheckpoint() if latestCheckpoint == nil { // Set the latest checkpoint to the genesis block if there isn't // already one. - latestCheckpoint = &chaincfg.Checkpoint{ + latestCheckpoint = &dagconfig.Checkpoint{ Hash: activeNetParams.GenesisHash, Height: 0, } @@ -59,7 +59,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([ // The latest known block must be at least the last known checkpoint // plus required checkpoint confirmations. - checkpointConfirmations := int32(blockchain.CheckpointConfirmations) + checkpointConfirmations := int32(blockdag.CheckpointConfirmations) requiredHeight := latestCheckpoint.Height + checkpointConfirmations if block.Height() < requiredHeight { return nil, fmt.Errorf("the block database is only at height "+ @@ -83,7 +83,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([ defer fmt.Println() // Loop backwards through the chain to find checkpoint candidates. - candidates := make([]*chaincfg.Checkpoint, 0, cfg.NumCandidates) + candidates := make([]*dagconfig.Checkpoint, 0, cfg.NumCandidates) numTested := int32(0) for len(candidates) < cfg.NumCandidates && block.Height() > requiredHeight { // Display progress. @@ -92,7 +92,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([ } // Determine if this block is a checkpoint candidate. - isCandidate, err := chain.IsCheckpointCandidate(block) + isCandidate, err := dag.IsCheckpointCandidate(block) if err != nil { return nil, err } @@ -100,7 +100,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([ // All checks passed, so this node seems like a reasonable // checkpoint candidate. 
if isCandidate { - checkpoint := chaincfg.Checkpoint{ + checkpoint := dagconfig.Checkpoint{ Height: block.Height(), Hash: block.Hash(), } @@ -108,7 +108,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([ } prevHash := &block.MsgBlock().Header.PrevBlock - block, err = chain.BlockByHash(prevHash) + block, err = dag.BlockByHash(prevHash) if err != nil { return nil, err } @@ -120,7 +120,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([ // showCandidate display a checkpoint candidate using and output format // determined by the configuration parameters. The Go syntax output // uses the format the btcchain code expects for checkpoints added to the list. -func showCandidate(candidateNum int, checkpoint *chaincfg.Checkpoint) { +func showCandidate(candidateNum int, checkpoint *dagconfig.Checkpoint) { if cfg.UseGoOutput { fmt.Printf("Candidate %d -- {%d, newShaHashFromStr(\"%v\")},\n", candidateNum, checkpoint.Height, checkpoint.Hash) @@ -150,10 +150,10 @@ func main() { // Setup chain. Ignore notifications since they aren't needed for this // util. - chain, err := blockchain.New(&blockchain.Config{ - DB: db, - ChainParams: activeNetParams, - TimeSource: blockchain.NewMedianTime(), + dag, err := blockdag.New(&blockdag.Config{ + DB: db, + DAGParams: activeNetParams, + TimeSource: blockdag.NewMedianTime(), }) if err != nil { fmt.Fprintf(os.Stderr, "failed to initialize chain: %v\n", err) @@ -162,11 +162,11 @@ func main() { // Get the latest block hash and height from the database and report // status. - best := chain.BestSnapshot() - fmt.Printf("Block database loaded with block height %d\n", best.Height) + dagState := dag.GetDAGState() + fmt.Printf("Block database loaded with block height %d\n", dagState.SelectedTip.Height) // Find checkpoint candidates. 
- candidates, err := findCandidates(chain, &best.Hash) + candidates, err := findCandidates(dag, &dagState.SelectedTip.Hash) if err != nil { fmt.Fprintln(os.Stderr, "Unable to identify candidates:", err) return diff --git a/config.go b/config.go index 43321a26f..38f772d72 100644 --- a/config.go +++ b/config.go @@ -21,8 +21,8 @@ import ( "time" "github.com/btcsuite/go-socks/socks" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/connmgr" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" @@ -157,7 +157,7 @@ type config struct { lookup func(string) ([]net.IP, error) oniondial func(string, string, time.Duration) (net.Conn, error) dial func(string, string, time.Duration) (net.Conn, error) - addCheckpoints []chaincfg.Checkpoint + addCheckpoints []dagconfig.Checkpoint miningAddrs []btcutil.Address minRelayTxFee btcutil.Amount whitelists []*net.IPNet @@ -313,43 +313,43 @@ func normalizeAddresses(addrs []string, defaultPort string) []string { } // newCheckpointFromStr parses checkpoints in the ':' format. 
-func newCheckpointFromStr(checkpoint string) (chaincfg.Checkpoint, error) { +func newCheckpointFromStr(checkpoint string) (dagconfig.Checkpoint, error) { parts := strings.Split(checkpoint, ":") if len(parts) != 2 { - return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+ + return dagconfig.Checkpoint{}, fmt.Errorf("unable to parse "+ "checkpoint %q -- use the syntax :", checkpoint) } height, err := strconv.ParseInt(parts[0], 10, 32) if err != nil { - return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+ + return dagconfig.Checkpoint{}, fmt.Errorf("unable to parse "+ "checkpoint %q due to malformed height", checkpoint) } if len(parts[1]) == 0 { - return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+ + return dagconfig.Checkpoint{}, fmt.Errorf("unable to parse "+ "checkpoint %q due to missing hash", checkpoint) } - hash, err := chainhash.NewHashFromStr(parts[1]) + hash, err := daghash.NewHashFromStr(parts[1]) if err != nil { - return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+ + return dagconfig.Checkpoint{}, fmt.Errorf("unable to parse "+ "checkpoint %q due to malformed hash", checkpoint) } - return chaincfg.Checkpoint{ + return dagconfig.Checkpoint{ Height: int32(height), Hash: hash, }, nil } // parseCheckpoints checks the checkpoint strings for valid syntax -// (':') and parses them to chaincfg.Checkpoint instances. -func parseCheckpoints(checkpointStrings []string) ([]chaincfg.Checkpoint, error) { +// (':') and parses them to dagconfig.Checkpoint instances. 
+func parseCheckpoints(checkpointStrings []string) ([]dagconfig.Checkpoint, error) { if len(checkpointStrings) == 0 { return nil, nil } - checkpoints := make([]chaincfg.Checkpoint, len(checkpointStrings)) + checkpoints := make([]dagconfig.Checkpoint, len(checkpointStrings)) for i, cpString := range checkpointStrings { checkpoint, err := newCheckpointFromStr(cpString) if err != nil { diff --git a/connmgr/seed.go b/connmgr/seed.go index 48e2292d4..d743ee743 100644 --- a/connmgr/seed.go +++ b/connmgr/seed.go @@ -11,7 +11,7 @@ import ( "strconv" "time" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/wire" ) @@ -30,7 +30,7 @@ type OnSeed func(addrs []*wire.NetAddress) type LookupFunc func(string) ([]net.IP, error) // SeedFromDNS uses DNS seeding to populate the address manager with peers. -func SeedFromDNS(chainParams *chaincfg.Params, reqServices wire.ServiceFlag, +func SeedFromDNS(chainParams *dagconfig.Params, reqServices wire.ServiceFlag, lookupFn LookupFunc, seedFn OnSeed) { for _, dnsseed := range chainParams.DNSSeeds { diff --git a/chaincfg/README.md b/dagconfig/README.md similarity index 100% rename from chaincfg/README.md rename to dagconfig/README.md diff --git a/chaincfg/chainhash/README.md b/dagconfig/daghash/README.md similarity index 100% rename from chaincfg/chainhash/README.md rename to dagconfig/daghash/README.md diff --git a/chaincfg/chainhash/doc.go b/dagconfig/daghash/doc.go similarity index 62% rename from chaincfg/chainhash/doc.go rename to dagconfig/daghash/doc.go index c3eb43d32..f679572c4 100644 --- a/chaincfg/chainhash/doc.go +++ b/dagconfig/daghash/doc.go @@ -1,5 +1,5 @@ -// Package chainhash provides abstracted hash functionality. +// Package daghash provides abstracted hash functionality. // // This package provides a generic hash type and associated functions that // allows the specific hash algorithm to be abstracted. 
-package chainhash +package daghash diff --git a/chaincfg/chainhash/hash.go b/dagconfig/daghash/hash.go similarity index 99% rename from chaincfg/chainhash/hash.go rename to dagconfig/daghash/hash.go index 2b1cec022..fa6a5efb0 100644 --- a/chaincfg/chainhash/hash.go +++ b/dagconfig/daghash/hash.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package chainhash +package daghash import ( "encoding/hex" diff --git a/chaincfg/chainhash/hash_test.go b/dagconfig/daghash/hash_test.go similarity index 99% rename from chaincfg/chainhash/hash_test.go rename to dagconfig/daghash/hash_test.go index 07f54c776..b48a627cd 100644 --- a/chaincfg/chainhash/hash_test.go +++ b/dagconfig/daghash/hash_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package chainhash +package daghash import ( "bytes" diff --git a/chaincfg/chainhash/hashfuncs.go b/dagconfig/daghash/hashfuncs.go similarity index 97% rename from chaincfg/chainhash/hashfuncs.go rename to dagconfig/daghash/hashfuncs.go index bf74f73c3..3325232cf 100644 --- a/chaincfg/chainhash/hashfuncs.go +++ b/dagconfig/daghash/hashfuncs.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package chainhash +package daghash import "crypto/sha256" diff --git a/chaincfg/chainhash/hashfuncs_test.go b/dagconfig/daghash/hashfuncs_test.go similarity index 99% rename from chaincfg/chainhash/hashfuncs_test.go rename to dagconfig/daghash/hashfuncs_test.go index bcd6f2220..05e9a9172 100644 --- a/chaincfg/chainhash/hashfuncs_test.go +++ b/dagconfig/daghash/hashfuncs_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package chainhash +package daghash import ( "fmt" diff --git a/chaincfg/doc.go b/dagconfig/doc.go similarity index 85% rename from chaincfg/doc.go rename to dagconfig/doc.go index 32cea4975..d6b3253ea 100644 --- a/chaincfg/doc.go +++ b/dagconfig/doc.go @@ -1,4 +1,4 @@ -// Package chaincfg defines chain configuration parameters. +// Package dagconfig defines chain configuration parameters. // // In addition to the main Bitcoin network, which is intended for the transfer // of monetary value, there also exists two currently active standard networks: @@ -7,11 +7,11 @@ // handle errors where input intended for one network is used on an application // instance running on a different network. // -// For library packages, chaincfg provides the ability to lookup chain +// For library packages, dagconfig provides the ability to lookup chain // parameters and encoding magics when passed a *Params. Older APIs not updated // to the new convention of passing a *Params may lookup the parameters for a // wire.BitcoinNet using ParamsForNet, but be aware that this usage is -// deprecated and will be removed from chaincfg in the future. +// deprecated and will be removed from dagconfig in the future. // // For main packages, a (typically global) var may be assigned the address of // one of the standard Param vars for use as the application's "active" network. @@ -26,20 +26,20 @@ // "log" // // "github.com/daglabs/btcutil" -// "github.com/daglabs/btcd/chaincfg" +// "github.com/daglabs/btcd/dagconfig" // ) // // var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network") // // // By default (without -testnet), use mainnet. -// var chainParams = &chaincfg.MainNetParams +// var chainParams = &dagconfig.MainNetParams // // func main() { // flag.Parse() // // // Modify active network parameters if operating on testnet. // if *testnet { -// chainParams = &chaincfg.TestNet3Params +// chainParams = &dagconfig.TestNet3Params // } // // // later... 
@@ -58,4 +58,4 @@ // non-standard network. As a general rule of thumb, all network parameters // should be unique to the network, but parameter collisions can still occur // (unfortunately, this is the case with regtest and testnet3 sharing magics). -package chaincfg \ No newline at end of file +package dagconfig \ No newline at end of file diff --git a/chaincfg/genesis.go b/dagconfig/genesis.go similarity index 56% rename from chaincfg/genesis.go rename to dagconfig/genesis.go index 3d52b0437..61b0df439 100644 --- a/chaincfg/genesis.go +++ b/dagconfig/genesis.go @@ -2,13 +2,13 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package chaincfg +package dagconfig import ( "math" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -19,7 +19,7 @@ var genesisCoinbaseTx = wire.MsgTx{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{ @@ -58,43 +58,44 @@ var genesisCoinbaseTx = wire.MsgTx{ // genesisHash is the hash of the first block in the block chain for the main // network (genesis block). -var genesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, - 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, - 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, +var genesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0x32, 0x93, 0xa9, 0x6f, 0x69, 0xf3, 0x80, 0x4b, + 0x50, 0x35, 0x29, 0x16, 0xcd, 0x68, 0x4c, 0x36, + 0x99, 0xf7, 0x05, 0x08, 0xea, 0xbd, 0x8a, 0x78, + 0xc0, 0x39, 0xfb, 0x39, 0xf5, 0x84, 0x04, 0x2f, }) // genesisMerkleRoot is the hash of the first transaction in the genesis block // for the main network. -var genesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
- 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, - 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, - 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, - 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, +var genesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0x69, 0x1f, 0x2e, 0x9c, 0x49, 0x12, 0x54, 0xc5, + 0xde, 0x1d, 0x8f, 0xd0, 0x95, 0x40, 0x07, 0xf0, + 0xda, 0x1b, 0x4c, 0x84, 0x3c, 0x07, 0x94, 0x87, + 0x6a, 0xd3, 0x0e, 0xca, 0x87, 0xcc, 0x4b, 0xb9, }) // genesisBlock defines the genesis block of the block chain which serves as the // public transaction ledger for the main network. var genesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 18:15:05 +0000 UTC - Bits: 0x1d00ffff, // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000] - Nonce: 0x7c2bac1d, // 2083236893 + Version: 1, + NumPrevBlocks: 0, + PrevBlocks: []daghash.Hash{}, + MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b + Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC + Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000] + Nonce: 0x400a4c8f, // 1074416783 }, Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, } // regTestGenesisHash is the hash of the first block in the block chain for the // regression test network (genesis block). -var regTestGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
- 0x06, 0x22, 0x6e, 0x46, 0x11, 0x1a, 0x0b, 0x59, - 0xca, 0xaf, 0x12, 0x60, 0x43, 0xeb, 0x5b, 0xbf, - 0x28, 0xc3, 0x4f, 0x3a, 0x5e, 0x33, 0x2a, 0x1f, - 0xc7, 0xb2, 0xb7, 0x3c, 0xf1, 0x88, 0x91, 0x0f, +var regTestGenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0x5a, 0x3a, 0x5f, 0x58, 0x59, 0xdd, 0x14, 0xc3, + 0x34, 0xf2, 0x44, 0x13, 0x5c, 0xc8, 0xa9, 0xd2, + 0xed, 0xb7, 0xc4, 0x88, 0x6f, 0xec, 0xd2, 0x80, + 0x0f, 0x39, 0x70, 0x95, 0x19, 0x0a, 0xbe, 0x1d, }) // regTestGenesisMerkleRoot is the hash of the first transaction in the genesis @@ -106,23 +107,24 @@ var regTestGenesisMerkleRoot = genesisMerkleRoot // as the public transaction ledger for the regression test network. var regTestGenesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC - Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] - Nonce: 2, + Version: 1, + NumPrevBlocks: 0, + PrevBlocks: []daghash.Hash{}, + MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b + Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC + Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] + Nonce: 0xdffffff9, }, Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, } // testNet3GenesisHash is the hash of the first block in the block chain for the // test network (version 3). -var testNet3GenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
- 0x43, 0x49, 0x7f, 0xd7, 0xf8, 0x26, 0x95, 0x71, - 0x08, 0xf4, 0xa3, 0x0f, 0xd9, 0xce, 0xc3, 0xae, - 0xba, 0x79, 0x97, 0x20, 0x84, 0xe9, 0x0e, 0xad, - 0x01, 0xea, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00, +var testNet3GenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0x91, 0x1c, 0xe0, 0x47, 0x77, 0xad, 0x5e, 0xc5, + 0x60, 0x10, 0x21, 0x32, 0x51, 0x1b, 0x39, 0x06, + 0x24, 0xb3, 0xbf, 0x08, 0x8e, 0x04, 0x8c, 0xd3, + 0x80, 0xb4, 0x83, 0x83, 0xed, 0x00, 0x00, 0x00, }) // testNet3GenesisMerkleRoot is the hash of the first transaction in the genesis @@ -134,23 +136,24 @@ var testNet3GenesisMerkleRoot = genesisMerkleRoot // serves as the public transaction ledger for the test network (version 3). var testNet3GenesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC - Bits: 0x1d00ffff, // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000] - Nonce: 0x18aea41a, // 414098458 + Version: 1, + NumPrevBlocks: 0, + PrevBlocks: []daghash.Hash{}, + MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b + Timestamp: time.Unix(0x5b28c706, 0), // 2018-06-19 09:04:06 +0000 UTC + Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000] + Nonce: 0x802f1b3b, // 2150570811 }, Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, } // simNetGenesisHash is the hash of the first block in the block chain for the // simulation test network. -var simNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
- 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, - 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, - 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, - 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, +var simNetGenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0x93, 0x0f, 0x30, 0xd1, 0x0b, 0xaf, 0x9d, 0x5b, + 0x58, 0xdc, 0xad, 0x78, 0xee, 0x16, 0xd0, 0x12, + 0x10, 0xac, 0x2c, 0xa3, 0x08, 0xc4, 0x83, 0x33, + 0x57, 0xb2, 0xaf, 0x5a, 0x22, 0xa2, 0xf9, 0x20, }) // simNetGenesisMerkleRoot is the hash of the first transaction in the genesis @@ -162,12 +165,13 @@ var simNetGenesisMerkleRoot = genesisMerkleRoot // as the public transaction ledger for the simulation test network. var simNetGenesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(1401292357, 0), // 2014-05-28 15:52:37 +0000 UTC - Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] - Nonce: 2, + Version: 1, + NumPrevBlocks: 0, + PrevBlocks: []daghash.Hash{}, + MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b + Timestamp: time.Unix(0x5b50a002, 0), // 2018-06-19 09:07:56 +0000 UTC + Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] + Nonce: 0x5ffffffd, // 2684354555 }, Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, } diff --git a/dagconfig/genesis_test.go b/dagconfig/genesis_test.go new file mode 100644 index 000000000..1eab65a53 --- /dev/null +++ b/dagconfig/genesis_test.go @@ -0,0 +1,252 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package dagconfig + +import ( + "bytes" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// TestGenesisBlock tests the genesis block of the main network for validity by +// checking the encoded bytes and hashes. +func TestGenesisBlock(t *testing.T) { + // Encode the genesis block to raw bytes. + var buf bytes.Buffer + err := MainNetParams.GenesisBlock.Serialize(&buf) + if err != nil { + t.Fatalf("TestGenesisBlock: %v", err) + } + + // Ensure the encoded block matches the expected bytes. + if !bytes.Equal(buf.Bytes(), genesisBlockBytes) { + t.Fatalf("TestGenesisBlock: Genesis block does not appear valid - "+ + "got %v, want %v", spew.Sdump(buf.Bytes()), + spew.Sdump(genesisBlockBytes)) + } + + // Check hash of the block against expected hash. + hash := MainNetParams.GenesisBlock.BlockHash() + if !MainNetParams.GenesisHash.IsEqual(&hash) { + t.Fatalf("TestGenesisBlock: Genesis block hash does not "+ + "appear valid - got %v, want %v", spew.Sdump(hash), + spew.Sdump(MainNetParams.GenesisHash)) + } +} + +// TestRegTestGenesisBlock tests the genesis block of the regression test +// network for validity by checking the encoded bytes and hashes. +func TestRegTestGenesisBlock(t *testing.T) { + // Encode the genesis block to raw bytes. + var buf bytes.Buffer + err := RegressionNetParams.GenesisBlock.Serialize(&buf) + if err != nil { + t.Fatalf("TestRegTestGenesisBlock: %v", err) + } + + // Ensure the encoded block matches the expected bytes. + if !bytes.Equal(buf.Bytes(), regTestGenesisBlockBytes) { + t.Fatalf("TestRegTestGenesisBlock: Genesis block does not "+ + "appear valid - got %v, want %v", + spew.Sdump(buf.Bytes()), + spew.Sdump(regTestGenesisBlockBytes)) + } + + // Check hash of the block against expected hash. 
+ hash := RegressionNetParams.GenesisBlock.BlockHash() + if !RegressionNetParams.GenesisHash.IsEqual(&hash) { + t.Fatalf("TestRegTestGenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", spew.Sdump(hash), + spew.Sdump(RegressionNetParams.GenesisHash)) + } +} + +// TestTestNet3GenesisBlock tests the genesis block of the test network (version +// 3) for validity by checking the encoded bytes and hashes. +func TestTestNet3GenesisBlock(t *testing.T) { + // Encode the genesis block to raw bytes. + var buf bytes.Buffer + err := TestNet3Params.GenesisBlock.Serialize(&buf) + if err != nil { + t.Fatalf("TestTestNet3GenesisBlock: %v", err) + } + + // Ensure the encoded block matches the expected bytes. + if !bytes.Equal(buf.Bytes(), testNet3GenesisBlockBytes) { + t.Fatalf("TestTestNet3GenesisBlock: Genesis block does not "+ + "appear valid - got %v, want %v", + spew.Sdump(buf.Bytes()), + spew.Sdump(testNet3GenesisBlockBytes)) + } + + // Check hash of the block against expected hash. + hash := TestNet3Params.GenesisBlock.BlockHash() + if !TestNet3Params.GenesisHash.IsEqual(&hash) { + t.Fatalf("TestTestNet3GenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", spew.Sdump(hash), + spew.Sdump(TestNet3Params.GenesisHash)) + } +} + +// TestSimNetGenesisBlock tests the genesis block of the simulation test network +// for validity by checking the encoded bytes and hashes. +func TestSimNetGenesisBlock(t *testing.T) { + // Encode the genesis block to raw bytes. + var buf bytes.Buffer + err := SimNetParams.GenesisBlock.Serialize(&buf) + if err != nil { + t.Fatalf("TestSimNetGenesisBlock: %v", err) + } + + // Ensure the encoded block matches the expected bytes. 
+ if !bytes.Equal(buf.Bytes(), simNetGenesisBlockBytes) { + t.Fatalf("TestSimNetGenesisBlock: Genesis block does not "+ + "appear valid - got %v, want %v", + spew.Sdump(buf.Bytes()), + spew.Sdump(simNetGenesisBlockBytes)) + } + + // Check hash of the block against expected hash. + hash := SimNetParams.GenesisBlock.BlockHash() + if !SimNetParams.GenesisHash.IsEqual(&hash) { + t.Fatalf("TestSimNetGenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", spew.Sdump(hash), + spew.Sdump(SimNetParams.GenesisHash)) + } +} + +// genesisBlockBytes are the wire encoded bytes for the genesis block of the +// main network as of protocol version 60002. +var genesisBlockBytes = []byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x3B, 0xA3, 0xED, 0xFD, 0x7A, 0x7B, 0x12, 0xB2, 0x7A, 0xC7, 0x2C, + 0x3E, 0x67, 0x76, 0x8F, 0x61, 0x7F, 0xC8, 0x1B, 0xC3, 0x88, 0x8A, 0x51, 0x32, 0x3A, 0x9F, 0xB8, + 0xAA, 0x4B, 0x1E, 0x5E, 0x4A, 0xC8, 0xC4, 0x28, 0x5B, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, + 0x1E, 0x50, 0x25, 0x19, 0xC0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x4D, + 0x04, 0xFF, 0xFF, 0x00, 0x1D, 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6D, 0x65, + 0x73, 0x20, 0x30, 0x33, 0x2F, 0x4A, 0x61, 0x6E, 0x2F, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, + 0x61, 0x6E, 0x63, 0x65, 0x6C, 0x6C, 0x6F, 0x72, 0x20, 0x6F, 0x6E, 0x20, 0x62, 0x72, 0x69, 0x6E, + 0x6B, 0x20, 0x6F, 0x66, 0x20, 0x73, 0x65, 0x63, 0x6F, 0x6E, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6C, + 0x6F, 0x75, 0x74, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x62, 0x61, 0x6E, 0x6B, 0x73, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0xF2, 0x05, 0x2A, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, + 0x04, 0x67, 0x8A, 0xFD, 0xB0, 0xFE, 0x55, 0x48, 0x27, 0x19, 0x67, 0xF1, 0xA6, 0x71, 0x30, 0xB7, + 0x10, 0x5C, 0xD6, 
0xA8, 0x28, 0xE0, 0x39, 0x09, 0xA6, 0x79, 0x62, 0xE0, 0xEA, 0x1F, 0x61, 0xDE, + 0xB6, 0x49, 0xF6, 0xBC, 0x3F, 0x4C, 0xEF, 0x38, 0xC4, 0xF3, 0x55, 0x04, 0xE5, 0x1E, 0xC1, 0x12, + 0xDE, 0x5C, 0x38, 0x4D, 0xF7, 0xBA, 0x0B, 0x8D, 0x57, 0x8A, 0x4C, 0x70, 0x2B, 0x6B, 0xF1, 0x1D, + 0x5F, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +} + +// regTestGenesisBlockBytes are the wire encoded bytes for the genesis block of +// the regression test network as of protocol version 60002. +var regTestGenesisBlockBytes = []byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, /* |.....;..| */ + 0xfd, 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, /* |.z{..z.,| */ + 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, /* |>gv.a...| */ + 0xc3, 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, /* |...Q2:..| */ + 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x36, 0xc6, 0x28, /* |.K.^J6.(| */ + 0x5b, 0xff, 0xff, 0x7f, 0x20, 0x01, 0x00, 0x00, /* |[... ...| */ + 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* |........| */ + 0xff, 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, /* |...M....| */ + 0x1d, 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, /* |...EThe | */ + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, /* |Times 03| */ + 0x2f, 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, /* |/Jan/200| */ + 0x39, 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, /* |9 Chance| */ + 0x6c, 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, /* |llor on | */ + 0x62, 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, /* |brink of| */ + 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, /* | second | */ + 0x62, 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, /* |bailout | */ + 0x66, 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, /* |for bank| */ + 0x73, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, /* |s.......| */ + 
0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, /* |.*....CA| */ + 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, /* |.g....UH| */ + 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, /* |'.g..q0.| */ + 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, /* |.\..(.9.| */ + 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, /* |.yb...a.| */ + 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, /* |.I..?L.8| */ + 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, /* |..U.....| */ + 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, /* |.\8M....| */ + 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, /* |W.Lp+k..| */ + 0x5f, 0xac, 0x00, 0x00, 0x00, 0x00, /* |_.....| */ +} + +// testNet3GenesisBlockBytes are the wire encoded bytes for the genesis block of +// the test network (version 3) as of protocol version 60002. +var testNet3GenesisBlockBytes = []byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, /* |.....;..| */ + 0xfd, 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, /* |.z{..z.,| */ + 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, /* |>gv.a...| */ + 0xc3, 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, /* |...Q2:..| */ + 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x06, 0xc7, 0x28, /* |.K.^J..(| */ + 0x5b, 0xff, 0xff, 0x00, 0x1e, 0x3b, 0x1b, 0x2f, /* |[....;./| */ + 0x80, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* |........| */ + 0xff, 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, /* |...M....| */ + 0x1d, 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, /* |...EThe | */ + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, /* |Times 03| */ + 0x2f, 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, /* |/Jan/200| */ + 0x39, 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, /* |9 Chance| */ + 0x6c, 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, /* |llor on | */ + 0x62, 0x72, 
0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, /* |brink of| */ + 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, /* | second | */ + 0x62, 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, /* |bailout | */ + 0x66, 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, /* |for bank| */ + 0x73, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, /* |s.......| */ + 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, /* |.*....CA| */ + 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, /* |.g....UH| */ + 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, /* |'.g..q0.| */ + 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, /* |.\..(.9.| */ + 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, /* |.yb...a.| */ + 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, /* |.I..?L.8| */ + 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, /* |..U.....| */ + 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, /* |.\8M....| */ + 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, /* |W.Lp+k..| */ + 0x5f, 0xac, 0x00, 0x00, 0x00, 0x00, /* |_.....| */ +} + +// simNetGenesisBlockBytes are the wire encoded bytes for the genesis block of +// the simulation test network as of protocol version 70002. +var simNetGenesisBlockBytes = []byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, /* |.....;..| */ + 0xfd, 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, /* |.z{..z.,| */ + 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, /* |>gv.a...| */ + 0xc3, 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, /* |...Q2:..| */ + 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0xec, 0xc7, 0x28, /* |.K.^J..(| */ + 0x5b, 0xff, 0xff, 0x7f, 0x20, 0xfb, 0xff, 0xff, /* |[... 
...| */ + 0x9f, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* |........| */ + 0xff, 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, /* |...M....| */ + 0x1d, 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, /* |...EThe | */ + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, /* |Times 03| */ + 0x2f, 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, /* |/Jan/200| */ + 0x39, 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, /* |9 Chance| */ + 0x6c, 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, /* |llor on | */ + 0x62, 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, /* |brink of| */ + 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, /* | second | */ + 0x62, 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, /* |bailout | */ + 0x66, 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, /* |for bank| */ + 0x73, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, /* |s.......| */ + 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, /* |.*....CA| */ + 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, /* |.g....UH| */ + 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, /* |'.g..q0.| */ + 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, /* |.\..(.9.| */ + 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, /* |.yb...a.| */ + 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, /* |.I..?L.8| */ + 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, /* |..U.....| */ + 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, /* |.\8M....| */ + 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, /* |W.Lp+k..| */ + 0x5f, 0xac, 0x00, 0x00, 0x00, 0x00, /* |_.....| */ +} diff --git a/chaincfg/params.go b/dagconfig/params.go similarity index 97% rename from chaincfg/params.go rename to dagconfig/params.go index d98d65edc..3e95882a1 100644 --- a/chaincfg/params.go +++ b/dagconfig/params.go @@ -2,7 +2,7 @@ // Use of this source 
code is governed by an ISC // license that can be found in the LICENSE file. -package chaincfg +package dagconfig import ( "errors" @@ -12,7 +12,7 @@ import ( "fmt" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -24,8 +24,8 @@ var ( bigOne = big.NewInt(1) // mainPowLimit is the highest proof of work value a Bitcoin block can - // have for the main network. It is the value 2^224 - 1. - mainPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne) + // have for the main network. It is the value 2^232 - 1. + mainPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 232), bigOne) // regressionPowLimit is the highest proof of work value a Bitcoin block // can have for the regression test network. It is the value 2^255 - 1. @@ -33,8 +33,8 @@ var ( // testNet3PowLimit is the highest proof of work value a Bitcoin block // can have for the test network (version 3). It is the value - // 2^224 - 1. - testNet3PowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne) + // 2^232 - 1. + testNet3PowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 232), bigOne) // simNetPowLimit is the highest proof of work value a Bitcoin block // can have for the simulation test network. It is the value 2^255 - 1. @@ -50,7 +50,7 @@ var ( // selection criteria. type Checkpoint struct { Height int32 - Hash *chainhash.Hash + Hash *daghash.Hash } // DNSSeed identifies a DNS seed. @@ -166,7 +166,7 @@ type Params struct { GenesisBlock *wire.MsgBlock // GenesisHash is the starting block hash. - GenesisHash *chainhash.Hash + GenesisHash *daghash.Hash // PowLimit defines the highest allowed proof of work value for a block // as a uint256. @@ -608,11 +608,11 @@ func HDPrivateKeyToPublicKeyID(id []byte) ([]byte, error) { } // newHashFromStr converts the passed big-endian hex string into a -// chainhash.Hash. It only differs from the one available in chainhash in that +// daghash.Hash. 
It only differs from the one available in daghash in that // it panics on an error since it will only (and must only) be called with // hard-coded, and therefore known good, hashes. -func newHashFromStr(hexStr string) *chainhash.Hash { - hash, err := chainhash.NewHashFromStr(hexStr) +func newHashFromStr(hexStr string) *daghash.Hash { + hash, err := daghash.NewHashFromStr(hexStr) if err != nil { // Ordinarily I don't like panics in library code since it // can take applications down without them having a chance to diff --git a/chaincfg/params_test.go b/dagconfig/params_test.go similarity index 98% rename from chaincfg/params_test.go rename to dagconfig/params_test.go index 277a56bdd..41127082f 100644 --- a/chaincfg/params_test.go +++ b/dagconfig/params_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package chaincfg +package dagconfig import "testing" diff --git a/chaincfg/register_test.go b/dagconfig/register_test.go similarity index 98% rename from chaincfg/register_test.go rename to dagconfig/register_test.go index 3bff7c114..0613b12bb 100644 --- a/chaincfg/register_test.go +++ b/dagconfig/register_test.go @@ -1,11 +1,11 @@ -package chaincfg_test +package dagconfig_test import ( "bytes" "reflect" "testing" - . "github.com/daglabs/btcd/chaincfg" + . 
"github.com/daglabs/btcd/dagconfig" ) // Define some of the required parameters for a user-registered diff --git a/database/cmd/dbtool/fetchblock.go b/database/cmd/dbtool/fetchblock.go index 952704a07..c9802c23c 100644 --- a/database/cmd/dbtool/fetchblock.go +++ b/database/cmd/dbtool/fetchblock.go @@ -9,7 +9,7 @@ import ( "errors" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" ) @@ -31,7 +31,7 @@ func (cmd *fetchBlockCmd) Execute(args []string) error { if len(args) < 1 { return errors.New("required block hash parameter not specified") } - blockHash, err := chainhash.NewHashFromStr(args[0]) + blockHash, err := daghash.NewHashFromStr(args[0]) if err != nil { return err } diff --git a/database/cmd/dbtool/fetchblockregion.go b/database/cmd/dbtool/fetchblockregion.go index 9dfb826cd..4c4f245f5 100644 --- a/database/cmd/dbtool/fetchblockregion.go +++ b/database/cmd/dbtool/fetchblockregion.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" ) @@ -44,7 +44,7 @@ func (cmd *blockRegionCmd) Execute(args []string) error { } // Parse arguments. 
- blockHash, err := chainhash.NewHashFromStr(args[0]) + blockHash, err := daghash.NewHashFromStr(args[0]) if err != nil { return err } @@ -78,7 +78,7 @@ func (cmd *blockRegionCmd) Execute(args []string) error { return err } log.Infof("Loaded block region in %v", time.Since(startTime)) - log.Infof("Double Hash: %s", chainhash.DoubleHashH(regionBytes)) + log.Infof("Double Hash: %s", daghash.DoubleHashH(regionBytes)) log.Infof("Region Hex: %s", hex.EncodeToString(regionBytes)) return nil }) diff --git a/database/cmd/dbtool/globalconfig.go b/database/cmd/dbtool/globalconfig.go index ec1fb8d61..5c9c87093 100644 --- a/database/cmd/dbtool/globalconfig.go +++ b/database/cmd/dbtool/globalconfig.go @@ -10,7 +10,7 @@ import ( "os" "path/filepath" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcd/wire" @@ -20,7 +20,7 @@ import ( var ( btcdHomeDir = btcutil.AppDataDir("btcd", false) knownDbTypes = database.SupportedDrivers() - activeNetParams = &chaincfg.MainNetParams + activeNetParams = &dagconfig.MainNetParams // Default global config. cfg = &config{ @@ -62,13 +62,13 @@ func validDbType(dbType string) bool { // netName returns the name used when referring to a bitcoin network. At the // time of writing, btcd currently places blocks for testnet version 3 in the // data and log directory "testnet", which does not match the Name field of the -// chaincfg parameters. This function can be used to override this directory name +// dagconfig parameters. This function can be used to override this directory name // as "testnet" when the passed active network matches wire.TestNet3. // // A proper upgrade to move the data and log directories for this network to // "testnet3" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. 
-func netName(chainParams *chaincfg.Params) string { +func netName(chainParams *dagconfig.Params) string { switch chainParams.Net { case wire.TestNet3: return "testnet" @@ -87,15 +87,15 @@ func setupGlobalConfig() error { numNets := 0 if cfg.TestNet3 { numNets++ - activeNetParams = &chaincfg.TestNet3Params + activeNetParams = &dagconfig.TestNet3Params } if cfg.RegressionTest { numNets++ - activeNetParams = &chaincfg.RegressionNetParams + activeNetParams = &dagconfig.RegressionNetParams } if cfg.SimNet { numNets++ - activeNetParams = &chaincfg.SimNetParams + activeNetParams = &dagconfig.SimNetParams } if numNets > 1 { return errors.New("The testnet, regtest, and simnet params " + diff --git a/database/cmd/dbtool/insecureimport.go b/database/cmd/dbtool/insecureimport.go index a04da4042..f60274ca5 100644 --- a/database/cmd/dbtool/insecureimport.go +++ b/database/cmd/dbtool/insecureimport.go @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -33,7 +33,7 @@ var ( // zeroHash is a simply a hash with all zeros. It is defined here to // avoid creating it multiple times. - zeroHash = chainhash.Hash{} + zeroHash = daghash.Hash{} ) // importResults houses the stats and result as an import operation. @@ -131,11 +131,11 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { } // Don't bother trying to process orphans. 
- prevHash := &block.MsgBlock().Header.PrevBlock - if !prevHash.IsEqual(&zeroHash) { + prevHashes := block.MsgBlock().Header.PrevBlocks + for _, prevHash := range prevHashes { var exists bool err := bi.db.View(func(tx database.Tx) error { - exists, err = tx.HasBlock(prevHash) + exists, err = tx.HasBlock(&prevHash) return err }) if err != nil { diff --git a/database/cmd/dbtool/loadheaders.go b/database/cmd/dbtool/loadheaders.go index 20f4edb1c..f348cdb85 100644 --- a/database/cmd/dbtool/loadheaders.go +++ b/database/cmd/dbtool/loadheaders.go @@ -7,7 +7,7 @@ package main import ( "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" ) @@ -52,7 +52,7 @@ func (cmd *headersCmd) Execute(args []string) error { numLoaded := 0 startTime := time.Now() blockIdxBucket.ForEach(func(k, v []byte) error { - var hash chainhash.Hash + var hash daghash.Hash copy(hash[:], k) _, err := tx.FetchBlockHeader(&hash) if err != nil { @@ -71,9 +71,9 @@ func (cmd *headersCmd) Execute(args []string) error { // Bulk load headers. err = db.View(func(tx database.Tx) error { blockIdxBucket := tx.Metadata().Bucket(blockIdxName) - hashes := make([]chainhash.Hash, 0, 500000) + hashes := make([]daghash.Hash, 0, 500000) blockIdxBucket.ForEach(func(k, v []byte) error { - var hash chainhash.Hash + var hash daghash.Hash copy(hash[:], k) hashes = append(hashes, hash) return nil diff --git a/database/example_test.go b/database/example_test.go index 7fe71eded..2deef653f 100644 --- a/database/example_test.go +++ b/database/example_test.go @@ -10,7 +10,7 @@ import ( "os" "path/filepath" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" _ "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcd/wire" @@ -135,7 +135,7 @@ func Example_blockStorageAndRetrieval() { // read-write transaction and store a genesis block in the database as // and example. 
err = db.Update(func(tx database.Tx) error { - genesisBlock := chaincfg.MainNetParams.GenesisBlock + genesisBlock := dagconfig.MainNetParams.GenesisBlock return tx.StoreBlock(btcutil.NewBlock(genesisBlock)) }) if err != nil { @@ -147,7 +147,7 @@ func Example_blockStorageAndRetrieval() { // transaction and fetch the block stored above. var loadedBlockBytes []byte err = db.Update(func(tx database.Tx) error { - genesisHash := chaincfg.MainNetParams.GenesisHash + genesisHash := dagconfig.MainNetParams.GenesisHash blockBytes, err := tx.FetchBlock(genesisHash) if err != nil { return err @@ -173,5 +173,5 @@ func Example_blockStorageAndRetrieval() { fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes)) // Output: - // Serialized block size: 285 bytes + // Serialized block size: 254 bytes } diff --git a/database/ffldb/bench_test.go b/database/ffldb/bench_test.go index 7bd91d206..1fd9f98c1 100644 --- a/database/ffldb/bench_test.go +++ b/database/ffldb/bench_test.go @@ -9,7 +9,7 @@ import ( "path/filepath" "testing" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" "github.com/daglabs/btcutil" ) @@ -28,7 +28,7 @@ func BenchmarkBlockHeader(b *testing.B) { defer os.RemoveAll(dbPath) defer db.Close() err = db.Update(func(tx database.Tx) error { - block := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + block := btcutil.NewBlock(dagconfig.MainNetParams.GenesisBlock) return tx.StoreBlock(block) }) if err != nil { @@ -38,7 +38,7 @@ func BenchmarkBlockHeader(b *testing.B) { b.ReportAllocs() b.ResetTimer() err = db.View(func(tx database.Tx) error { - blockHash := chaincfg.MainNetParams.GenesisHash + blockHash := dagconfig.MainNetParams.GenesisHash for i := 0; i < b.N; i++ { _, err := tx.FetchBlockHeader(blockHash) if err != nil { @@ -69,7 +69,7 @@ func BenchmarkBlock(b *testing.B) { defer os.RemoveAll(dbPath) defer db.Close() err = db.Update(func(tx database.Tx) error { - block := 
btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + block := btcutil.NewBlock(dagconfig.MainNetParams.GenesisBlock) return tx.StoreBlock(block) }) if err != nil { @@ -79,7 +79,7 @@ func BenchmarkBlock(b *testing.B) { b.ReportAllocs() b.ResetTimer() err = db.View(func(tx database.Tx) error { - blockHash := chaincfg.MainNetParams.GenesisHash + blockHash := dagconfig.MainNetParams.GenesisHash for i := 0; i < b.N; i++ { _, err := tx.FetchBlock(blockHash) if err != nil { diff --git a/database/ffldb/blockio.go b/database/ffldb/blockio.go index fbad57866..378b00b5e 100644 --- a/database/ffldb/blockio.go +++ b/database/ffldb/blockio.go @@ -17,7 +17,7 @@ import ( "path/filepath" "sync" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" ) @@ -505,7 +505,7 @@ func (s *blockStore) writeBlock(rawBlock []byte) (blockLocation, error) { // read from the file. // // Format: -func (s *blockStore) readBlock(hash *chainhash.Hash, loc blockLocation) ([]byte, error) { +func (s *blockStore) readBlock(hash *daghash.Hash, loc blockLocation) ([]byte, error) { // Get the referenced block file handle opening the file as needed. The // function also handles closing files as needed to avoid going over the // max allowed open files. 
diff --git a/database/ffldb/db.go b/database/ffldb/db.go index 4d11bc24e..834621219 100644 --- a/database/ffldb/db.go +++ b/database/ffldb/db.go @@ -14,11 +14,6 @@ import ( "sort" "sync" - "github.com/daglabs/btcd/chaincfg/chainhash" - "github.com/daglabs/btcd/database" - "github.com/daglabs/btcd/database/internal/treap" - "github.com/daglabs/btcd/wire" - "github.com/daglabs/btcutil" "github.com/btcsuite/goleveldb/leveldb" "github.com/btcsuite/goleveldb/leveldb/comparer" ldberrors "github.com/btcsuite/goleveldb/leveldb/errors" @@ -26,17 +21,17 @@ import ( "github.com/btcsuite/goleveldb/leveldb/iterator" "github.com/btcsuite/goleveldb/leveldb/opt" "github.com/btcsuite/goleveldb/leveldb/util" + "github.com/daglabs/btcd/dagconfig/daghash" + "github.com/daglabs/btcd/database" + "github.com/daglabs/btcd/database/internal/treap" + "github.com/daglabs/btcd/wire" + "github.com/daglabs/btcutil" ) const ( // metadataDbName is the name used for the metadata database. metadataDbName = "metadata" - // blockHdrSize is the size of a block header. This is simply the - // constant from wire and is only provided here for convenience since - // wire.MaxBlockHeaderPayload is quite long. - blockHdrSize = wire.MaxBlockHeaderPayload - // blockHdrOffset defines the offsets into a block index row for the // block header. // @@ -945,7 +940,7 @@ func (b *bucket) Delete(key []byte) error { // pendingBlock houses a block that will be written to disk when the database // transaction is committed. type pendingBlock struct { - hash *chainhash.Hash + hash *daghash.Hash bytes []byte } @@ -963,7 +958,7 @@ type transaction struct { // Blocks that need to be stored on commit. The pendingBlocks map is // kept to allow quick lookups of pending data by block hash. - pendingBlocks map[chainhash.Hash]int + pendingBlocks map[daghash.Hash]int pendingBlockData []pendingBlock // Keys that need to be stored or deleted on commit. 
@@ -1125,7 +1120,7 @@ func (tx *transaction) Metadata() database.Bucket { } // hasBlock returns whether or not a block with the given hash exists. -func (tx *transaction) hasBlock(hash *chainhash.Hash) bool { +func (tx *transaction) hasBlock(hash *daghash.Hash) bool { // Return true if the block is pending to be written on commit since // it exists from the viewpoint of this transaction. if _, exists := tx.pendingBlocks[*hash]; exists { @@ -1177,7 +1172,7 @@ func (tx *transaction) StoreBlock(block *btcutil.Block) error { // map so it is easy to determine the block is pending based on the // block hash. if tx.pendingBlocks == nil { - tx.pendingBlocks = make(map[chainhash.Hash]int) + tx.pendingBlocks = make(map[daghash.Hash]int) } tx.pendingBlocks[*blockHash] = len(tx.pendingBlockData) tx.pendingBlockData = append(tx.pendingBlockData, pendingBlock{ @@ -1196,7 +1191,7 @@ func (tx *transaction) StoreBlock(block *btcutil.Block) error { // - ErrTxClosed if the transaction has already been closed // // This function is part of the database.Tx interface implementation. -func (tx *transaction) HasBlock(hash *chainhash.Hash) (bool, error) { +func (tx *transaction) HasBlock(hash *daghash.Hash) (bool, error) { // Ensure transaction state is valid. if err := tx.checkClosed(); err != nil { return false, err @@ -1212,7 +1207,7 @@ func (tx *transaction) HasBlock(hash *chainhash.Hash) (bool, error) { // - ErrTxClosed if the transaction has already been closed // // This function is part of the database.Tx interface implementation. -func (tx *transaction) HasBlocks(hashes []chainhash.Hash) ([]bool, error) { +func (tx *transaction) HasBlocks(hashes []daghash.Hash) ([]bool, error) { // Ensure transaction state is valid. if err := tx.checkClosed(); err != nil { return nil, err @@ -1228,7 +1223,7 @@ func (tx *transaction) HasBlocks(hashes []chainhash.Hash) ([]bool, error) { // fetchBlockRow fetches the metadata stored in the block index for the provided // hash. 
It will return ErrBlockNotFound if there is no entry. -func (tx *transaction) fetchBlockRow(hash *chainhash.Hash) ([]byte, error) { +func (tx *transaction) fetchBlockRow(hash *daghash.Hash) ([]byte, error) { blockRow := tx.blockIdxBucket.Get(hash[:]) if blockRow == nil { str := fmt.Sprintf("block %s does not exist", hash) @@ -1238,6 +1233,25 @@ func (tx *transaction) fetchBlockRow(hash *chainhash.Hash) ([]byte, error) { return blockRow, nil } +// The offset in a block header at which numPrevBlocks resides. +const numPrevBlocksOffset = 4 + +// fetchBlockHeaderSize fetches the numPrevBlocks field out of the block header +// and uses it to compute the total size of the block header +func (tx *transaction) fetchBlockHeaderSize(hash *daghash.Hash) (byte, error) { + r, err := tx.FetchBlockRegion(&database.BlockRegion{ + Hash: hash, + Offset: numPrevBlocksOffset, + Len: 1, + }) + if err != nil { + return 0, err + } + + numPrevBlocks := r[0] + return numPrevBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload, nil +} + // FetchBlockHeader returns the raw serialized bytes for the block header // identified by the given hash. The raw bytes are in the format returned by // Serialize on a wire.BlockHeader. @@ -1254,14 +1268,42 @@ func (tx *transaction) fetchBlockRow(hash *chainhash.Hash) ([]byte, error) { // implementations. // // This function is part of the database.Tx interface implementation. 
-func (tx *transaction) FetchBlockHeader(hash *chainhash.Hash) ([]byte, error) { +func (tx *transaction) FetchBlockHeader(hash *daghash.Hash) ([]byte, error) { + headerSize, err := tx.fetchBlockHeaderSize(hash) + if err != nil { + return nil, err + } + return tx.FetchBlockRegion(&database.BlockRegion{ Hash: hash, Offset: 0, - Len: blockHdrSize, + Len: uint32(headerSize), }) } +// fetchBlockHeadersSizes fetches the numPrevBlocks fields out of the block headers +// and uses it to compute the total sizes of the block headers +func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, error) { + regions := make([]database.BlockRegion, len(hashes)) + for i := range hashes { + regions[i].Hash = &hashes[i] + regions[i].Offset = numPrevBlocksOffset + regions[i].Len = 1 + } + rs, err := tx.FetchBlockRegions(regions) + if err != nil { + return nil, err + } + + sizes := make([]byte, len(hashes)) + for i, r := range rs { + numPrevBlocks := r[0] + sizes[i] = numPrevBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload + } + + return sizes, nil +} + // FetchBlockHeaders returns the raw serialized bytes for the block headers // identified by the given hashes. The raw bytes are in the format returned by // Serialize on a wire.BlockHeader. @@ -1277,12 +1319,17 @@ func (tx *transaction) FetchBlockHeader(hash *chainhash.Hash) ([]byte, error) { // allows support for memory-mapped database implementations. // // This function is part of the database.Tx interface implementation. 
-func (tx *transaction) FetchBlockHeaders(hashes []chainhash.Hash) ([][]byte, error) { +func (tx *transaction) FetchBlockHeaders(hashes []daghash.Hash) ([][]byte, error) { + headerSizes, err := tx.fetchBlockHeadersSizes(hashes) + if err != nil { + return nil, err + } + regions := make([]database.BlockRegion, len(hashes)) for i := range hashes { regions[i].Hash = &hashes[i] regions[i].Offset = 0 - regions[i].Len = blockHdrSize + regions[i].Len = uint32(headerSizes[i]) } return tx.FetchBlockRegions(regions) } @@ -1305,7 +1352,7 @@ func (tx *transaction) FetchBlockHeaders(hashes []chainhash.Hash) ([][]byte, err // allows support for memory-mapped database implementations. // // This function is part of the database.Tx interface implementation. -func (tx *transaction) FetchBlock(hash *chainhash.Hash) ([]byte, error) { +func (tx *transaction) FetchBlock(hash *daghash.Hash) ([]byte, error) { // Ensure transaction state is valid. if err := tx.checkClosed(); err != nil { return nil, err @@ -1352,7 +1399,7 @@ func (tx *transaction) FetchBlock(hash *chainhash.Hash) ([]byte, error) { // allows support for memory-mapped database implementations. // // This function is part of the database.Tx interface implementation. -func (tx *transaction) FetchBlocks(hashes []chainhash.Hash) ([][]byte, error) { +func (tx *transaction) FetchBlocks(hashes []daghash.Hash) ([][]byte, error) { // Ensure transaction state is valid. 
if err := tx.checkClosed(); err != nil { return nil, err diff --git a/database/ffldb/driver_test.go b/database/ffldb/driver_test.go index fef7043e7..cc13a5c4c 100644 --- a/database/ffldb/driver_test.go +++ b/database/ffldb/driver_test.go @@ -12,7 +12,7 @@ import ( "runtime" "testing" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/database/ffldb" "github.com/daglabs/btcutil" @@ -171,8 +171,8 @@ func TestPersistence(t *testing.T) { "b1key2": "foo2", "b1key3": "foo3", } - genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) - genesisHash := chaincfg.MainNetParams.GenesisHash + genesisBlock := btcutil.NewBlock(dagconfig.MainNetParams.GenesisBlock) + genesisHash := dagconfig.MainNetParams.GenesisHash err = db.Update(func(tx database.Tx) error { metadataBucket := tx.Metadata() if metadataBucket == nil { diff --git a/database/ffldb/interface_test.go b/database/ffldb/interface_test.go index 57ee9cb60..f488e59bf 100644 --- a/database/ffldb/interface_test.go +++ b/database/ffldb/interface_test.go @@ -25,8 +25,8 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -63,7 +63,7 @@ func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcu // Set the first block as the genesis block. blocks := make([]*btcutil.Block, 0, 256) - genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesis := btcutil.NewBlock(dagconfig.MainNetParams.GenesisBlock) blocks = append(blocks, genesis) // Load the remaining blocks. @@ -1117,7 +1117,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool { // Test the individual block APIs one block at a time to ensure they // return the expected error. 
Also, build the data needed to test the // bulk APIs below while looping. - allBlockHashes := make([]chainhash.Hash, len(tc.blocks)) + allBlockHashes := make([]daghash.Hash, len(tc.blocks)) allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) for i, block := range tc.blocks { blockHash := block.Hash() @@ -1222,10 +1222,11 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { // Test the individual block APIs one block at a time. Also, build the // data needed to test the bulk APIs below while looping. - allBlockHashes := make([]chainhash.Hash, len(tc.blocks)) + allBlockHashes := make([]daghash.Hash, len(tc.blocks)) allBlockBytes := make([][]byte, len(tc.blocks)) allBlockTxLocs := make([][]wire.TxLoc, len(tc.blocks)) allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) + allBlockHeaderSizes := make([]int, len(tc.blocks)) for i, block := range tc.blocks { blockHash := block.Hash() allBlockHashes[i] = *blockHash @@ -1238,6 +1239,8 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { } allBlockBytes[i] = blockBytes + allBlockHeaderSizes[i] = block.MsgBlock().Header.SerializeSize() + txLocs, err := block.TxLoc() if err != nil { tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i, @@ -1260,9 +1263,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { return false } - // Ensure the block header fetched from the database matches the - // expected bytes. - wantHeaderBytes := blockBytes[0:wire.MaxBlockHeaderPayload] + wantHeaderBytes := blockBytes[0:allBlockHeaderSizes[i]] gotHeaderBytes, err := tx.FetchBlockHeader(blockHash) if err != nil { tc.t.Errorf("FetchBlockHeader(%s): unexpected error: %v", @@ -1319,7 +1320,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { // Ensure fetching a block that doesn't exist returns the // expected error. 
- badBlockHash := &chainhash.Hash{} + badBlockHash := &daghash.Hash{} testName := fmt.Sprintf("FetchBlock(%s) invalid block", badBlockHash) wantErrCode := database.ErrBlockNotFound @@ -1405,7 +1406,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { } for i := 0; i < len(blockHeaderData); i++ { blockHash := allBlockHashes[i] - wantHeaderBytes := allBlockBytes[i][0:wire.MaxBlockHeaderPayload] + wantHeaderBytes := allBlockBytes[i][0:allBlockHeaderSizes[i]] gotHeaderBytes := blockHeaderData[i] if !bytes.Equal(gotHeaderBytes, wantHeaderBytes) { tc.t.Errorf("FetchBlockHeaders(%s): bytes mismatch: "+ @@ -1462,9 +1463,9 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { // Ensure fetching blocks for which one doesn't exist returns the // expected error. testName := "FetchBlocks invalid hash" - badBlockHashes := make([]chainhash.Hash, len(allBlockHashes)+1) + badBlockHashes := make([]daghash.Hash, len(allBlockHashes)+1) copy(badBlockHashes, allBlockHashes) - badBlockHashes[len(badBlockHashes)-1] = chainhash.Hash{} + badBlockHashes[len(badBlockHashes)-1] = daghash.Hash{} wantErrCode := database.ErrBlockNotFound _, err = tx.FetchBlocks(badBlockHashes) if !checkDbError(tc.t, testName, err, wantErrCode) { @@ -1484,7 +1485,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool { testName = "FetchBlockRegions invalid hash" badBlockRegions := make([]database.BlockRegion, len(allBlockRegions)+1) copy(badBlockRegions, allBlockRegions) - badBlockRegions[len(badBlockRegions)-1].Hash = &chainhash.Hash{} + badBlockRegions[len(badBlockRegions)-1].Hash = &daghash.Hash{} wantErrCode = database.ErrBlockNotFound _, err = tx.FetchBlockRegions(badBlockRegions) if !checkDbError(tc.t, testName, err, wantErrCode) { @@ -1836,7 +1837,7 @@ func testClosedTxInterface(tc *testContext, tx database.Tx) bool { // Test the individual block APIs one block at a time to ensure they // return the expected error. 
Also, build the data needed to test the // bulk APIs below while looping. - allBlockHashes := make([]chainhash.Hash, len(tc.blocks)) + allBlockHashes := make([]daghash.Hash, len(tc.blocks)) allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) for i, block := range tc.blocks { blockHash := block.Hash() diff --git a/database/ffldb/whitebox_test.go b/database/ffldb/whitebox_test.go index b3b4eba6b..72373aab4 100644 --- a/database/ffldb/whitebox_test.go +++ b/database/ffldb/whitebox_test.go @@ -17,7 +17,7 @@ import ( "path/filepath" "testing" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -56,7 +56,7 @@ func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcu // Set the first block as the genesis block. blocks := make([]*btcutil.Block, 0, 256) - genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesis := btcutil.NewBlock(dagconfig.MainNetParams.GenesisBlock) blocks = append(blocks, genesis) // Load the remaining blocks. diff --git a/database/interface.go b/database/interface.go index c619da433..882e3a70c 100644 --- a/database/interface.go +++ b/database/interface.go @@ -8,7 +8,7 @@ package database import ( - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcutil" ) @@ -198,7 +198,7 @@ type Bucket interface { // BlockRegion specifies a particular region of a block identified by the // specified hash, given an offset and length. type BlockRegion struct { - Hash *chainhash.Hash + Hash *daghash.Hash Offset uint32 Len uint32 } @@ -237,7 +237,7 @@ type Tx interface { // - ErrTxClosed if the transaction has already been closed // // Other errors are possible depending on the implementation. 
- HasBlock(hash *chainhash.Hash) (bool, error) + HasBlock(hash *daghash.Hash) (bool, error) // HasBlocks returns whether or not the blocks with the provided hashes // exist in the database. @@ -247,7 +247,7 @@ type Tx interface { // - ErrTxClosed if the transaction has already been closed // // Other errors are possible depending on the implementation. - HasBlocks(hashes []chainhash.Hash) ([]bool, error) + HasBlocks(hashes []daghash.Hash) ([]bool, error) // FetchBlockHeader returns the raw serialized bytes for the block // header identified by the given hash. The raw bytes are in the format @@ -270,7 +270,7 @@ type Tx interface { // has ended results in undefined behavior. This constraint prevents // additional data copies and allows support for memory-mapped database // implementations. - FetchBlockHeader(hash *chainhash.Hash) ([]byte, error) + FetchBlockHeader(hash *daghash.Hash) ([]byte, error) // FetchBlockHeaders returns the raw serialized bytes for the block // headers identified by the given hashes. The raw bytes are in the @@ -297,7 +297,7 @@ type Tx interface { // has ended results in undefined behavior. This constraint prevents // additional data copies and allows support for memory-mapped database // implementations. - FetchBlockHeaders(hashes []chainhash.Hash) ([][]byte, error) + FetchBlockHeaders(hashes []daghash.Hash) ([][]byte, error) // FetchBlock returns the raw serialized bytes for the block identified // by the given hash. The raw bytes are in the format returned by @@ -314,7 +314,7 @@ type Tx interface { // has ended results in undefined behavior. This constraint prevents // additional data copies and allows support for memory-mapped database // implementations. - FetchBlock(hash *chainhash.Hash) ([]byte, error) + FetchBlock(hash *daghash.Hash) ([]byte, error) // FetchBlocks returns the raw serialized bytes for the blocks // identified by the given hashes. 
The raw bytes are in the format @@ -332,7 +332,7 @@ type Tx interface { // has ended results in undefined behavior. This constraint prevents // additional data copies and allows support for memory-mapped database // implementations. - FetchBlocks(hashes []chainhash.Hash) ([][]byte, error) + FetchBlocks(hashes []daghash.Hash) ([][]byte, error) // FetchBlockRegion returns the raw serialized bytes for the given // block region. diff --git a/database/testdata/blocks1-256.bz2 b/database/testdata/blocks1-256.bz2 index 6b8bda442..13d959499 100644 Binary files a/database/testdata/blocks1-256.bz2 and b/database/testdata/blocks1-256.bz2 differ diff --git a/database/testdata/generator.go b/database/testdata/generator.go new file mode 100644 index 000000000..220de9e58 --- /dev/null +++ b/database/testdata/generator.go @@ -0,0 +1,154 @@ +// This is a small tool to generate testdata blocks file + +package main + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "strconv" + "time" + + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" + "github.com/daglabs/btcd/wire" +) + +func main() { + targetFile, numBlocks := parseArgs() + + out, err := os.Create(targetFile) + if err != nil { + panic(fmt.Errorf("error reading target file: %s", err)) + } + defer func() { + err := out.Close() + if err != nil { + panic(fmt.Errorf("error closing target file: %s", err)) + } + }() + + generateBlocks(out, numBlocks) +} + +func generateBlocks(out *os.File, numBlocks int) { + lastBlock := dagconfig.MainNetParams.GenesisBlock + + for i := 0; i < numBlocks; i++ { + lastBlock = generateBlock(lastBlock) + writeBlock(out, lastBlock) + } +} + +func generateBlock(parent *wire.MsgBlock) *wire.MsgBlock { + return &wire.MsgBlock{ + Header: wire.BlockHeader{ + Version: 1, + NumPrevBlocks: 1, + PrevBlocks: []daghash.Hash{parent.BlockHash()}, + MerkleRoot: genesisMerkleRoot, + Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC + Bits: 0x1e00ffff, // 
503382015 [000000ffff000000000000000000000000000000000000000000000000000000] + Nonce: 0xc0192550, // 2148484547 + }, + Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, + } +} + +func writeBlock(out *os.File, block *wire.MsgBlock) { + writeNet(out) + + blockLen := uint32(block.SerializeSize()) + buf := bytes.NewBuffer(make([]byte, 0, blockLen)) + + err := block.Serialize(buf) + if err != nil { + panic(fmt.Errorf("error serializing block: %s", err)) + } + + err = binary.Write(out, binary.LittleEndian, blockLen) + if err != nil { + panic(fmt.Errorf("error writing blockLen: %s", err)) + } + + _, err = out.Write(buf.Bytes()) + if err != nil { + panic(fmt.Errorf("error writing block: %s", err)) + } +} + +func writeNet(out *os.File) { + err := binary.Write(out, binary.LittleEndian, wire.MainNet) + if err != nil { + panic(fmt.Errorf("error writing net to file: %s", err)) + } +} + +func parseArgs() (targetFile string, numBlocks int) { + if len(os.Args) != 3 { + printUsage() + } + + targetFile = os.Args[1] + numBlocks, err := strconv.Atoi(os.Args[2]) + if err != nil { + printUsage() + } + + return +} + +func printUsage() { + fmt.Println("Usage: generator [targetFile] [numBlocks]") + os.Exit(1) +} + +var genesisCoinbaseTx = wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: daghash.Hash{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{ + 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */ + 0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */ + 0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */ + 0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */ + 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */ + 0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */ + 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec|*/ + 0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */ + 0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for |*/ + 
0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */ + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0x12a05f200, + PkScript: []byte{ + 0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */ + 0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */ + 0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */ + 0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */ + 0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */ + 0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */ + 0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */ + 0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */ + 0x1d, 0x5f, 0xac, /* |._.| */ + }, + }, + }, + LockTime: 0, +} + +var genesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, +}) diff --git a/docs/json_rpc_api.md b/docs/json_rpc_api.md index 5eb3b9a81..e2a23805b 100644 --- a/docs/json_rpc_api.md +++ b/docs/json_rpc_api.md @@ -208,7 +208,7 @@ the method name for further details such as parameter and return information. | | | |---|---| |Method|createrawtransaction| -|Parameters|1. transaction inputs (JSON array, required) - json array of json objects
`[`
  `{`
    `"txid": "hash", (string, required) the hash of the input transaction`
    `"vout": n (numeric, required) the specific output of the input transaction to redeem`
  `}, ...`
`]`
2. addresses and amounts (JSON object, required) - json object with addresses as keys and amounts as values
`{`
  `"address": n.nnn (numeric, required) the address to send to as the key and the amount in BTC as the value`
  `, ...`
`}`
3. locktime (int64, optional, default=0) - specifies the transaction locktime. If non-zero, the inputs will also have their locktimes activated. | +|Parameters|1. transaction inputs (JSON array, required) - json array of json objects
`[`
  `{`
    `"txid": "hash", (string, required) the hash of the input transaction`
    `"vout": n (numeric, required) the specific output of the input transaction to redeem`
  `}, ...`
`]`
2. addresses and amounts (JSON object, required) - json object with addresses as keys and amounts as values
`{`
  `"address": n.nnn (numeric, required) the address to send to as the key and the amount in BTC as the value`
  `, ...`
`}`
3. locktime (uint64, optional, default=0) - specifies the transaction locktime. If non-zero, the inputs will also have their locktimes activated. | |Description|Returns a new transaction spending the provided inputs and sending to the provided addresses.
The transaction inputs are not signed in the created transaction.
The `signrawtransaction` RPC command provided by wallet must be used to sign the resulting transaction.| |Returns|`"transaction" (string) hex-encoded bytes of the serialized transaction`| |Example Parameters|1. transaction inputs `[{"txid":"e6da89de7a6b8508ce8f371a3d0535b04b5e108cb1a6e9284602d3bfd357c018","vout":1}]`
2. addresses and amounts `{"13cgrTP7wgbZYWrY9BZ22BV6p82QXQT3nY": 0.49213337}`
3. locktime `0`| diff --git a/integration/bip0009_test.go b/integration/bip0009_test.go index e572dd869..ecf062d92 100644 --- a/integration/bip0009_test.go +++ b/integration/bip0009_test.go @@ -13,9 +13,9 @@ import ( "testing" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/integration/rpctest" ) @@ -32,7 +32,7 @@ const ( // assertVersionBit gets the passed block hash from the given test harness and // ensures its version either has the provided bit set or unset per the set // flag. -func assertVersionBit(r *rpctest.Harness, t *testing.T, hash *chainhash.Hash, bit uint8, set bool) { +func assertVersionBit(r *rpctest.Harness, t *testing.T, hash *daghash.Hash, bit uint8, set bool) { block, err := r.Node.GetBlock(hash) if err != nil { t.Fatalf("failed to retrieve block %v: %v", hash, err) @@ -66,17 +66,17 @@ func assertChainHeight(r *rpctest.Harness, t *testing.T, expectedHeight uint32) // thresholdStateToStatus converts the passed threshold state to the equivalent // status string returned in the getblockchaininfo RPC. 
-func thresholdStateToStatus(state blockchain.ThresholdState) (string, error) { +func thresholdStateToStatus(state blockdag.ThresholdState) (string, error) { switch state { - case blockchain.ThresholdDefined: + case blockdag.ThresholdDefined: return "defined", nil - case blockchain.ThresholdStarted: + case blockdag.ThresholdStarted: return "started", nil - case blockchain.ThresholdLockedIn: + case blockdag.ThresholdLockedIn: return "lockedin", nil - case blockchain.ThresholdActive: + case blockdag.ThresholdActive: return "active", nil - case blockchain.ThresholdFailed: + case blockdag.ThresholdFailed: return "failed", nil } @@ -86,7 +86,7 @@ func thresholdStateToStatus(state blockchain.ThresholdState) (string, error) { // assertSoftForkStatus retrieves the current blockchain info from the given // test harness and ensures the provided soft fork key is both available and its // status is the equivalent of the passed state. -func assertSoftForkStatus(r *rpctest.Harness, t *testing.T, forkKey string, state blockchain.ThresholdState) { +func assertSoftForkStatus(r *rpctest.Harness, t *testing.T, forkKey string, state blockdag.ThresholdState) { // Convert the expected threshold state into the equivalent // getblockchaininfo RPC status string. status, err := thresholdStateToStatus(state) @@ -129,7 +129,7 @@ func assertSoftForkStatus(r *rpctest.Harness, t *testing.T, forkKey string, stat // specific soft fork deployment to test. func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { // Initialize the primary mining node with only the genesis block. - r, err := rpctest.New(&chaincfg.RegressionNetParams, nil, nil) + r, err := rpctest.New(&dagconfig.RegressionNetParams, nil, nil) if err != nil { t.Fatalf("unable to create primary harness: %v", err) } @@ -143,7 +143,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { // Assert the chain height is the expected value and the soft fork // status starts out as defined. 
assertChainHeight(r, t, 0) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdDefined) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdDefined) // *** ThresholdDefined part 2 - 1 block prior to ThresholdStarted *** // @@ -168,7 +168,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { } } assertChainHeight(r, t, confirmationWindow-2) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdDefined) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdDefined) // *** ThresholdStarted *** // @@ -181,7 +181,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { t.Fatalf("failed to generated block: %v", err) } assertChainHeight(r, t, confirmationWindow-1) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdStarted) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdStarted) // *** ThresholdStarted part 2 - Fail to achieve ThresholdLockedIn *** // @@ -212,7 +212,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { } } assertChainHeight(r, t, (confirmationWindow*2)-1) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdStarted) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdStarted) // *** ThresholdLockedIn *** // @@ -237,7 +237,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { } } assertChainHeight(r, t, (confirmationWindow*3)-1) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdLockedIn) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdLockedIn) // *** ThresholdLockedIn part 2 -- 1 block prior to ThresholdActive *** // @@ -255,7 +255,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { } } assertChainHeight(r, t, (confirmationWindow*4)-2) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdLockedIn) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdLockedIn) // *** ThresholdActive *** // @@ -269,7 +269,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { 
t.Fatalf("failed to generated block: %v", err) } assertChainHeight(r, t, (confirmationWindow*4)-1) - assertSoftForkStatus(r, t, forkKey, blockchain.ThresholdActive) + assertSoftForkStatus(r, t, forkKey, blockdag.ThresholdActive) } // TestBIP0009 ensures the BIP0009 soft fork mechanism follows the state @@ -298,7 +298,7 @@ func testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { func TestBIP0009(t *testing.T) { t.Parallel() - testBIP0009(t, "dummy", chaincfg.DeploymentTestDummy) + testBIP0009(t, "dummy", dagconfig.DeploymentTestDummy) } // TestBIP0009Mining ensures blocks built via btcd's CPU miner follow the rules @@ -319,7 +319,7 @@ func TestBIP0009Mining(t *testing.T) { t.Parallel() // Initialize the primary mining node with only the genesis block. - r, err := rpctest.New(&chaincfg.SimNetParams, nil, nil) + r, err := rpctest.New(&dagconfig.SimNetParams, nil, nil) if err != nil { t.Fatalf("unable to create primary harness: %v", err) } @@ -336,7 +336,7 @@ func TestBIP0009Mining(t *testing.T) { // Generate a block that extends the genesis block. It should not have // the test dummy bit set in the version since the first window is // in the defined threshold state. 
- deployment := &r.ActiveNet.Deployments[chaincfg.DeploymentTestDummy] + deployment := &r.ActiveNet.Deployments[dagconfig.DeploymentTestDummy] testDummyBitNum := deployment.BitNumber hashes, err := r.Node.Generate(1) if err != nil { diff --git a/integration/csv_test.go b/integration/csv_test.go index c50f4a098..0efaae5d5 100644 --- a/integration/csv_test.go +++ b/integration/csv_test.go @@ -14,10 +14,10 @@ import ( "testing" "time" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/integration/rpctest" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -98,7 +98,7 @@ func TestBIP0113(t *testing.T) { t.Parallel() btcdCfg := []string{"--rejectnonstd"} - r, err := rpctest.New(&chaincfg.SimNetParams, nil, btcdCfg) + r, err := rpctest.New(&dagconfig.SimNetParams, nil, btcdCfg) if err != nil { t.Fatal("unable to create primary harness: ", err) } @@ -242,7 +242,7 @@ func createCSVOutput(r *rpctest.Harness, t *testing.T, // Convert the time-lock to the proper sequence lock based according to // if the lock is seconds or time based. - sequenceLock := blockchain.LockTimeToSequence(isSeconds, + sequenceLock := blockdag.LockTimeToSequence(isSeconds, int64(timeLock)) // Our CSV script is simply: OP_CSV @@ -318,8 +318,8 @@ func spendCSVOutput(redeemScript []byte, csvUTXO *wire.OutPoint, // assertTxInBlock asserts a transaction with the specified txid is found // within the block with the passed block hash. 
-func assertTxInBlock(r *rpctest.Harness, t *testing.T, blockHash *chainhash.Hash, - txid *chainhash.Hash) { +func assertTxInBlock(r *rpctest.Harness, t *testing.T, blockHash *daghash.Hash, + txid *daghash.Hash) { block, err := r.Node.GetBlock(blockHash) if err != nil { @@ -351,7 +351,7 @@ func TestBIP0068AndCsv(t *testing.T) { // relative lock times. btcdCfg := []string{"--rejectnonstd"} - r, err := rpctest.New(&chaincfg.SimNetParams, nil, btcdCfg) + r, err := rpctest.New(&dagconfig.SimNetParams, nil, btcdCfg) if err != nil { t.Fatal("unable to create primary harness: ", err) } @@ -478,7 +478,7 @@ func TestBIP0068AndCsv(t *testing.T) { // bit it set. The transaction should be rejected as a result. { tx: makeTxCase( - blockchain.LockTimeToSequence(false, 1)|wire.SequenceLockTimeDisabled, + blockdag.LockTimeToSequence(false, 1)|wire.SequenceLockTimeDisabled, 1, ), accept: false, @@ -488,14 +488,14 @@ func TestBIP0068AndCsv(t *testing.T) { // but the CSV output requires a 10 block relative lock-time. // Therefore, the transaction should be rejected. { - tx: makeTxCase(blockchain.LockTimeToSequence(false, 9), 1), + tx: makeTxCase(blockdag.LockTimeToSequence(false, 9), 1), accept: false, }, // A transaction with a single input having a 10 block // relative time lock. The referenced input is 11 blocks old so // the transaction should be accepted. { - tx: makeTxCase(blockchain.LockTimeToSequence(false, 10), 1), + tx: makeTxCase(blockdag.LockTimeToSequence(false, 10), 1), accept: true, }, // A transaction with a single input having a 11 block @@ -503,14 +503,14 @@ func TestBIP0068AndCsv(t *testing.T) { // 11 and the CSV op-code requires 10 blocks to have passed, so // this transaction should be accepted. { - tx: makeTxCase(blockchain.LockTimeToSequence(false, 11), 1), + tx: makeTxCase(blockdag.LockTimeToSequence(false, 11), 1), accept: true, }, // A transaction whose input has a 1000 blck relative time // lock. 
This should be rejected as the input's age is only 11 // blocks. { - tx: makeTxCase(blockchain.LockTimeToSequence(false, 1000), 1), + tx: makeTxCase(blockdag.LockTimeToSequence(false, 1000), 1), accept: false, }, // A transaction with a single input having a 512,000 second @@ -518,14 +518,14 @@ func TestBIP0068AndCsv(t *testing.T) { // days worth of blocks haven't yet been mined. The referenced // input doesn't have sufficient age. { - tx: makeTxCase(blockchain.LockTimeToSequence(true, 512000), 1), + tx: makeTxCase(blockdag.LockTimeToSequence(true, 512000), 1), accept: false, }, // A transaction whose single input has a 512 second // relative time-lock. This transaction should be accepted as // finalized. { - tx: makeTxCase(blockchain.LockTimeToSequence(true, 512), 1), + tx: makeTxCase(blockdag.LockTimeToSequence(true, 512), 1), accept: true, }, } diff --git a/integration/rpcserver_test.go b/integration/rpcserver_test.go index 962528893..816716282 100644 --- a/integration/rpcserver_test.go +++ b/integration/rpcserver_test.go @@ -14,7 +14,7 @@ import ( "runtime/debug" "testing" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/integration/rpctest" ) @@ -109,7 +109,7 @@ func TestMain(m *testing.M) { // ensure that non-standard transactions aren't accepted into the // mempool or relayed. 
btcdCfg := []string{"--rejectnonstd"} - primaryHarness, err = rpctest.New(&chaincfg.SimNetParams, nil, btcdCfg) + primaryHarness, err = rpctest.New(&dagconfig.SimNetParams, nil, btcdCfg) if err != nil { fmt.Println("unable to create primary harness: ", err) os.Exit(1) diff --git a/integration/rpctest/blockgen.go b/integration/rpctest/blockgen.go index 19d1c9e90..1d6a8f0bb 100644 --- a/integration/rpctest/blockgen.go +++ b/integration/rpctest/blockgen.go @@ -11,9 +11,9 @@ import ( "runtime" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -44,7 +44,7 @@ func solveBlock(header *wire.BlockHeader, targetDifficulty *big.Int) bool { default: hdr.Nonce = i hash := hdr.BlockHash() - if blockchain.HashToBig(&hash).Cmp(targetDifficulty) <= 0 { + if blockdag.HashToBig(&hash).Cmp(targetDifficulty) <= 0 { select { case results <- sbResult{true, i}: return @@ -97,7 +97,7 @@ func standardCoinbaseScript(nextBlockHeight int32, extraNonce uint64) ([]byte, e // subsidy based on the passed block height to the provided address. func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int32, addr btcutil.Address, mineTo []wire.TxOut, - net *chaincfg.Params) (*btcutil.Tx, error) { + net *dagconfig.Params) (*btcutil.Tx, error) { // Create the script to pay to the provided payment address. pkScript, err := txscript.PayToAddrScript(addr) @@ -109,14 +109,14 @@ func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int32, tx.AddTxIn(&wire.TxIn{ // Coinbase transactions have no inputs, so previous outpoint is // zero hash and max index. 
- PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, + PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{}, wire.MaxPrevOutIndex), SignatureScript: coinbaseScript, Sequence: wire.MaxTxInSequenceNum, }) if len(mineTo) == 0 { tx.AddTxOut(&wire.TxOut{ - Value: blockchain.CalcBlockSubsidy(nextBlockHeight, net), + Value: blockdag.CalcBlockSubsidy(nextBlockHeight, net), PkScript: pkScript, }) } else { @@ -134,10 +134,10 @@ func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int32, // builds off of the genesis block for the specified chain. func CreateBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx, blockVersion int32, blockTime time.Time, miningAddr btcutil.Address, - mineTo []wire.TxOut, net *chaincfg.Params) (*btcutil.Block, error) { + mineTo []wire.TxOut, net *dagconfig.Params) (*btcutil.Block, error) { var ( - prevHash *chainhash.Hash + prevHash *daghash.Hash blockHeight int32 prevBlockTime time.Time ) @@ -181,7 +181,7 @@ func CreateBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx, if inclusionTxs != nil { blockTxns = append(blockTxns, inclusionTxs...) 
} - merkles := blockchain.BuildMerkleTreeStore(blockTxns) + merkles := blockdag.BuildMerkleTreeStore(blockTxns) var block wire.MsgBlock block.Header = wire.BlockHeader{ Version: blockVersion, diff --git a/integration/rpctest/memwallet.go b/integration/rpctest/memwallet.go index 7d146e656..72f8d4b5b 100644 --- a/integration/rpctest/memwallet.go +++ b/integration/rpctest/memwallet.go @@ -10,10 +10,10 @@ import ( "fmt" "sync" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/rpcclient" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -25,7 +25,7 @@ var ( // hdSeed is the BIP 32 seed used by the memWallet to initialize it's // HD root key. This value is hard coded in order to ensure // deterministic behavior across test runs. - hdSeed = [chainhash.HashSize]byte{ + hdSeed = [daghash.HashSize]byte{ 0x79, 0xa6, 0x1a, 0xdb, 0xc6, 0xe5, 0xa2, 0xe1, 0x39, 0xd2, 0x71, 0x3a, 0x54, 0x6e, 0xc7, 0xc8, 0x75, 0x63, 0x2e, 0x75, 0xf1, 0xdf, 0x9c, 0x3f, @@ -101,7 +101,7 @@ type memWallet struct { chainUpdateSignal chan struct{} chainMtx sync.Mutex - net *chaincfg.Params + net *dagconfig.Params rpc *rpcclient.Client @@ -110,13 +110,13 @@ type memWallet struct { // newMemWallet creates and returns a fully initialized instance of the // memWallet given a particular blockchain's parameters. -func newMemWallet(net *chaincfg.Params, harnessID uint32) (*memWallet, error) { +func newMemWallet(net *dagconfig.Params, harnessID uint32) (*memWallet, error) { // The wallet's final HD seed is: hdSeed || harnessID. This method // ensures that each harness instance uses a deterministic root seed // based on its harness ID. 
- var harnessHDSeed [chainhash.HashSize + 4]byte + var harnessHDSeed [daghash.HashSize + 4]byte copy(harnessHDSeed[:], hdSeed[:]) - binary.BigEndian.PutUint32(harnessHDSeed[:chainhash.HashSize], harnessID) + binary.BigEndian.PutUint32(harnessHDSeed[:daghash.HashSize], harnessID) hdRoot, err := hdkeychain.NewMaster(harnessHDSeed[:], net) if err != nil { @@ -207,7 +207,7 @@ func (m *memWallet) ingestBlock(update *chainUpdate) { } for _, tx := range update.filteredTxns { mtx := tx.MsgTx() - isCoinbase := blockchain.IsCoinBaseTx(mtx) + isCoinbase := blockdag.IsCoinBaseTx(mtx) txHash := mtx.TxHash() m.evalOutputs(mtx.TxOut, &txHash, isCoinbase, undo) m.evalInputs(mtx.TxIn, undo) @@ -247,7 +247,7 @@ func (m *memWallet) chainSyncer() { // evalOutputs evaluates each of the passed outputs, creating a new matching // utxo within the wallet if we're able to spend the output. -func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txHash *chainhash.Hash, +func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txHash *daghash.Hash, isCoinbase bool, undo *undoEntry) { for i, output := range outputs { @@ -446,7 +446,7 @@ func (m *memWallet) fundTx(tx *wire.MsgTx, amt btcutil.Amount, feeRate btcutil.A // while observing the passed fee rate. The passed fee rate should be expressed // in satoshis-per-byte. 
func (m *memWallet) SendOutputs(outputs []*wire.TxOut, - feeRate btcutil.Amount) (*chainhash.Hash, error) { + feeRate btcutil.Amount) (*daghash.Hash, error) { tx, err := m.CreateTransaction(outputs, feeRate) if err != nil { diff --git a/integration/rpctest/rpc_harness.go b/integration/rpctest/rpc_harness.go index 83eeed557..8e8bf1b40 100644 --- a/integration/rpctest/rpc_harness.go +++ b/integration/rpctest/rpc_harness.go @@ -15,8 +15,8 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/rpcclient" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -76,7 +76,7 @@ type HarnessTestCase func(r *Harness, t *testing.T) type Harness struct { // ActiveNet is the parameters of the blockchain the Harness belongs // to. - ActiveNet *chaincfg.Params + ActiveNet *dagconfig.Params Node *rpcclient.Client node *node @@ -97,7 +97,7 @@ type Harness struct { // used. // // NOTE: This function is safe for concurrent access. -func New(activeNet *chaincfg.Params, handlers *rpcclient.NotificationHandlers, +func New(activeNet *dagconfig.Params, handlers *rpcclient.NotificationHandlers, extraArgs []string) (*Harness, error) { harnessStateMtx.Lock() @@ -351,7 +351,7 @@ func (h *Harness) ConfirmedBalance() btcutil.Amount { // // This function is safe for concurrent access. 
func (h *Harness) SendOutputs(targetOutputs []*wire.TxOut, - feeRate btcutil.Amount) (*chainhash.Hash, error) { + feeRate btcutil.Amount) (*daghash.Hash, error) { return h.wallet.SendOutputs(targetOutputs, feeRate) } diff --git a/integration/rpctest/rpc_harness_test.go b/integration/rpctest/rpc_harness_test.go index 130619497..76c695293 100644 --- a/integration/rpctest/rpc_harness_test.go +++ b/integration/rpctest/rpc_harness_test.go @@ -13,15 +13,15 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) func testSendOutputs(r *Harness, t *testing.T) { - genSpend := func(amt btcutil.Amount) *chainhash.Hash { + genSpend := func(amt btcutil.Amount) *daghash.Hash { // Grab a fresh address from the wallet. addr, err := r.NewAddress() if err != nil { @@ -42,7 +42,7 @@ func testSendOutputs(r *Harness, t *testing.T) { return txid } - assertTxMined := func(txid *chainhash.Hash, blockHash *chainhash.Hash) { + assertTxMined := func(txid *daghash.Hash, blockHash *daghash.Hash) { block, err := r.Node.GetBlock(blockHash) if err != nil { t.Fatalf("unable to get block: %v", err) @@ -105,7 +105,7 @@ func assertConnectedTo(t *testing.T, nodeA *Harness, nodeB *Harness) { func testConnectNode(r *Harness, t *testing.T) { // Create a fresh test harness. - harness, err := New(&chaincfg.SimNetParams, nil, nil) + harness, err := New(&dagconfig.SimNetParams, nil, nil) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func testActiveHarnesses(r *Harness, t *testing.T) { numInitialHarnesses := len(ActiveHarnesses()) // Create a single test harness. 
- harness1, err := New(&chaincfg.SimNetParams, nil, nil) + harness1, err := New(&dagconfig.SimNetParams, nil, nil) if err != nil { t.Fatal(err) } @@ -181,7 +181,7 @@ func testJoinMempools(r *Harness, t *testing.T) { // Create a local test harness with only the genesis block. The nodes // will be synced below so the same transaction can be sent to both // nodes without it being an orphan. - harness, err := New(&chaincfg.SimNetParams, nil, nil) + harness, err := New(&dagconfig.SimNetParams, nil, nil) if err != nil { t.Fatal(err) } @@ -281,7 +281,7 @@ func testJoinMempools(r *Harness, t *testing.T) { func testJoinBlocks(r *Harness, t *testing.T) { // Create a second harness with only the genesis block so it is behind // the main harness. - harness, err := New(&chaincfg.SimNetParams, nil, nil) + harness, err := New(&dagconfig.SimNetParams, nil, nil) if err != nil { t.Fatal(err) } @@ -469,7 +469,7 @@ func testGenerateAndSubmitBlockWithCustomCoinbaseOutputs(r *Harness, func testMemWalletReorg(r *Harness, t *testing.T) { // Create a fresh harness, we'll be using the main harness to force a // re-org on this local harness. 
- harness, err := New(&chaincfg.SimNetParams, nil, nil) + harness, err := New(&dagconfig.SimNetParams, nil, nil) if err != nil { t.Fatal(err) } @@ -566,7 +566,7 @@ const ( func TestMain(m *testing.M) { var err error - mainHarness, err = New(&chaincfg.SimNetParams, nil, nil) + mainHarness, err = New(&dagconfig.SimNetParams, nil, nil) if err != nil { fmt.Println("unable to create main harness: ", err) os.Exit(1) diff --git a/integration/rpctest/utils.go b/integration/rpctest/utils.go index d86897745..8fca3e20c 100644 --- a/integration/rpctest/utils.go +++ b/integration/rpctest/utils.go @@ -8,7 +8,7 @@ import ( "reflect" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/rpcclient" ) @@ -81,7 +81,7 @@ func syncBlocks(nodes []*Harness) error { retry: for !blocksMatch { - var prevHash *chainhash.Hash + var prevHash *daghash.Hash var prevHeight int32 for _, node := range nodes { blockHash, blockHeight, err := node.Node.GetBestBlock() diff --git a/log.go b/log.go index 2192c61c9..4199fd565 100644 --- a/log.go +++ b/log.go @@ -11,8 +11,8 @@ import ( "path/filepath" "github.com/daglabs/btcd/addrmgr" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/indexers" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/indexers" "github.com/daglabs/btcd/connmgr" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/mempool" @@ -76,7 +76,7 @@ func init() { addrmgr.UseLogger(amgrLog) connmgr.UseLogger(cmgrLog) database.UseLogger(bcdbLog) - blockchain.UseLogger(chanLog) + blockdag.UseLogger(chanLog) indexers.UseLogger(indxLog) mining.UseLogger(minrLog) cpuminer.UseLogger(minrLog) diff --git a/mempool/error.go b/mempool/error.go index 03e3f7ca4..1c89f8b8c 100644 --- a/mempool/error.go +++ b/mempool/error.go @@ -5,7 +5,7 @@ package mempool import ( - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/wire" ) @@ 
-52,7 +52,7 @@ func txRuleError(c wire.RejectCode, desc string) RuleError { // chainRuleError returns a RuleError that encapsulates the given // blockchain.RuleError. -func chainRuleError(chainErr blockchain.RuleError) RuleError { +func chainRuleError(chainErr blockdag.RuleError) RuleError { return RuleError{ Err: chainErr, } @@ -68,26 +68,26 @@ func extractRejectCode(err error) (wire.RejectCode, bool) { } switch err := err.(type) { - case blockchain.RuleError: + case blockdag.RuleError: // Convert the chain error to a reject code. var code wire.RejectCode switch err.ErrorCode { // Rejected due to duplicate. - case blockchain.ErrDuplicateBlock: + case blockdag.ErrDuplicateBlock: code = wire.RejectDuplicate // Rejected due to obsolete version. - case blockchain.ErrBlockVersionTooOld: + case blockdag.ErrBlockVersionTooOld: code = wire.RejectObsolete // Rejected due to checkpoint. - case blockchain.ErrCheckpointTimeTooOld: + case blockdag.ErrCheckpointTimeTooOld: fallthrough - case blockchain.ErrDifficultyTooLow: + case blockdag.ErrDifficultyTooLow: fallthrough - case blockchain.ErrBadCheckpoint: + case blockdag.ErrBadCheckpoint: fallthrough - case blockchain.ErrForkTooOld: + case blockdag.ErrForkTooOld: code = wire.RejectCheckpoint // Everything else is due to the block or transaction being invalid. diff --git a/mempool/estimatefee.go b/mempool/estimatefee.go index a461c7567..baface3ef 100644 --- a/mempool/estimatefee.go +++ b/mempool/estimatefee.go @@ -16,7 +16,7 @@ import ( "strings" "sync" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/mining" "github.com/daglabs/btcutil" ) @@ -94,7 +94,7 @@ func NewSatoshiPerByte(fee btcutil.Amount, size uint32) SatoshiPerByte { // additional data required for the fee estimation algorithm. type observedTransaction struct { // A transaction hash. - hash chainhash.Hash + hash daghash.Hash // The fee per byte of the transaction in satoshis. 
feeRate SatoshiPerByte @@ -135,7 +135,7 @@ func deserializeObservedTransaction(r io.Reader) (*observedTransaction, error) { // is used if Rollback is called to reverse the effect of registering // a block. type registeredBlock struct { - hash chainhash.Hash + hash daghash.Hash transactions []*observedTransaction } @@ -169,7 +169,7 @@ type FeeEstimator struct { numBlocksRegistered uint32 mtx sync.RWMutex - observed map[chainhash.Hash]*observedTransaction + observed map[daghash.Hash]*observedTransaction bin [estimateFeeDepth][]*observedTransaction // The cached estimates. @@ -190,7 +190,7 @@ func NewFeeEstimator(maxRollback, minRegisteredBlocks uint32) *FeeEstimator { lastKnownHeight: mining.UnminedHeight, binSize: estimateFeeBinSize, maxReplacements: estimateFeeMaxReplacements, - observed: make(map[chainhash.Hash]*observedTransaction), + observed: make(map[daghash.Hash]*observedTransaction), dropped: make([]*registeredBlock, 0, maxRollback), } } @@ -343,7 +343,7 @@ func (ef *FeeEstimator) LastKnownHeight() int32 { // deleted if they have been observed too long ago. That means the result // of Rollback won't always be exactly the same as if the last block had not // happened, but it should be close enough. -func (ef *FeeEstimator) Rollback(hash *chainhash.Hash) error { +func (ef *FeeEstimator) Rollback(hash *daghash.Hash) error { ef.mtx.Lock() defer ef.mtx.Unlock() @@ -691,7 +691,7 @@ func RestoreFeeEstimator(data FeeEstimatorState) (*FeeEstimator, error) { } ef := &FeeEstimator{ - observed: make(map[chainhash.Hash]*observedTransaction), + observed: make(map[daghash.Hash]*observedTransaction), } // Read basic parameters. 
diff --git a/mempool/estimatefee_test.go b/mempool/estimatefee_test.go index 7bd6d4e75..9d2c2ec82 100644 --- a/mempool/estimatefee_test.go +++ b/mempool/estimatefee_test.go @@ -9,7 +9,7 @@ import ( "math/rand" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/mining" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -24,7 +24,7 @@ func newTestFeeEstimator(binSize, maxReplacements, maxRollback uint32) *FeeEstim binSize: int32(binSize), minRegisteredBlocks: 0, maxReplacements: int32(maxReplacements), - observed: make(map[chainhash.Hash]*observedTransaction), + observed: make(map[daghash.Hash]*observedTransaction), dropped: make([]*registeredBlock, 0, maxRollback), } } @@ -32,7 +32,7 @@ func newTestFeeEstimator(binSize, maxReplacements, maxRollback uint32) *FeeEstim // lastBlock is a linked list of the block hashes which have been // processed by the test FeeEstimator. type lastBlock struct { - hash *chainhash.Hash + hash *daghash.Hash prev *lastBlock } diff --git a/mempool/mempool.go b/mempool/mempool.go index 1934bbb12..492136fec 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -12,11 +12,11 @@ import ( "sync/atomic" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/indexers" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/indexers" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/mining" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -53,11 +53,11 @@ type Config struct { // ChainParams identifies which chain parameters the txpool is // associated with. - ChainParams *chaincfg.Params + ChainParams *dagconfig.Params // FetchUtxoView defines the function to use to fetch unspent // transaction output information. 
- FetchUtxoView func(*btcutil.Tx) (*blockchain.UtxoViewpoint, error) + FetchUtxoView func(*btcutil.Tx) (*blockdag.UtxoViewpoint, error) // BestHeight defines the function to use to access the block height of // the current best chain. @@ -71,7 +71,7 @@ type Config struct { // CalcSequenceLock defines the function to use in order to generate // the current sequence lock for the given transaction using the passed // utxo view. - CalcSequenceLock func(*btcutil.Tx, *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) + CalcSequenceLock func(*btcutil.Tx, *blockdag.UtxoViewpoint) (*blockdag.SequenceLock, error) // IsDeploymentActive returns true if the target deploymentID is // active, and false otherwise. The mempool uses this function to gauge @@ -160,9 +160,9 @@ type TxPool struct { mtx sync.RWMutex cfg Config - pool map[chainhash.Hash]*TxDesc - orphans map[chainhash.Hash]*orphanTx - orphansByPrev map[wire.OutPoint]map[chainhash.Hash]*btcutil.Tx + pool map[daghash.Hash]*TxDesc + orphans map[daghash.Hash]*orphanTx + orphansByPrev map[wire.OutPoint]map[daghash.Hash]*btcutil.Tx outpoints map[wire.OutPoint]*btcutil.Tx pennyTotal float64 // exponentially decaying total for penny spends. lastPennyUnix int64 // unix time of last ``penny spend'' @@ -320,7 +320,7 @@ func (mp *TxPool) addOrphan(tx *btcutil.Tx, tag Tag) { for _, txIn := range tx.MsgTx().TxIn { if _, exists := mp.orphansByPrev[txIn.PreviousOutPoint]; !exists { mp.orphansByPrev[txIn.PreviousOutPoint] = - make(map[chainhash.Hash]*btcutil.Tx) + make(map[daghash.Hash]*btcutil.Tx) } mp.orphansByPrev[txIn.PreviousOutPoint][*tx.Hash()] = tx } @@ -377,7 +377,7 @@ func (mp *TxPool) removeOrphanDoubleSpends(tx *btcutil.Tx) { // exists in the main pool. // // This function MUST be called with the mempool lock held (for reads). 
-func (mp *TxPool) isTransactionInPool(hash *chainhash.Hash) bool { +func (mp *TxPool) isTransactionInPool(hash *daghash.Hash) bool { if _, exists := mp.pool[*hash]; exists { return true } @@ -389,7 +389,7 @@ func (mp *TxPool) isTransactionInPool(hash *chainhash.Hash) bool { // exists in the main pool. // // This function is safe for concurrent access. -func (mp *TxPool) IsTransactionInPool(hash *chainhash.Hash) bool { +func (mp *TxPool) IsTransactionInPool(hash *daghash.Hash) bool { // Protect concurrent access. mp.mtx.RLock() inPool := mp.isTransactionInPool(hash) @@ -402,7 +402,7 @@ func (mp *TxPool) IsTransactionInPool(hash *chainhash.Hash) bool { // in the orphan pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *TxPool) isOrphanInPool(hash *chainhash.Hash) bool { +func (mp *TxPool) isOrphanInPool(hash *daghash.Hash) bool { if _, exists := mp.orphans[*hash]; exists { return true } @@ -414,7 +414,7 @@ func (mp *TxPool) isOrphanInPool(hash *chainhash.Hash) bool { // in the orphan pool. // // This function is safe for concurrent access. -func (mp *TxPool) IsOrphanInPool(hash *chainhash.Hash) bool { +func (mp *TxPool) IsOrphanInPool(hash *daghash.Hash) bool { // Protect concurrent access. mp.mtx.RLock() inPool := mp.isOrphanInPool(hash) @@ -427,7 +427,7 @@ func (mp *TxPool) IsOrphanInPool(hash *chainhash.Hash) bool { // in the main pool or in the orphan pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *TxPool) haveTransaction(hash *chainhash.Hash) bool { +func (mp *TxPool) haveTransaction(hash *daghash.Hash) bool { return mp.isTransactionInPool(hash) || mp.isOrphanInPool(hash) } @@ -435,7 +435,7 @@ func (mp *TxPool) haveTransaction(hash *chainhash.Hash) bool { // in the main pool or in the orphan pool. // // This function is safe for concurrent access. 
-func (mp *TxPool) HaveTransaction(hash *chainhash.Hash) bool { +func (mp *TxPool) HaveTransaction(hash *daghash.Hash) bool { // Protect concurrent access. mp.mtx.RLock() haveTx := mp.haveTransaction(hash) @@ -515,7 +515,7 @@ func (mp *TxPool) RemoveDoubleSpends(tx *btcutil.Tx) { // helper for maybeAcceptTransaction. // // This function MUST be called with the mempool lock held (for writes). -func (mp *TxPool) addTransaction(utxoView *blockchain.UtxoViewpoint, tx *btcutil.Tx, height int32, fee int64) *TxDesc { +func (mp *TxPool) addTransaction(utxoView *blockdag.UtxoViewpoint, tx *btcutil.Tx, height int32, fee int64) *TxDesc { // Add the transaction to the pool and mark the referenced outpoints // as spent by the pool. txD := &TxDesc{ @@ -585,7 +585,7 @@ func (mp *TxPool) CheckSpend(op wire.OutPoint) *btcutil.Tx { // transaction pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *TxPool) fetchInputUtxos(tx *btcutil.Tx) (*blockchain.UtxoViewpoint, error) { +func (mp *TxPool) fetchInputUtxos(tx *btcutil.Tx) (*blockdag.UtxoViewpoint, error) { utxoView, err := mp.cfg.FetchUtxoView(tx) if err != nil { return nil, err @@ -615,7 +615,7 @@ func (mp *TxPool) fetchInputUtxos(tx *btcutil.Tx) (*blockchain.UtxoViewpoint, er // orphans. // // This function is safe for concurrent access. -func (mp *TxPool) FetchTransaction(txHash *chainhash.Hash) (*btcutil.Tx, error) { +func (mp *TxPool) FetchTransaction(txHash *daghash.Hash) (*btcutil.Tx, error) { // Protect concurrent access. mp.mtx.RLock() txDesc, exists := mp.pool[*txHash] @@ -633,7 +633,7 @@ func (mp *TxPool) FetchTransaction(txHash *chainhash.Hash) (*btcutil.Tx, error) // more details. // // This function MUST be called with the mempool lock held (for writes). 
-func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejectDupOrphans bool) ([]*chainhash.Hash, *TxDesc, error) { +func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejectDupOrphans bool) ([]*daghash.Hash, *TxDesc, error) { txHash := tx.Hash() // Don't accept the transaction if it already exists in the pool. This @@ -650,16 +650,16 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // Perform preliminary sanity checks on the transaction. This makes // use of blockchain which contains the invariant rules for what // transactions are allowed into blocks. - err := blockchain.CheckTransactionSanity(tx) + err := blockdag.CheckTransactionSanity(tx) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { + if cerr, ok := err.(blockdag.RuleError); ok { return nil, nil, chainRuleError(cerr) } return nil, nil, err } // A standalone transaction must not be a coinbase transaction. - if blockchain.IsCoinBase(tx) { + if blockdag.IsCoinBase(tx) { str := fmt.Sprintf("transaction %v is an individual coinbase", txHash) return nil, nil, txRuleError(wire.RejectInvalid, str) @@ -712,7 +712,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // without needing to do a separate lookup. utxoView, err := mp.fetchInputUtxos(tx) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { + if cerr, ok := err.(blockdag.RuleError); ok { return nil, nil, chainRuleError(cerr) } return nil, nil, err @@ -735,7 +735,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // don't exist or are already spent. Adding orphans to the orphan pool // is not handled by this function, and the caller should use // maybeAddOrphan if this behavior is desired. 
- var missingParents []*chainhash.Hash + var missingParents []*daghash.Hash for outpoint, entry := range utxoView.Entries() { if entry == nil || entry.IsSpent() { // Must make a copy of the hash here since the iterator @@ -755,12 +755,12 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // with respect to its defined relative lock times. sequenceLock, err := mp.cfg.CalcSequenceLock(tx, utxoView) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { + if cerr, ok := err.(blockdag.RuleError); ok { return nil, nil, chainRuleError(cerr) } return nil, nil, err } - if !blockchain.SequenceLockActive(sequenceLock, nextBlockHeight, + if !blockdag.SequenceLockActive(sequenceLock, nextBlockHeight, medianTimePast) { return nil, nil, txRuleError(wire.RejectNonstandard, "transaction's sequence locks on inputs not met") @@ -770,10 +770,10 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // rules in blockchain for what transactions are allowed into blocks. // Also returns the fees associated with the transaction which will be // used later. - txFee, err := blockchain.CheckTransactionInputs(tx, nextBlockHeight, + txFee, err := blockdag.CheckTransactionInputs(tx, nextBlockHeight, utxoView, mp.cfg.ChainParams) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { + if cerr, ok := err.(blockdag.RuleError); ok { return nil, nil, chainRuleError(cerr) } return nil, nil, err @@ -806,9 +806,9 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // the coinbase address itself can contain signature operations, the // maximum allowed signature operations per transaction is less than // the maximum allowed signature operations per block. 
- sigOpCount, err := blockchain.CountP2SHSigOps(tx, false, utxoView) + sigOpCount, err := blockdag.CountP2SHSigOps(tx, false, utxoView) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { + if cerr, ok := err.(blockdag.RuleError); ok { return nil, nil, chainRuleError(cerr) } return nil, nil, err @@ -881,10 +881,10 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // Verify crypto signatures for each input and reject the transaction if // any don't verify. - err = blockchain.ValidateTransactionScripts(tx, utxoView, + err = blockdag.ValidateTransactionScripts(tx, utxoView, txscript.StandardVerifyFlags, mp.cfg.SigCache) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { + if cerr, ok := err.(blockdag.RuleError); ok { return nil, nil, chainRuleError(cerr) } return nil, nil, err @@ -910,7 +910,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec // be added to the orphan pool. // // This function is safe for concurrent access. -func (mp *TxPool) MaybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit bool) ([]*chainhash.Hash, *TxDesc, error) { +func (mp *TxPool) MaybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit bool) ([]*daghash.Hash, *TxDesc, error) { // Protect concurrent access. mp.mtx.Lock() hashes, txD, err := mp.maybeAcceptTransaction(tx, isNew, rateLimit, true) @@ -1100,9 +1100,9 @@ func (mp *TxPool) Count() int { // pool. // // This function is safe for concurrent access. 
-func (mp *TxPool) TxHashes() []*chainhash.Hash { +func (mp *TxPool) TxHashes() []*daghash.Hash { mp.mtx.RLock() - hashes := make([]*chainhash.Hash, len(mp.pool)) + hashes := make([]*daghash.Hash, len(mp.pool)) i := 0 for hash := range mp.pool { hashCopy := hash @@ -1209,9 +1209,9 @@ func (mp *TxPool) LastUpdated() time.Time { func New(cfg *Config) *TxPool { return &TxPool{ cfg: *cfg, - pool: make(map[chainhash.Hash]*TxDesc), - orphans: make(map[chainhash.Hash]*orphanTx), - orphansByPrev: make(map[wire.OutPoint]map[chainhash.Hash]*btcutil.Tx), + pool: make(map[daghash.Hash]*TxDesc), + orphans: make(map[daghash.Hash]*orphanTx), + orphansByPrev: make(map[wire.OutPoint]map[daghash.Hash]*btcutil.Tx), nextExpireScan: time.Now().Add(orphanExpireScanInterval), outpoints: make(map[wire.OutPoint]*btcutil.Tx), } diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index c1b483607..18fc08c25 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -26,7 +26,7 @@ import ( // transactions to appear as though they are spending completely valid utxos. type fakeChain struct { sync.RWMutex - utxos *blockchain.UtxoViewpoint + utxos *blockdag.UtxoViewpoint currentHeight int32 medianTimePast time.Time } @@ -37,7 +37,7 @@ type fakeChain struct { // view can be examined for duplicate transactions. // // This function is safe for concurrent access however the returned view is NOT. 
-func (s *fakeChain) FetchUtxoView(tx *btcutil.Tx) (*blockchain.UtxoViewpoint, error) { +func (s *fakeChain) FetchUtxoView(tx *btcutil.Tx) (*blockdag.UtxoViewpoint, error) { s.RLock() defer s.RUnlock() @@ -45,7 +45,7 @@ func (s *fakeChain) FetchUtxoView(tx *btcutil.Tx) (*blockchain.UtxoViewpoint, er // do not affect the fake chain's view. // Add an entry for the tx itself to the new view. - viewpoint := blockchain.NewUtxoViewpoint() + viewpoint := blockdag.NewUtxoViewpoint() prevOut := wire.OutPoint{Hash: *tx.Hash()} for txOutIdx := range tx.MsgTx().TxOut { prevOut.Index = uint32(txOutIdx) @@ -98,9 +98,9 @@ func (s *fakeChain) SetMedianTimePast(mtp time.Time) { // CalcSequenceLock returns the current sequence lock for the passed // transaction associated with the fake chain instance. func (s *fakeChain) CalcSequenceLock(tx *btcutil.Tx, - view *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) { + view *blockdag.UtxoViewpoint) (*blockdag.SequenceLock, error) { - return &blockchain.SequenceLock{ + return &blockdag.SequenceLock{ Seconds: -1, BlockHeight: -1, }, nil @@ -135,7 +135,7 @@ type poolHarness struct { signKey *btcec.PrivateKey payAddr btcutil.Address payScript []byte - chainParams *chaincfg.Params + chainParams *dagconfig.Params chain *fakeChain txPool *TxPool @@ -159,12 +159,12 @@ func (p *poolHarness) CreateCoinbaseTx(blockHeight int32, numOutputs uint32) (*b tx.AddTxIn(&wire.TxIn{ // Coinbase transactions have no inputs, so previous outpoint is // zero hash and max index. 
- PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, + PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{}, wire.MaxPrevOutIndex), SignatureScript: coinbaseScript, Sequence: wire.MaxTxInSequenceNum, }) - totalInput := blockchain.CalcBlockSubsidy(blockHeight, p.chainParams) + totalInput := blockdag.CalcBlockSubsidy(blockHeight, p.chainParams) amountPerOutput := totalInput / int64(numOutputs) remainder := totalInput - amountPerOutput*int64(numOutputs) for i := uint32(0); i < numOutputs; i++ { @@ -276,7 +276,7 @@ func (p *poolHarness) CreateTxChain(firstOutput spendableOutput, numTxns uint32) // for testing. Also, the fake chain is populated with the returned spendable // outputs so the caller can easily create new valid transactions which build // off of it. -func newPoolHarness(chainParams *chaincfg.Params) (*poolHarness, []spendableOutput, error) { +func newPoolHarness(chainParams *dagconfig.Params) (*poolHarness, []spendableOutput, error) { // Use a hard coded key pair for deterministic results. keyBytes, err := hex.DecodeString("700868df1838811ffbdf918fb482c1f7e" + "ad62db4b97bd7012c23e726485e577d") @@ -299,7 +299,7 @@ func newPoolHarness(chainParams *chaincfg.Params) (*poolHarness, []spendableOutp } // Create a new fake chain and harness bound to it. 
- chain := &fakeChain{utxos: blockchain.NewUtxoViewpoint()} + chain := &fakeChain{utxos: blockdag.NewUtxoViewpoint()} harness := poolHarness{ signKey: signKey, payAddr: payAddr, @@ -313,7 +313,7 @@ func newPoolHarness(chainParams *chaincfg.Params) (*poolHarness, []spendableOutp FreeTxRelayLimit: 15.0, MaxOrphanTxs: 5, MaxOrphanTxSize: 1000, - MaxSigOpsPerTx: blockchain.MaxSigOpsPerBlock / 5, + MaxSigOpsPerTx: blockdag.MaxSigOpsPerBlock / 5, MinRelayTxFee: 1000, // 1 Satoshi per byte MaxTxVersion: 1, }, @@ -394,7 +394,7 @@ func testPoolMembership(tc *testContext, tx *btcutil.Tx, inOrphanPool, inTxPool func TestSimpleOrphanChain(t *testing.T) { t.Parallel() - harness, spendableOuts, err := newPoolHarness(&chaincfg.MainNetParams) + harness, spendableOuts, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } @@ -457,7 +457,7 @@ func TestSimpleOrphanChain(t *testing.T) { func TestOrphanReject(t *testing.T) { t.Parallel() - harness, outputs, err := newPoolHarness(&chaincfg.MainNetParams) + harness, outputs, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } @@ -512,7 +512,7 @@ func TestOrphanReject(t *testing.T) { func TestOrphanEviction(t *testing.T) { t.Parallel() - harness, outputs, err := newPoolHarness(&chaincfg.MainNetParams) + harness, outputs, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } @@ -577,7 +577,7 @@ func TestBasicOrphanRemoval(t *testing.T) { t.Parallel() const maxOrphans = 4 - harness, spendableOuts, err := newPoolHarness(&chaincfg.MainNetParams) + harness, spendableOuts, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } @@ -617,7 +617,7 @@ func TestBasicOrphanRemoval(t *testing.T) { // and ensure the state of all other orphans are unaffected. 
nonChainedOrphanTx, err := harness.CreateSignedTx([]spendableOutput{{ amount: btcutil.Amount(5000000000), - outPoint: wire.OutPoint{Hash: chainhash.Hash{}, Index: 0}, + outPoint: wire.OutPoint{Hash: daghash.Hash{}, Index: 0}, }}, 1) if err != nil { t.Fatalf("unable to create signed tx: %v", err) @@ -652,7 +652,7 @@ func TestOrphanChainRemoval(t *testing.T) { t.Parallel() const maxOrphans = 10 - harness, spendableOuts, err := newPoolHarness(&chaincfg.MainNetParams) + harness, spendableOuts, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } @@ -715,7 +715,7 @@ func TestMultiInputOrphanDoubleSpend(t *testing.T) { t.Parallel() const maxOrphans = 4 - harness, outputs, err := newPoolHarness(&chaincfg.MainNetParams) + harness, outputs, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } @@ -803,7 +803,7 @@ func TestMultiInputOrphanDoubleSpend(t *testing.T) { func TestCheckSpend(t *testing.T) { t.Parallel() - harness, outputs, err := newPoolHarness(&chaincfg.MainNetParams) + harness, outputs, err := newPoolHarness(&dagconfig.MainNetParams) if err != nil { t.Fatalf("unable to create test pool: %v", err) } diff --git a/mempool/policy.go b/mempool/policy.go index 891d8a878..1f6c45f7f 100644 --- a/mempool/policy.go +++ b/mempool/policy.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -90,7 +90,7 @@ func calcMinRequiredTxRelayFee(serializedSize int64, minRelayTxFee btcutil.Amoun // not perform those checks because the script engine already does this more // accurately and concisely via the txscript.ScriptVerifyCleanStack and // txscript.ScriptVerifySigPushOnly flags. 
-func checkInputsStandard(tx *btcutil.Tx, utxoView *blockchain.UtxoViewpoint) error { +func checkInputsStandard(tx *btcutil.Tx, utxoView *blockdag.UtxoViewpoint) error { // NOTE: The reference implementation also does a coinbase check here, // but coinbases have already been rejected prior to calling this // function so no need to recheck. @@ -265,7 +265,7 @@ func checkTransactionStandard(tx *btcutil.Tx, height int32, // The transaction must be finalized to be standard and therefore // considered for inclusion in a block. - if !blockchain.IsFinalizedTransaction(tx, height, medianTimePast) { + if !blockdag.IsFinalizedTransaction(tx, height, medianTimePast) { return txRuleError(wire.RejectNonstandard, "transaction is not finalized") } diff --git a/mempool/policy_test.go b/mempool/policy_test.go index 85ef1a25f..505b95dc6 100644 --- a/mempool/policy_test.go +++ b/mempool/policy_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -280,7 +280,7 @@ func TestDust(t *testing.T) { // TestCheckTransactionStandard tests the checkTransactionStandard API. func TestCheckTransactionStandard(t *testing.T) { // Create some dummy, but otherwise standard, data for transactions. 
- prevOutHash, err := chainhash.NewHashFromStr("01") + prevOutHash, err := daghash.NewHashFromStr("01") if err != nil { t.Fatalf("NewShaHashFromStr: unexpected error: %v", err) } @@ -293,7 +293,7 @@ func TestCheckTransactionStandard(t *testing.T) { } addrHash := [20]byte{0x01} addr, err := btcutil.NewAddressPubKeyHash(addrHash[:], - &chaincfg.TestNet3Params) + &dagconfig.TestNet3Params) if err != nil { t.Fatalf("NewAddressPubKeyHash: unexpected error: %v", err) } diff --git a/mining/cpuminer/cpuminer.go b/mining/cpuminer/cpuminer.go index da2fcf21d..f3184f293 100644 --- a/mining/cpuminer/cpuminer.go +++ b/mining/cpuminer/cpuminer.go @@ -12,9 +12,9 @@ import ( "sync" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/mining" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -51,7 +51,7 @@ var ( type Config struct { // ChainParams identifies which chain parameters the cpu miner is // associated with. - ChainParams *chaincfg.Params + ChainParams *dagconfig.Params // BlockTemplateGenerator identifies the instance to use in order to // generate block templates that the miner will attempt to solve. @@ -64,7 +64,7 @@ type Config struct { // ProcessBlock defines the function to call with any solved blocks. // It typically must run the provided block through the same set of // rules and handling as any other block coming from the network. - ProcessBlock func(*btcutil.Block, blockchain.BehaviorFlags) (bool, error) + ProcessBlock func(*btcutil.Block, blockdag.BehaviorFlags) (bool, error) // ConnectedCount defines the function to use to obtain how many other // peers the server is connected to. 
This is used by the automatic @@ -162,7 +162,7 @@ func (m *CPUMiner) submitBlock(block *btcutil.Block) bool { // a new block, but the check only happens periodically, so it is // possible a block was found and submitted in between. msgBlock := block.MsgBlock() - if !msgBlock.Header.PrevBlock.IsEqual(&m.g.BestSnapshot().Hash) { + if !msgBlock.Header.PrevBlock.IsEqual(&m.g.GetDAGState().SelectedTip.Hash) { log.Debugf("Block submitted via CPU miner with previous "+ "block %s is stale", msgBlock.Header.PrevBlock) return false @@ -170,11 +170,11 @@ func (m *CPUMiner) submitBlock(block *btcutil.Block) bool { // Process this block using the same rules as blocks coming from other // nodes. This will in turn relay it to the network like normal. - isOrphan, err := m.cfg.ProcessBlock(block, blockchain.BFNone) + isOrphan, err := m.cfg.ProcessBlock(block, blockdag.BFNone) if err != nil { // Anything other than a rule violation is an unexpected error, // so log that error as an internal error. - if _, ok := err.(blockchain.RuleError); !ok { + if _, ok := err.(blockdag.RuleError); !ok { log.Errorf("Unexpected error while processing "+ "block submitted via CPU miner: %v", err) return false @@ -218,7 +218,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32, // Create some convenience variables. header := &msgBlock.Header - targetDifficulty := blockchain.CompactToBig(header.Bits) + targetDifficulty := blockdag.CompactToBig(header.Bits) // Initial state. lastGenerated := time.Now() @@ -246,10 +246,9 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32, m.updateHashes <- hashesCompleted hashesCompleted = 0 - // The current block is stale if the best block - // has changed. - best := m.g.BestSnapshot() - if !header.PrevBlock.IsEqual(&best.Hash) { + // The current block is stale if the DAG has changed. 
+ dagState := m.g.GetDAGState() + if !header.PrevBlock.IsEqual(&dagState.SelectedTip.Hash) { return false } @@ -279,7 +278,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32, // The block is solved when the new block hash is less // than the target difficulty. Yay! - if blockchain.HashToBig(&hash).Cmp(targetDifficulty) <= 0 { + if blockdag.HashToBig(&hash).Cmp(targetDifficulty) <= 0 { m.updateHashes <- hashesCompleted return true } @@ -327,7 +326,7 @@ out: // this would otherwise end up building a new block template on // a block that is in the process of becoming stale. m.submitBlockLock.Lock() - curHeight := m.g.BestSnapshot().Height + curHeight := m.g.GetDAGState().SelectedTip.Height if curHeight != 0 && !m.cfg.IsCurrent() { m.submitBlockLock.Unlock() time.Sleep(time.Second) @@ -544,7 +543,7 @@ func (m *CPUMiner) NumWorkers() int32 { // detecting when it is performing stale work and reacting accordingly by // generating a new block template. When a block is solved, it is submitted. // The function returns a list of the hashes of generated blocks. -func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*chainhash.Hash, error) { +func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*daghash.Hash, error) { m.Lock() // Respond with an error if server is already mining. @@ -566,7 +565,7 @@ func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*chainhash.Hash, error) { log.Tracef("Generating %d blocks", n) i := uint32(0) - blockHashes := make([]*chainhash.Hash, n) + blockHashes := make([]*daghash.Hash, n) // Start a ticker which is used to signal checks for stale work and // updates to the speed monitor. @@ -586,7 +585,7 @@ func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*chainhash.Hash, error) { // be changing and this would otherwise end up building a new block // template on a block that is in the process of becoming stale. 
m.submitBlockLock.Lock() - curHeight := m.g.BestSnapshot().Height + curHeight := m.g.GetDAGState().SelectedTip.Height // Choose a payment address at random. rand.Seed(time.Now().UnixNano()) diff --git a/mining/mining.go b/mining/mining.go index ef1e98d2f..36ece3eae 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -9,9 +9,9 @@ import ( "fmt" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -68,7 +68,7 @@ type TxSource interface { // HaveTransaction returns whether or not the passed transaction hash // exists in the source pool. - HaveTransaction(hash *chainhash.Hash) bool + HaveTransaction(hash *daghash.Hash) bool } // txPrioItem houses a transaction along with extra information that allows the @@ -84,7 +84,7 @@ type txPrioItem struct { // on. It will only be set when the transaction references other // transactions in the source pool and hence must come after them in // a block. - dependsOn map[chainhash.Hash]struct{} + dependsOn map[daghash.Hash]struct{} } // txPriorityQueueLessFunc describes a function that can be used as a compare @@ -216,7 +216,7 @@ type BlockTemplate struct { // viewA will contain all of its original entries plus all of the entries // in viewB. It will replace any entries in viewB which also exist in viewA // if the entry in viewA is spent. 
-func mergeUtxoView(viewA *blockchain.UtxoViewpoint, viewB *blockchain.UtxoViewpoint) { +func mergeUtxoView(viewA *blockdag.UtxoViewpoint, viewB *blockdag.UtxoViewpoint) { viewAEntries := viewA.Entries() for outpoint, entryB := range viewB.Entries() { if entryA, exists := viewAEntries[outpoint]; !exists || @@ -243,7 +243,7 @@ func standardCoinbaseScript(nextBlockHeight int32, extraNonce uint64) ([]byte, e // // See the comment for NewBlockTemplate for more information about why the nil // address handling is useful. -func createCoinbaseTx(params *chaincfg.Params, coinbaseScript []byte, nextBlockHeight int32, addr btcutil.Address) (*btcutil.Tx, error) { +func createCoinbaseTx(params *dagconfig.Params, coinbaseScript []byte, nextBlockHeight int32, addr btcutil.Address) (*btcutil.Tx, error) { // Create the script to pay to the provided payment address if one was // specified. Otherwise create a script that allows the coinbase to be // redeemable by anyone. @@ -267,13 +267,13 @@ func createCoinbaseTx(params *chaincfg.Params, coinbaseScript []byte, nextBlockH tx.AddTxIn(&wire.TxIn{ // Coinbase transactions have no inputs, so previous outpoint is // zero hash and max index. - PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, + PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{}, wire.MaxPrevOutIndex), SignatureScript: coinbaseScript, Sequence: wire.MaxTxInSequenceNum, }) tx.AddTxOut(&wire.TxOut{ - Value: blockchain.CalcBlockSubsidy(nextBlockHeight, params), + Value: blockdag.CalcBlockSubsidy(nextBlockHeight, params), PkScript: pkScript, }) return btcutil.NewTx(tx), nil @@ -282,7 +282,7 @@ func createCoinbaseTx(params *chaincfg.Params, coinbaseScript []byte, nextBlockH // spendTransaction updates the passed view by marking the inputs to the passed // transaction as spent. It also adds all outputs in the passed transaction // which are not provably unspendable as available unspent transaction outputs. 
-func spendTransaction(utxoView *blockchain.UtxoViewpoint, tx *btcutil.Tx, height int32) error { +func spendTransaction(utxoView *blockdag.UtxoViewpoint, tx *btcutil.Tx, height int32) error { for _, txIn := range tx.MsgTx().TxIn { entry := utxoView.LookupEntry(txIn.PreviousOutPoint) if entry != nil { @@ -296,7 +296,7 @@ func spendTransaction(utxoView *blockchain.UtxoViewpoint, tx *btcutil.Tx, height // logSkippedDeps logs any dependencies which are also skipped as a result of // skipping a transaction while generating a block template at the trace level. -func logSkippedDeps(tx *btcutil.Tx, deps map[chainhash.Hash]*txPrioItem) { +func logSkippedDeps(tx *btcutil.Tx, deps map[daghash.Hash]*txPrioItem) { if deps == nil { return } @@ -311,14 +311,14 @@ func logSkippedDeps(tx *btcutil.Tx, deps map[chainhash.Hash]*txPrioItem) { // on the end of the provided best chain. In particular, it is one second after // the median timestamp of the last several blocks per the chain consensus // rules. -func MinimumMedianTime(chainState *blockchain.BestState) time.Time { - return chainState.MedianTime.Add(time.Second) +func MinimumMedianTime(dagState *blockdag.DAGState) time.Time { + return dagState.SelectedTip.MedianTime.Add(time.Second) } // medianAdjustedTime returns the current time adjusted to ensure it is at least // one second after the median timestamp of the last several blocks per the // chain consensus rules. -func medianAdjustedTime(chainState *blockchain.BestState, timeSource blockchain.MedianTimeSource) time.Time { +func medianAdjustedTime(chainState *blockdag.DAGState, timeSource blockdag.MedianTimeSource) time.Time { // The timestamp for the block must not be before the median timestamp // of the last several blocks. Thus, choose the maximum between the // current time and one second after the past median time. The current @@ -340,10 +340,10 @@ func medianAdjustedTime(chainState *blockchain.BestState, timeSource blockchain. 
// are built on top of the current best chain and adhere to the consensus rules. type BlkTmplGenerator struct { policy *Policy - chainParams *chaincfg.Params + chainParams *dagconfig.Params txSource TxSource - chain *blockchain.BlockChain - timeSource blockchain.MedianTimeSource + dag *blockdag.BlockDAG + timeSource blockdag.MedianTimeSource sigCache *txscript.SigCache } @@ -353,16 +353,16 @@ type BlkTmplGenerator struct { // The additional state-related fields are required in order to ensure the // templates are built on top of the current best chain and adhere to the // consensus rules. -func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params, - txSource TxSource, chain *blockchain.BlockChain, - timeSource blockchain.MedianTimeSource, +func NewBlkTmplGenerator(policy *Policy, params *dagconfig.Params, + txSource TxSource, dag *blockdag.BlockDAG, + timeSource blockdag.MedianTimeSource, sigCache *txscript.SigCache) *BlkTmplGenerator { return &BlkTmplGenerator{ policy: policy, chainParams: params, txSource: txSource, - chain: chain, + dag: dag, timeSource: timeSource, sigCache: sigCache, } @@ -432,8 +432,8 @@ func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params, // ----------------------------------- -- func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress btcutil.Address) (*BlockTemplate, error) { // Extend the most recently known best block. - best := g.chain.BestSnapshot() - nextBlockHeight := best.Height + 1 + dagState := g.dag.GetDAGState() + nextBlockHeight := dagState.SelectedTip.Height + 1 // Create a standard coinbase transaction paying to the provided // address. 
NOTE: The coinbase value will be updated to include the @@ -453,7 +453,7 @@ func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress btcutil.Address) (*Bloc if err != nil { return nil, err } - numCoinbaseSigOps := int64(blockchain.CountSigOps(coinbaseTx)) + numCoinbaseSigOps := int64(blockdag.CountSigOps(coinbaseTx)) // Get the current source transactions and create a priority queue to // hold the transactions which are ready for inclusion into a block @@ -471,14 +471,14 @@ func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress btcutil.Address) (*Bloc // avoided. blockTxns := make([]*btcutil.Tx, 0, len(sourceTxns)) blockTxns = append(blockTxns, coinbaseTx) - blockUtxos := blockchain.NewUtxoViewpoint() + blockUtxos := blockdag.NewUtxoViewpoint() // dependers is used to track transactions which depend on another // transaction in the source pool. This, in conjunction with the // dependsOn map kept with each dependent transaction helps quickly // determine which dependent transactions are now eligible for inclusion // in the block once each transaction has been included. - dependers := make(map[chainhash.Hash]map[chainhash.Hash]*txPrioItem) + dependers := make(map[daghash.Hash]map[daghash.Hash]*txPrioItem) // Create slices to hold the fees and number of signature operations // for each of the selected transactions and add an entry for the @@ -499,11 +499,11 @@ mempoolLoop: // A block can't have more than one coinbase or contain // non-finalized transactions. 
tx := txDesc.Tx - if blockchain.IsCoinBase(tx) { + if blockdag.IsCoinBase(tx) { log.Tracef("Skipping coinbase tx %s", tx.Hash()) continue } - if !blockchain.IsFinalizedTransaction(tx, nextBlockHeight, + if !blockdag.IsFinalizedTransaction(tx, nextBlockHeight, g.timeSource.AdjustedTime()) { log.Tracef("Skipping non-finalized tx %s", tx.Hash()) @@ -515,7 +515,7 @@ mempoolLoop: // mempool since a transaction which depends on other // transactions in the mempool must come after those // dependencies in the final generated block. - utxos, err := g.chain.FetchUtxoView(tx) + utxos, err := g.dag.FetchUtxoView(tx) if err != nil { log.Warnf("Unable to fetch utxo view for tx %s: %v", tx.Hash(), err) @@ -543,13 +543,13 @@ mempoolLoop: // ordering dependency. deps, exists := dependers[*originHash] if !exists { - deps = make(map[chainhash.Hash]*txPrioItem) + deps = make(map[daghash.Hash]*txPrioItem) dependers[*originHash] = deps } deps[*prioItem.tx.Hash()] = prioItem if prioItem.dependsOn == nil { prioItem.dependsOn = make( - map[chainhash.Hash]struct{}) + map[daghash.Hash]struct{}) } prioItem.dependsOn[*originHash] = struct{}{} @@ -615,15 +615,15 @@ mempoolLoop: // Enforce maximum signature operations per block. Also check // for overflow. 
- numSigOps := int64(blockchain.CountSigOps(tx)) + numSigOps := int64(blockdag.CountSigOps(tx)) if blockSigOps+numSigOps < blockSigOps || - blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock { + blockSigOps+numSigOps > blockdag.MaxSigOpsPerBlock { log.Tracef("Skipping tx %s because it would exceed "+ "the maximum sigops per block", tx.Hash()) logSkippedDeps(tx, deps) continue } - numP2SHSigOps, err := blockchain.CountP2SHSigOps(tx, false, + numP2SHSigOps, err := blockdag.CountP2SHSigOps(tx, false, blockUtxos) if err != nil { log.Tracef("Skipping tx %s due to error in "+ @@ -633,7 +633,7 @@ mempoolLoop: } numSigOps += int64(numP2SHSigOps) if blockSigOps+numSigOps < blockSigOps || - blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock { + blockSigOps+numSigOps > blockdag.MaxSigOpsPerBlock { log.Tracef("Skipping tx %s because it would "+ "exceed the maximum sigops per block", tx.Hash()) logSkippedDeps(tx, deps) @@ -686,7 +686,7 @@ mempoolLoop: // Ensure the transaction inputs pass all of the necessary // preconditions before allowing it to be added to the block. - _, err = blockchain.CheckTransactionInputs(tx, nextBlockHeight, + _, err = blockdag.CheckTransactionInputs(tx, nextBlockHeight, blockUtxos, g.chainParams) if err != nil { log.Tracef("Skipping tx %s due to error in "+ @@ -694,7 +694,7 @@ mempoolLoop: logSkippedDeps(tx, deps) continue } - err = blockchain.ValidateTransactionScripts(tx, blockUtxos, + err = blockdag.ValidateTransactionScripts(tx, blockUtxos, txscript.StandardVerifyFlags, g.sigCache) if err != nil { log.Tracef("Skipping tx %s due to error in "+ @@ -746,25 +746,25 @@ mempoolLoop: // Calculate the required difficulty for the block. The timestamp // is potentially adjusted to ensure it comes after the median time of // the last several blocks per the chain consensus rules. 
- ts := medianAdjustedTime(best, g.timeSource) - reqDifficulty, err := g.chain.CalcNextRequiredDifficulty(ts) + ts := medianAdjustedTime(dagState, g.timeSource) + reqDifficulty, err := g.dag.CalcNextRequiredDifficulty(ts) if err != nil { return nil, err } // Calculate the next expected block version based on the state of the // rule change deployments. - nextBlockVersion, err := g.chain.CalcNextBlockVersion() + nextBlockVersion, err := g.dag.CalcNextBlockVersion() if err != nil { return nil, err } // Create a new block ready to be solved. - merkles := blockchain.BuildMerkleTreeStore(blockTxns) + merkles := blockdag.BuildMerkleTreeStore(blockTxns) var msgBlock wire.MsgBlock msgBlock.Header = wire.BlockHeader{ Version: nextBlockVersion, - PrevBlock: best.Hash, + PrevBlock: dagState.SelectedTip.Hash, MerkleRoot: *merkles[len(merkles)-1], Timestamp: ts, Bits: reqDifficulty, @@ -780,14 +780,14 @@ mempoolLoop: // chain with no issues. block := btcutil.NewBlock(&msgBlock) block.SetHeight(nextBlockHeight) - if err := g.chain.CheckConnectBlockTemplate(block); err != nil { + if err := g.dag.CheckConnectBlockTemplate(block); err != nil { return nil, err } log.Debugf("Created new block template (%d transactions, %d in fees, "+ "%d signature operations, %d bytes, target difficulty %064x)", len(msgBlock.Transactions), totalFees, blockSigOps, blockSize, - blockchain.CompactToBig(msgBlock.Header.Bits)) + blockdag.CompactToBig(msgBlock.Header.Bits)) return &BlockTemplate{ Block: &msgBlock, @@ -808,12 +808,12 @@ func (g *BlkTmplGenerator) UpdateBlockTime(msgBlock *wire.MsgBlock) error { // The new timestamp is potentially adjusted to ensure it comes after // the median time of the last several blocks per the chain consensus // rules. - newTime := medianAdjustedTime(g.chain.BestSnapshot(), g.timeSource) + newTime := medianAdjustedTime(g.dag.GetDAGState(), g.timeSource) msgBlock.Header.Timestamp = newTime // Recalculate the difficulty if running on a network that requires it. 
if g.chainParams.ReduceMinDifficulty { - difficulty, err := g.chain.CalcNextRequiredDifficulty(newTime) + difficulty, err := g.dag.CalcNextRequiredDifficulty(newTime) if err != nil { return err } @@ -832,11 +832,11 @@ func (g *BlkTmplGenerator) UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight if err != nil { return err } - if len(coinbaseScript) > blockchain.MaxCoinbaseScriptLen { + if len(coinbaseScript) > blockdag.MaxCoinbaseScriptLen { return fmt.Errorf("coinbase transaction script length "+ "of %d is out of range (min: %d, max: %d)", - len(coinbaseScript), blockchain.MinCoinbaseScriptLen, - blockchain.MaxCoinbaseScriptLen) + len(coinbaseScript), blockdag.MinCoinbaseScriptLen, + blockdag.MaxCoinbaseScriptLen) } msgBlock.Transactions[0].TxIn[0].SignatureScript = coinbaseScript @@ -846,19 +846,19 @@ func (g *BlkTmplGenerator) UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight // Recalculate the merkle root with the updated extra nonce. block := btcutil.NewBlock(msgBlock) - merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) + merkles := blockdag.BuildMerkleTreeStore(block.Transactions()) msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] return nil } -// BestSnapshot returns information about the current best chain block and -// related state as of the current point in time using the chain instance +// GetDAGState returns information about the current state +// as of the current point in time using the DAG instance // associated with the block template generator. The returned state must be // treated as immutable since it is shared by all callers. // // This function is safe for concurrent access. -func (g *BlkTmplGenerator) BestSnapshot() *blockchain.BestState { - return g.chain.BestSnapshot() +func (g *BlkTmplGenerator) GetDAGState() *blockdag.DAGState { + return g.dag.GetDAGState() } // TxSource returns the associated transaction source. 
diff --git a/mining/policy.go b/mining/policy.go index fddb7381d..43759d54f 100644 --- a/mining/policy.go +++ b/mining/policy.go @@ -5,7 +5,7 @@ package mining import ( - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -54,7 +54,7 @@ func minInt(a, b int) int { // age is the sum of this value for each txin. Any inputs to the transaction // which are currently in the mempool and hence not mined into a block yet, // contribute no additional input age to the transaction. -func calcInputValueAge(tx *wire.MsgTx, utxoView *blockchain.UtxoViewpoint, nextBlockHeight int32) float64 { +func calcInputValueAge(tx *wire.MsgTx, utxoView *blockdag.UtxoViewpoint, nextBlockHeight int32) float64 { var totalInputAge float64 for _, txIn := range tx.TxIn { // Don't attempt to accumulate the total input age if the @@ -86,7 +86,7 @@ func calcInputValueAge(tx *wire.MsgTx, utxoView *blockchain.UtxoViewpoint, nextB // of each of its input values multiplied by their age (# of confirmations). 
// Thus, the final formula for the priority is: // sum(inputValue * inputAge) / adjustedTxSize -func CalcPriority(tx *wire.MsgTx, utxoView *blockchain.UtxoViewpoint, nextBlockHeight int32) float64 { +func CalcPriority(tx *wire.MsgTx, utxoView *blockdag.UtxoViewpoint, nextBlockHeight int32) float64 { // In order to encourage spending multiple old unspent transaction // outputs thereby reducing the total set, don't count the constant // overhead for each input as well as enough bytes of the signature diff --git a/mining/policy_test.go b/mining/policy_test.go index 01eebd6ae..07a229622 100644 --- a/mining/policy_test.go +++ b/mining/policy_test.go @@ -9,18 +9,18 @@ import ( "math" "testing" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) // newHashFromStr converts the passed big-endian hex string into a -// chainhash.Hash. It only differs from the one available in chainhash in that +// daghash.Hash. It only differs from the one available in daghash in that // it panics on an error since it will only (and must only) be called with // hard-coded, and therefore known good, hashes. -func newHashFromStr(hexStr string) *chainhash.Hash { - hash, err := chainhash.NewHashFromStr(hexStr) +func newHashFromStr(hexStr string) *daghash.Hash { + hash, err := daghash.NewHashFromStr(hexStr) if err != nil { panic("invalid hash in source file: " + hexStr) } @@ -43,12 +43,12 @@ func hexToBytes(s string) []byte { // provided source transactions as if there were available at the respective // block height specified in the heights slice. The length of the source txns // and source tx heights must match or it will panic. 
-func newUtxoViewpoint(sourceTxns []*wire.MsgTx, sourceTxHeights []int32) *blockchain.UtxoViewpoint { +func newUtxoViewpoint(sourceTxns []*wire.MsgTx, sourceTxHeights []int32) *blockdag.UtxoViewpoint { if len(sourceTxns) != len(sourceTxHeights) { panic("each transaction must have its block height specified") } - view := blockchain.NewUtxoViewpoint() + view := blockdag.NewUtxoViewpoint() for i, tx := range sourceTxns { view.AddTxOuts(btcutil.NewTx(tx), sourceTxHeights[i]) } @@ -66,7 +66,7 @@ func TestCalcPriority(t *testing.T) { Version: 1, TxIn: []*wire.TxIn{{ PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: wire.MaxPrevOutIndex, }, SignatureScript: hexToBytes("04ffff001d0134"), @@ -118,11 +118,11 @@ func TestCalcPriority(t *testing.T) { } tests := []struct { - name string // test description - tx *wire.MsgTx // tx to calc priority for - utxoView *blockchain.UtxoViewpoint // inputs to tx - nextHeight int32 // height for priority calc - want float64 // expected priority + name string // test description + tx *wire.MsgTx // tx to calc priority for + utxoView *blockdag.UtxoViewpoint // inputs to tx + nextHeight int32 // height for priority calc + want float64 // expected priority }{ { name: "one height 7 input, prio tx height 169", diff --git a/netsync/interface.go b/netsync/interface.go index 0a8cc075d..647b19693 100644 --- a/netsync/interface.go +++ b/netsync/interface.go @@ -5,9 +5,9 @@ package netsync import ( - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/mempool" "github.com/daglabs/btcd/peer" "github.com/daglabs/btcd/wire" @@ -20,7 +20,7 @@ import ( type PeerNotifier interface { AnnounceNewTransactions(newTxs []*mempool.TxDesc) - UpdatePeerHeights(latestBlkHash *chainhash.Hash, latestHeight int32, 
updateSource *peer.Peer) + UpdatePeerHeights(latestBlkHash *daghash.Hash, latestHeight int32, updateSource *peer.Peer) RelayInventory(invVect *wire.InvVect, data interface{}) @@ -30,9 +30,9 @@ type PeerNotifier interface { // Config is a configuration struct used to initialize a new SyncManager. type Config struct { PeerNotifier PeerNotifier - Chain *blockchain.BlockChain + DAG *blockdag.BlockDAG TxMemPool *mempool.TxPool - ChainParams *chaincfg.Params + ChainParams *dagconfig.Params DisableCheckpoints bool MaxPeers int diff --git a/netsync/manager.go b/netsync/manager.go index c2f8b0d24..ae309972f 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -11,9 +11,9 @@ import ( "sync/atomic" "time" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/mempool" peerpkg "github.com/daglabs/btcd/peer" @@ -41,7 +41,7 @@ const ( ) // zeroHash is the zero value hash (all zeros). It is defined as a convenience. -var zeroHash chainhash.Hash +var zeroHash daghash.Hash // newPeerMsg signifies a newly connected peer to the block handler. type newPeerMsg struct { @@ -103,7 +103,7 @@ type processBlockResponse struct { // way to call ProcessBlock on the internal block chain instance. type processBlockMsg struct { block *btcutil.Block - flags blockchain.BehaviorFlags + flags blockdag.BehaviorFlags reply chan processBlockResponse } @@ -126,7 +126,7 @@ type pauseMsg struct { // between checkpoints. 
type headerNode struct { height int32 - hash *chainhash.Hash + hash *daghash.Hash } // peerSyncState stores additional information that the SyncManager tracks @@ -134,8 +134,8 @@ type headerNode struct { type peerSyncState struct { syncCandidate bool requestQueue []*wire.InvVect - requestedTxns map[chainhash.Hash]struct{} - requestedBlocks map[chainhash.Hash]struct{} + requestedTxns map[daghash.Hash]struct{} + requestedBlocks map[daghash.Hash]struct{} } // SyncManager is used to communicate block related messages with peers. The @@ -147,18 +147,18 @@ type SyncManager struct { peerNotifier PeerNotifier started int32 shutdown int32 - chain *blockchain.BlockChain + dag *blockdag.BlockDAG txMemPool *mempool.TxPool - chainParams *chaincfg.Params + chainParams *dagconfig.Params progressLogger *blockProgressLogger msgChan chan interface{} wg sync.WaitGroup quit chan struct{} // These fields should only be accessed from the blockHandler thread - rejectedTxns map[chainhash.Hash]struct{} - requestedTxns map[chainhash.Hash]struct{} - requestedBlocks map[chainhash.Hash]struct{} + rejectedTxns map[daghash.Hash]struct{} + requestedTxns map[daghash.Hash]struct{} + requestedBlocks map[daghash.Hash]struct{} syncPeer *peerpkg.Peer peerStates map[*peerpkg.Peer]*peerSyncState @@ -166,7 +166,7 @@ type SyncManager struct { headersFirstMode bool headerList *list.List startHeader *list.Element - nextCheckpoint *chaincfg.Checkpoint + nextCheckpoint *dagconfig.Checkpoint // An optional fee estimator. feeEstimator *mempool.FeeEstimator @@ -174,7 +174,7 @@ type SyncManager struct { // resetHeaderState sets the headers-first mode state to values appropriate for // syncing from a new peer. 
-func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) { +func (sm *SyncManager) resetHeaderState(newestHash *daghash.Hash, newestHeight int32) { sm.headersFirstMode = false sm.headerList.Init() sm.startHeader = nil @@ -192,8 +192,8 @@ func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight // It returns nil when there is not one either because the height is already // later than the final checkpoint or some other reason such as disabled // checkpoints. -func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint { - checkpoints := sm.chain.Checkpoints() +func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *dagconfig.Checkpoint { + checkpoints := sm.dag.Checkpoints() if len(checkpoints) == 0 { return nil } @@ -226,7 +226,7 @@ func (sm *SyncManager) startSync() { return } - best := sm.chain.BestSnapshot() + dagState := sm.dag.GetDAGState() var bestPeer *peerpkg.Peer for peer, state := range sm.peerStates { if !state.syncCandidate { @@ -239,7 +239,7 @@ func (sm *SyncManager) startSync() { // doesn't have a later block when it's equal, it will likely // have one soon so it is a reasonable choice. It also allows // the case where both are at 0 such as during regression test. - if peer.LastBlock() < best.Height { + if peer.LastBlock() < dagState.SelectedTip.Height { state.syncCandidate = false continue } @@ -254,9 +254,9 @@ func (sm *SyncManager) startSync() { // Clear the requestedBlocks if the sync peer changes, otherwise // we may ignore blocks we need that the last sync peer failed // to send. 
- sm.requestedBlocks = make(map[chainhash.Hash]struct{}) + sm.requestedBlocks = make(map[daghash.Hash]struct{}) - locator, err := sm.chain.LatestBlockLocator() + locator, err := sm.dag.LatestBlockLocator() if err != nil { log.Errorf("Failed to get block locator for the "+ "latest block: %v", err) @@ -284,13 +284,13 @@ func (sm *SyncManager) startSync() { // not support the headers-first approach so do normal block // downloads when in regression test mode. if sm.nextCheckpoint != nil && - best.Height < sm.nextCheckpoint.Height && - sm.chainParams != &chaincfg.RegressionNetParams { + dagState.SelectedTip.Height < sm.nextCheckpoint.Height && + sm.chainParams != &dagconfig.RegressionNetParams { bestPeer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash) sm.headersFirstMode = true log.Infof("Downloading headers for blocks %d to "+ - "%d from peer %s", best.Height+1, + "%d from peer %s", dagState.SelectedTip.Height+1, sm.nextCheckpoint.Height, bestPeer.Addr()) } else { bestPeer.PushGetBlocksMsg(locator, &zeroHash) @@ -307,7 +307,7 @@ func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool { // Typically a peer is not a candidate for sync if it's not a full node, // however regression test is special in that the regression tool is // not a full node and still needs to be considered a sync candidate. - if sm.chainParams == &chaincfg.RegressionNetParams { + if sm.chainParams == &dagconfig.RegressionNetParams { // The peer is not a candidate if it's not coming from localhost // or the hostname can't be determined for some reason. 
host, _, err := net.SplitHostPort(peer.Addr()) @@ -346,8 +346,8 @@ func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) { isSyncCandidate := sm.isSyncCandidate(peer) sm.peerStates[peer] = &peerSyncState{ syncCandidate: isSyncCandidate, - requestedTxns: make(map[chainhash.Hash]struct{}), - requestedBlocks: make(map[chainhash.Hash]struct{}), + requestedTxns: make(map[daghash.Hash]struct{}), + requestedBlocks: make(map[daghash.Hash]struct{}), } // Start syncing by choosing the best candidate if needed. @@ -392,8 +392,8 @@ func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) { if sm.syncPeer == peer { sm.syncPeer = nil if sm.headersFirstMode { - best := sm.chain.BestSnapshot() - sm.resetHeaderState(&best.Hash, best.Height) + dagState := sm.dag.GetDAGState() + sm.resetHeaderState(&dagState.SelectedTip.Hash, dagState.SelectedTip.Height) } sm.startSync() } @@ -470,7 +470,7 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { // current returns true if we believe we are synced with our peers, false if we // still have blocks to check func (sm *SyncManager) current() bool { - if !sm.chain.IsCurrent() { + if !sm.dag.IsCurrent() { return false } @@ -482,7 +482,7 @@ func (sm *SyncManager) current() bool { // No matter what chain thinks, if we are below the block we are syncing // to we are not current. - if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() { + if sm.dag.GetDAGState().SelectedTip.Height < sm.syncPeer.LastBlock() { return false } return true @@ -505,7 +505,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // the peer or ignore the block when we're in regression test // mode in this case so the chain code is actually fed the // duplicate blocks. 
- if sm.chainParams != &chaincfg.RegressionNetParams { + if sm.chainParams != &dagconfig.RegressionNetParams { log.Warnf("Got unrequested block %v from %s -- "+ "disconnecting", blockHash, peer.Addr()) peer.Disconnect() @@ -521,13 +521,13 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // since it is needed to verify the next round of headers links // properly. isCheckpointBlock := false - behaviorFlags := blockchain.BFNone + behaviorFlags := blockdag.BFNone if sm.headersFirstMode { firstNodeEl := sm.headerList.Front() if firstNodeEl != nil { firstNode := firstNodeEl.Value.(*headerNode) if blockHash.IsEqual(firstNode.hash) { - behaviorFlags |= blockchain.BFFastAdd + behaviorFlags |= blockdag.BFFastAdd if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) { isCheckpointBlock = true } else { @@ -543,15 +543,14 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { delete(state.requestedBlocks, *blockHash) delete(sm.requestedBlocks, *blockHash) - // Process the block to include validation, best chain selection, orphan - // handling, etc. - _, isOrphan, err := sm.chain.ProcessBlock(bmsg.block, behaviorFlags) + // Process the block to include validation, orphan handling, etc. + isOrphan, err := sm.dag.ProcessBlock(bmsg.block, behaviorFlags) if err != nil { // When the error is a rule error, it means the block was simply // rejected as opposed to something actually going wrong, so log // it as such. Otherwise, something really did go wrong, so log // it as an actual error. - if _, ok := err.(blockchain.RuleError); ok { + if _, ok := err.(blockdag.RuleError); ok { log.Infof("Rejected block %v from %s: %v", blockHash, peer, err) } else { @@ -580,7 +579,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // if we are actively syncing while the chain is not yet current or // who may have lost the lock announcment race. 
var heightUpdate int32 - var blkHashUpdate *chainhash.Hash + var blkHashUpdate *daghash.Hash // Request the parents for the orphan block from the peer that sent it. if isOrphan { @@ -590,9 +589,9 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // Extraction is only attempted if the block's version is // high enough (ver 2+). header := &bmsg.block.MsgBlock().Header - if blockchain.ShouldHaveSerializedBlockHeight(header) { + if blockdag.ShouldHaveSerializedBlockHeight(header) { coinbaseTx := bmsg.block.Transactions()[0] - cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx) + cbHeight, err := blockdag.ExtractCoinbaseHeight(coinbaseTx) if err != nil { log.Warnf("Unable to extract height from "+ "coinbase tx: %v", err) @@ -604,8 +603,8 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { } } - orphanRoot := sm.chain.GetOrphanRoot(blockHash) - locator, err := sm.chain.LatestBlockLocator() + orphanRoot := sm.dag.GetOrphanRoot(blockHash) + locator, err := sm.dag.LatestBlockLocator() if err != nil { log.Warnf("Failed to get block locator for the "+ "latest block: %v", err) @@ -619,12 +618,12 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // Update this peer's latest block height, for future // potential sync node candidacy. - best := sm.chain.BestSnapshot() - heightUpdate = best.Height - blkHashUpdate = &best.Hash + dagState := sm.dag.GetDAGState() + heightUpdate = dagState.SelectedTip.Height + blkHashUpdate = &dagState.SelectedTip.Hash // Clear the rejected transactions. - sm.rejectedTxns = make(map[chainhash.Hash]struct{}) + sm.rejectedTxns = make(map[daghash.Hash]struct{}) } // Update the block height for this peer. 
But only send a message to @@ -663,7 +662,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { prevHash := sm.nextCheckpoint.Hash sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight) if sm.nextCheckpoint != nil { - locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash}) + locator := blockdag.BlockLocator([]*daghash.Hash{prevHash}) err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash) if err != nil { log.Warnf("Failed to send getheaders message to "+ @@ -682,7 +681,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { sm.headersFirstMode = false sm.headerList.Init() log.Infof("Reached the final checkpoint -- switching to normal mode") - locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash}) + locator := blockdag.BlockLocator([]*daghash.Hash{blockHash}) err = peer.PushGetBlocksMsg(locator, &zeroHash) if err != nil { log.Warnf("Failed to send getblocks message to peer %s: %v", @@ -766,7 +765,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) { // Process all of the received headers ensuring each one connects to the // previous and that checkpoints match. receivedCheckpoint := false - var finalHash *chainhash.Hash + var finalHash *daghash.Hash for _, blockHeader := range msg.Headers { blockHash := blockHeader.BlockHash() finalHash = &blockHash @@ -837,7 +836,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) { // This header is not a checkpoint, so request the next batch of // headers starting from the latest known header and ending with the // next checkpoint. 
- locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash}) + locator := blockdag.BlockLocator([]*daghash.Hash{finalHash}) err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash) if err != nil { log.Warnf("Failed to send getheaders message to "+ @@ -856,7 +855,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) { case wire.InvTypeBlock: // Ask chain if the block is known to it in any form (main // chain, side chain, or orphan). - return sm.chain.HaveBlock(&invVect.Hash) + return sm.dag.HaveBlock(&invVect.Hash) case wire.InvTypeTx: // Ask the transaction memory pool if the transaction is known @@ -876,7 +875,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) { prevOut := wire.OutPoint{Hash: invVect.Hash} for i := uint32(0); i < 2; i++ { prevOut.Index = i - entry, err := sm.chain.FetchUtxoEntry(prevOut) + entry, err := sm.dag.FetchUtxoEntry(prevOut) if err != nil { return false, err } @@ -932,7 +931,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { // If our chain is current and a peer announces a block we already // know of, then update their current block height. if lastBlock != -1 && sm.current() { - blkHeight, err := sm.chain.BlockHeightByHash(&invVects[lastBlock].Hash) + blkHeight, err := sm.dag.BlockHeightByHash(&invVects[lastBlock].Hash) if err == nil { peer.UpdateLastBlockHeight(blkHeight) } @@ -993,12 +992,12 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { // resending the orphan block as an available block // to signal there are more missing blocks that need to // be requested. - if sm.chain.IsKnownOrphan(&iv.Hash) { + if sm.dag.IsKnownOrphan(&iv.Hash) { // Request blocks starting at the latest known // up to the root of the orphan that just came // in. 
- orphanRoot := sm.chain.GetOrphanRoot(&iv.Hash) - locator, err := sm.chain.LatestBlockLocator() + orphanRoot := sm.dag.GetOrphanRoot(&iv.Hash) + locator, err := sm.dag.LatestBlockLocator() if err != nil { log.Errorf("PEER: Failed to get block "+ "locator for the latest block: "+ @@ -1017,7 +1016,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { // Request blocks after this one up to the // final one the remote peer knows about (zero // stop hash). - locator := sm.chain.BlockLocatorFromHash(&iv.Hash) + locator := sm.dag.BlockLocatorFromHash(&iv.Hash) peer.PushGetBlocksMsg(locator, &zeroHash) } } @@ -1072,7 +1071,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { // limitMap is a helper function for maps that require a maximum limit by // evicting a random transaction if adding a new value would cause it to // overflow the maximum allowed. -func (sm *SyncManager) limitMap(m map[chainhash.Hash]struct{}, limit int) { +func (sm *SyncManager) limitMap(m map[daghash.Hash]struct{}, limit int) { if len(m)+1 > limit { // Remove a random entry from the map. For most compilers, Go's // range statement iterates starting at a random item although @@ -1127,7 +1126,7 @@ out: msg.reply <- peerID case processBlockMsg: - _, isOrphan, err := sm.chain.ProcessBlock( + isOrphan, err := sm.dag.ProcessBlock( msg.block, msg.flags) if err != nil { msg.reply <- processBlockResponse{ @@ -1165,11 +1164,11 @@ out: // handleBlockchainNotification handles notifications from blockchain. It does // things such as request orphan block parents and relay accepted blocks to // connected peers. -func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) { +func (sm *SyncManager) handleBlockchainNotification(notification *blockdag.Notification) { switch notification.Type { // A block has been accepted into the block chain. Relay it to other // peers. - case blockchain.NTBlockAccepted: + case blockdag.NTBlockAccepted: // Don't relay if we are not current. 
Other peers that are // current should already know about it. if !sm.current() { @@ -1187,7 +1186,7 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) // A block has been connected to the main block chain. - case blockchain.NTBlockConnected: + case blockdag.NTBlockConnected: block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain connected notification is not a block.") @@ -1225,7 +1224,7 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not } // A block has been disconnected from the main block chain. - case blockchain.NTBlockDisconnected: + case blockdag.NTBlockDisconnected: block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain disconnected notification is not a block.") @@ -1356,7 +1355,7 @@ func (sm *SyncManager) SyncPeerID() int32 { // ProcessBlock makes use of ProcessBlock on an internal instance of a block // chain. -func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) { +func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockdag.BehaviorFlags) (bool, error) { reply := make(chan processBlockResponse, 1) sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply} response := <-reply @@ -1386,12 +1385,12 @@ func (sm *SyncManager) Pause() chan<- struct{} { func New(config *Config) (*SyncManager, error) { sm := SyncManager{ peerNotifier: config.PeerNotifier, - chain: config.Chain, + dag: config.DAG, txMemPool: config.TxMemPool, chainParams: config.ChainParams, - rejectedTxns: make(map[chainhash.Hash]struct{}), - requestedTxns: make(map[chainhash.Hash]struct{}), - requestedBlocks: make(map[chainhash.Hash]struct{}), + rejectedTxns: make(map[daghash.Hash]struct{}), + requestedTxns: make(map[daghash.Hash]struct{}), + requestedBlocks: make(map[daghash.Hash]struct{}), peerStates: make(map[*peerpkg.Peer]*peerSyncState), progressLogger: 
newBlockProgressLogger("Processed", log), msgChan: make(chan interface{}, config.MaxPeers*3), @@ -1400,18 +1399,18 @@ func New(config *Config) (*SyncManager, error) { feeEstimator: config.FeeEstimator, } - best := sm.chain.BestSnapshot() + dagState := sm.dag.GetDAGState() if !config.DisableCheckpoints { // Initialize the next checkpoint based on the current height. - sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height) + sm.nextCheckpoint = sm.findNextHeaderCheckpoint(dagState.SelectedTip.Height) if sm.nextCheckpoint != nil { - sm.resetHeaderState(&best.Hash, best.Height) + sm.resetHeaderState(&dagState.SelectedTip.Hash, dagState.SelectedTip.Height) } } else { log.Info("Checkpoints are disabled") } - sm.chain.Subscribe(sm.handleBlockchainNotification) + sm.dag.Subscribe(sm.handleBlockchainNotification) return &sm, nil } diff --git a/params.go b/params.go index c86028e99..3fb6cf138 100644 --- a/params.go +++ b/params.go @@ -5,7 +5,7 @@ package main import ( - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/wire" ) @@ -16,7 +16,7 @@ var activeNetParams = &mainNetParams // params is used to group parameters for various networks such as the main // network and test networks. type params struct { - *chaincfg.Params + *dagconfig.Params rpcPort string } @@ -27,7 +27,7 @@ type params struct { // it does not handle on to btcd. This approach allows the wallet process // to emulate the full reference implementation RPC API. var mainNetParams = params{ - Params: &chaincfg.MainNetParams, + Params: &dagconfig.MainNetParams, rpcPort: "8334", } @@ -36,7 +36,7 @@ var mainNetParams = params{ // than the reference implementation - see the mainNetParams comment for // details. var regressionNetParams = params{ - Params: &chaincfg.RegressionNetParams, + Params: &dagconfig.RegressionNetParams, rpcPort: "18334", } @@ -44,21 +44,21 @@ var regressionNetParams = params{ // (wire.TestNet3). 
NOTE: The RPC port is intentionally different than the // reference implementation - see the mainNetParams comment for details. var testNet3Params = params{ - Params: &chaincfg.TestNet3Params, + Params: &dagconfig.TestNet3Params, rpcPort: "18334", } // simNetParams contains parameters specific to the simulation test network // (wire.SimNet). var simNetParams = params{ - Params: &chaincfg.SimNetParams, + Params: &dagconfig.SimNetParams, rpcPort: "18556", } // netName returns the name used when referring to a bitcoin network. At the // time of writing, btcd currently places blocks for testnet version 3 in the // data and log directory "testnet", which does not match the Name field of the -// chaincfg parameters. This function can be used to override this directory +// dagconfig parameters. This function can be used to override this directory // name as "testnet" when the passed active network matches wire.TestNet3. // // A proper upgrade to move the data and log directories for this network to diff --git a/peer/example_test.go b/peer/example_test.go index 063c34666..e482146bf 100644 --- a/peer/example_test.go +++ b/peer/example_test.go @@ -9,7 +9,7 @@ import ( "net" "time" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/peer" "github.com/daglabs/btcd/wire" ) @@ -22,7 +22,7 @@ func mockRemotePeer() error { peerCfg := &peer.Config{ UserAgentName: "peer", // User agent name to advertise. UserAgentVersion: "1.0.0", // User agent version to advertise. - ChainParams: &chaincfg.SimNetParams, + ChainParams: &dagconfig.SimNetParams, } // Accept connections on the simnet port. @@ -67,7 +67,7 @@ func Example_newOutboundPeer() { peerCfg := &peer.Config{ UserAgentName: "peer", // User agent name to advertise. UserAgentVersion: "1.0.0", // User agent version to advertise. 
- ChainParams: &chaincfg.SimNetParams, + ChainParams: &dagconfig.SimNetParams, Services: 0, Listeners: peer.MessageListeners{ OnVersion: func(p *peer.Peer, msg *wire.MsgVersion) { diff --git a/peer/log.go b/peer/log.go index 5b1244f1e..c980548e7 100644 --- a/peer/log.go +++ b/peer/log.go @@ -10,7 +10,7 @@ import ( "time" "github.com/btcsuite/btclog" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" ) @@ -105,7 +105,7 @@ func invSummary(invList []*wire.InvVect) string { } // locatorSummary returns a block locator as a human-readable string. -func locatorSummary(locator []*chainhash.Hash, stopHash *chainhash.Hash) string { +func locatorSummary(locator []*daghash.Hash, stopHash *daghash.Hash) string { if len(locator) > 0 { return fmt.Sprintf("locator %s, stop %s", locator[0], stopHash) } diff --git a/peer/mruinvmap_test.go b/peer/mruinvmap_test.go index 7772a5dfe..352557a23 100644 --- a/peer/mruinvmap_test.go +++ b/peer/mruinvmap_test.go @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -22,7 +22,7 @@ func TestMruInventoryMap(t *testing.T) { numInvVects := 10 invVects := make([]*wire.InvVect, 0, numInvVects) for i := 0; i < numInvVects; i++ { - hash := &chainhash.Hash{byte(i)} + hash := &daghash.Hash{byte(i)} iv := wire.NewInvVect(wire.InvTypeBlock, hash) invVects = append(invVects, iv) } @@ -82,7 +82,7 @@ testLoop: mruInvMap.Add(invVects[origLruIndex]) iv := wire.NewInvVect(wire.InvTypeBlock, - &chainhash.Hash{0x00, 0x01}) + &daghash.Hash{0x00, 0x01}) mruInvMap.Add(iv) // Ensure the original lru entry still exists since it @@ -122,8 +122,8 @@ testLoop: func TestMruInventoryMapStringer(t *testing.T) { // Create a couple of fake inventory vectors to use in testing the mru // inventory stringer code. 
- hash1 := &chainhash.Hash{0x01} - hash2 := &chainhash.Hash{0x02} + hash1 := &daghash.Hash{0x01} + hash2 := &daghash.Hash{0x02} iv1 := wire.NewInvVect(wire.InvTypeBlock, hash1) iv2 := wire.NewInvVect(wire.InvTypeBlock, hash2) @@ -153,9 +153,9 @@ func BenchmarkMruInventoryList(b *testing.B) { numInvVects := 100000 invVects := make([]*wire.InvVect, 0, numInvVects) for i := 0; i < numInvVects; i++ { - hashBytes := make([]byte, chainhash.HashSize) + hashBytes := make([]byte, daghash.HashSize) rand.Read(hashBytes) - hash, _ := chainhash.NewHash(hashBytes) + hash, _ := daghash.NewHash(hashBytes) iv := wire.NewInvVect(wire.InvTypeBlock, hash) invVects = append(invVects, iv) } diff --git a/peer/peer.go b/peer/peer.go index a9e059330..6b9a5b5be 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -18,9 +18,9 @@ import ( "time" "github.com/btcsuite/go-socks/socks" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/davecgh/go-spew/spew" ) @@ -77,7 +77,7 @@ var ( // zeroHash is the zero value hash (all zeros). It is defined as a // convenience. - zeroHash chainhash.Hash + zeroHash daghash.Hash // sentNonces houses the unique nonces that are generated when pushing // version messages that are used to detect self connections. @@ -249,7 +249,7 @@ type Config struct { // ChainParams identifies which chain parameters the peer is associated // with. It is highly recommended to specify this field, however it can // be omitted in which case the test network will be used. - ChainParams *chaincfg.Params + ChainParams *dagconfig.Params // Services specifies which services to advertise as supported by the // local peer. 
This field can be omitted in which case it will be 0 @@ -375,7 +375,7 @@ type StatsSnap struct { // HashFunc is a function which returns a block hash, height and error // It is used as a callback to get newest block details. -type HashFunc func() (hash *chainhash.Hash, height int32, err error) +type HashFunc func() (hash *daghash.Hash, height int32, err error) // AddrFunc is a func which takes an address and returns a related address. type AddrFunc func(remoteAddr *wire.NetAddress) *wire.NetAddress @@ -441,11 +441,11 @@ type Peer struct { knownInventory *mruInventoryMap prevGetBlocksMtx sync.Mutex - prevGetBlocksBegin *chainhash.Hash - prevGetBlocksStop *chainhash.Hash + prevGetBlocksBegin *daghash.Hash + prevGetBlocksStop *daghash.Hash prevGetHdrsMtx sync.Mutex - prevGetHdrsBegin *chainhash.Hash - prevGetHdrsStop *chainhash.Hash + prevGetHdrsBegin *daghash.Hash + prevGetHdrsStop *daghash.Hash // These fields keep track of statistics for the peer and are protected // by the statsMtx mutex. @@ -454,7 +454,7 @@ type Peer struct { timeConnected time.Time startingHeight int32 lastBlock int32 - lastAnnouncedBlock *chainhash.Hash + lastAnnouncedBlock *daghash.Hash lastPingNonce uint64 // Set to nonce if we have a pending ping. lastPingTime time.Time // Time we sent last ping. lastPingMicros int64 // Time for last ping to return. @@ -493,7 +493,7 @@ func (p *Peer) UpdateLastBlockHeight(newHeight int32) { // peer is known to have announced. // // This function is safe for concurrent access. -func (p *Peer) UpdateLastAnnouncedBlock(blkHash *chainhash.Hash) { +func (p *Peer) UpdateLastAnnouncedBlock(blkHash *daghash.Hash) { log.Tracef("Updating last blk for peer %v, %v", p.addr, blkHash) p.statsMtx.Lock() @@ -611,7 +611,7 @@ func (p *Peer) UserAgent() string { // LastAnnouncedBlock returns the last announced block of the remote peer. // // This function is safe for concurrent access. 
-func (p *Peer) LastAnnouncedBlock() *chainhash.Hash { +func (p *Peer) LastAnnouncedBlock() *daghash.Hash { p.statsMtx.RLock() lastAnnouncedBlock := p.lastAnnouncedBlock p.statsMtx.RUnlock() @@ -904,10 +904,10 @@ func (p *Peer) PushAddrMsg(addresses []*wire.NetAddress) ([]*wire.NetAddress, er // and stop hash. It will ignore back-to-back duplicate requests. // // This function is safe for concurrent access. -func (p *Peer) PushGetBlocksMsg(locator blockchain.BlockLocator, stopHash *chainhash.Hash) error { +func (p *Peer) PushGetBlocksMsg(locator blockdag.BlockLocator, stopHash *daghash.Hash) error { // Extract the begin hash from the block locator, if one was specified, // to use for filtering duplicate getblocks requests. - var beginHash *chainhash.Hash + var beginHash *daghash.Hash if len(locator) > 0 { beginHash = locator[0] } @@ -948,10 +948,10 @@ func (p *Peer) PushGetBlocksMsg(locator blockchain.BlockLocator, stopHash *chain // and stop hash. It will ignore back-to-back duplicate requests. // // This function is safe for concurrent access. -func (p *Peer) PushGetHeadersMsg(locator blockchain.BlockLocator, stopHash *chainhash.Hash) error { +func (p *Peer) PushGetHeadersMsg(locator blockdag.BlockLocator, stopHash *daghash.Hash) error { // Extract the begin hash from the block locator, if one was specified, // to use for filtering duplicate getheaders requests. - var beginHash *chainhash.Hash + var beginHash *daghash.Hash if len(locator) > 0 { beginHash = locator[0] } @@ -995,7 +995,7 @@ func (p *Peer) PushGetHeadersMsg(locator blockchain.BlockLocator, stopHash *chai // function to block until the reject message has actually been sent. // // This function is safe for concurrent access. 
-func (p *Peer) PushRejectMsg(command string, code wire.RejectCode, reason string, hash *chainhash.Hash, wait bool) { +func (p *Peer) PushRejectMsg(command string, code wire.RejectCode, reason string, hash *daghash.Hash, wait bool) { // Don't bother sending the reject message if the protocol version // is too low. if p.VersionKnown() && p.ProtocolVersion() < wire.RejectVersion { @@ -2125,7 +2125,7 @@ func newPeerBase(origCfg *Config, inbound bool) *Peer { // Set the chain parameters to testnet if the caller did not specify any. if cfg.ChainParams == nil { - cfg.ChainParams = &chaincfg.TestNet3Params + cfg.ChainParams = &dagconfig.TestNet3Params } p := Peer{ diff --git a/peer/peer_test.go b/peer/peer_test.go index 72df13990..cb78120d5 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -13,8 +13,8 @@ import ( "time" "github.com/btcsuite/go-socks/socks" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/peer" "github.com/daglabs/btcd/wire" ) @@ -226,7 +226,7 @@ func TestPeerConnection(t *testing.T) { UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, - ChainParams: &chaincfg.MainNetParams, + ChainParams: &dagconfig.MainNetParams, ProtocolVersion: wire.RejectVersion, // Configure with older version Services: 0, } @@ -235,7 +235,7 @@ func TestPeerConnection(t *testing.T) { UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, - ChainParams: &chaincfg.MainNetParams, + ChainParams: &dagconfig.MainNetParams, Services: wire.SFNodeNetwork, } @@ -436,7 +436,7 @@ func TestPeerListeners(t *testing.T) { UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, - ChainParams: &chaincfg.MainNetParams, + ChainParams: &dagconfig.MainNetParams, Services: wire.SFNodeBloom, } inConn, outConn := pipe( @@ -502,7 +502,7 @@ func 
TestPeerListeners(t *testing.T) { { "OnBlock", wire.NewMsgBlock(wire.NewBlockHeader(1, - &chainhash.Hash{}, &chainhash.Hash{}, 1, 1)), + []daghash.Hash{}, &daghash.Hash{}, 1, 1)), }, { "OnInv", @@ -522,7 +522,7 @@ func TestPeerListeners(t *testing.T) { }, { "OnGetBlocks", - wire.NewMsgGetBlocks(&chainhash.Hash{}), + wire.NewMsgGetBlocks(&daghash.Hash{}), }, { "OnGetHeaders", @@ -530,19 +530,19 @@ func TestPeerListeners(t *testing.T) { }, { "OnGetCFilters", - wire.NewMsgGetCFilters(wire.GCSFilterRegular, 0, &chainhash.Hash{}), + wire.NewMsgGetCFilters(wire.GCSFilterRegular, 0, &daghash.Hash{}), }, { "OnGetCFHeaders", - wire.NewMsgGetCFHeaders(wire.GCSFilterRegular, 0, &chainhash.Hash{}), + wire.NewMsgGetCFHeaders(wire.GCSFilterRegular, 0, &daghash.Hash{}), }, { "OnGetCFCheckpt", - wire.NewMsgGetCFCheckpt(wire.GCSFilterRegular, &chainhash.Hash{}), + wire.NewMsgGetCFCheckpt(wire.GCSFilterRegular, &daghash.Hash{}), }, { "OnCFilter", - wire.NewMsgCFilter(wire.GCSFilterRegular, &chainhash.Hash{}, + wire.NewMsgCFilter(wire.GCSFilterRegular, &daghash.Hash{}, []byte("payload")), }, { @@ -568,7 +568,7 @@ func TestPeerListeners(t *testing.T) { { "OnMerkleBlock", wire.NewMsgMerkleBlock(wire.NewBlockHeader(1, - &chainhash.Hash{}, &chainhash.Hash{}, 1, 1)), + []daghash.Hash{}, &daghash.Hash{}, 1, 1)), }, // only one version message is allowed // only one verack message is allowed @@ -600,13 +600,13 @@ func TestPeerListeners(t *testing.T) { func TestOutboundPeer(t *testing.T) { peerCfg := &peer.Config{ - NewestBlock: func() (*chainhash.Hash, int32, error) { + NewestBlock: func() (*daghash.Hash, int32, error) { return nil, 0, errors.New("newest block not found") }, UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, - ChainParams: &chaincfg.MainNetParams, + ChainParams: &dagconfig.MainNetParams, Services: 0, } @@ -641,7 +641,7 @@ func TestOutboundPeer(t *testing.T) { } // Test Queue Inv - fakeBlockHash := &chainhash.Hash{0: 0x00, 1: 0x01} + 
fakeBlockHash := &daghash.Hash{0: 0x00, 1: 0x01} fakeInv := wire.NewInvVect(wire.InvTypeBlock, fakeBlockHash) // Should be noops as the peer could not connect. @@ -657,9 +657,9 @@ func TestOutboundPeer(t *testing.T) { p.Disconnect() // Test NewestBlock - var newestBlock = func() (*chainhash.Hash, int32, error) { + var newestBlock = func() (*daghash.Hash, int32, error) { hashStr := "14a0810ac680a3eb3f82edc878cea25ec41d6b790744e5daeef" - hash, err := chainhash.NewHashFromStr(hashStr) + hash, err := daghash.NewHashFromStr(hashStr) if err != nil { return nil, 0, err } @@ -677,7 +677,7 @@ func TestOutboundPeer(t *testing.T) { p1.AssociateConnection(c1) // Test update latest block - latestBlockHash, err := chainhash.NewHashFromStr("1a63f9cdff1752e6375c8c76e543a71d239e1a2e5c6db1aa679") + latestBlockHash, err := daghash.NewHashFromStr("1a63f9cdff1752e6375c8c76e543a71d239e1a2e5c6db1aa679") if err != nil { t.Errorf("NewHashFromStr: unexpected err %v\n", err) return @@ -695,7 +695,7 @@ func TestOutboundPeer(t *testing.T) { p1.Disconnect() // Test regression - peerCfg.ChainParams = &chaincfg.RegressionNetParams + peerCfg.ChainParams = &dagconfig.RegressionNetParams peerCfg.Services = wire.SFNodeBloom r2, w2 := io.Pipe() c2 := &conn{raddr: "10.0.0.1:8333", Writer: w2, Reader: r2} @@ -716,11 +716,11 @@ func TestOutboundPeer(t *testing.T) { t.Errorf("PushAddrMsg: unexpected err %v\n", err) return } - if err := p2.PushGetBlocksMsg(nil, &chainhash.Hash{}); err != nil { + if err := p2.PushGetBlocksMsg(nil, &daghash.Hash{}); err != nil { t.Errorf("PushGetBlocksMsg: unexpected err %v\n", err) return } - if err := p2.PushGetHeadersMsg(nil, &chainhash.Hash{}); err != nil { + if err := p2.PushGetHeadersMsg(nil, &daghash.Hash{}); err != nil { t.Errorf("PushGetHeadersMsg: unexpected err %v\n", err) return } @@ -746,7 +746,7 @@ func TestUnsupportedVersionPeer(t *testing.T) { UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, - ChainParams: 
&chaincfg.MainNetParams, + ChainParams: &dagconfig.MainNetParams, Services: 0, } diff --git a/rpcadapters.go b/rpcadapters.go index b7ae5e2ea..9686a4d78 100644 --- a/rpcadapters.go +++ b/rpcadapters.go @@ -7,8 +7,8 @@ package main import ( "sync/atomic" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/mempool" "github.com/daglabs/btcd/netsync" "github.com/daglabs/btcd/peer" @@ -247,7 +247,7 @@ func (b *rpcSyncMgr) IsCurrent() bool { // // This function is safe for concurrent access and is part of the // rpcserverSyncManager interface implementation. -func (b *rpcSyncMgr) SubmitBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) { +func (b *rpcSyncMgr) SubmitBlock(block *btcutil.Block, flags blockdag.BehaviorFlags) (bool, error) { return b.syncMgr.ProcessBlock(block, flags) } @@ -274,6 +274,6 @@ func (b *rpcSyncMgr) SyncPeerID() int32 { // // This function is safe for concurrent access and is part of the // rpcserverSyncManager interface implementation. 
-func (b *rpcSyncMgr) LocateHeaders(locators []*chainhash.Hash, hashStop *chainhash.Hash) []wire.BlockHeader { - return b.server.chain.LocateHeaders(locators, hashStop) +func (b *rpcSyncMgr) LocateHeaders(locators []*daghash.Hash, hashStop *daghash.Hash) []wire.BlockHeader { + return b.server.dag.LocateHeaders(locators, hashStop) } diff --git a/rpcclient/chain.go b/rpcclient/dag.go similarity index 82% rename from rpcclient/chain.go rename to rpcclient/dag.go index 6f58246c8..e01f375eb 100644 --- a/rpcclient/chain.go +++ b/rpcclient/dag.go @@ -11,7 +11,7 @@ import ( "encoding/json" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -20,8 +20,8 @@ import ( type FutureGetBestBlockHashResult chan *response // Receive waits for the response promised by the future and returns the hash of -// the best block in the longest block chain. -func (r FutureGetBestBlockHashResult) Receive() (*chainhash.Hash, error) { +// the best block in the longest block dag. +func (r FutureGetBestBlockHashResult) Receive() (*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -33,7 +33,7 @@ func (r FutureGetBestBlockHashResult) Receive() (*chainhash.Hash, error) { if err != nil { return nil, err } - return chainhash.NewHashFromStr(txHashStr) + return daghash.NewHashFromStr(txHashStr) } // GetBestBlockHashAsync returns an instance of a type that can be used to get @@ -47,8 +47,8 @@ func (c *Client) GetBestBlockHashAsync() FutureGetBestBlockHashResult { } // GetBestBlockHash returns the hash of the best block in the longest block -// chain. -func (c *Client) GetBestBlockHash() (*chainhash.Hash, error) { +// dag. +func (c *Client) GetBestBlockHash() (*daghash.Hash, error) { return c.GetBestBlockHashAsync().Receive() } @@ -91,7 +91,7 @@ func (r FutureGetBlockResult) Receive() (*wire.MsgBlock, error) { // returned instance. 
// // See GetBlock for the blocking version and more details. -func (c *Client) GetBlockAsync(blockHash *chainhash.Hash) FutureGetBlockResult { +func (c *Client) GetBlockAsync(blockHash *daghash.Hash) FutureGetBlockResult { hash := "" if blockHash != nil { hash = blockHash.String() @@ -105,7 +105,7 @@ func (c *Client) GetBlockAsync(blockHash *chainhash.Hash) FutureGetBlockResult { // // See GetBlockVerbose to retrieve a data structure with information about the // block instead. -func (c *Client) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) { +func (c *Client) GetBlock(blockHash *daghash.Hash) (*wire.MsgBlock, error) { return c.GetBlockAsync(blockHash).Receive() } @@ -135,7 +135,7 @@ func (r FutureGetBlockVerboseResult) Receive() (*btcjson.GetBlockVerboseResult, // the returned instance. // // See GetBlockVerbose for the blocking version and more details. -func (c *Client) GetBlockVerboseAsync(blockHash *chainhash.Hash) FutureGetBlockVerboseResult { +func (c *Client) GetBlockVerboseAsync(blockHash *daghash.Hash) FutureGetBlockVerboseResult { hash := "" if blockHash != nil { hash = blockHash.String() @@ -150,7 +150,7 @@ func (c *Client) GetBlockVerboseAsync(blockHash *chainhash.Hash) FutureGetBlockV // // See GetBlockVerboseTx to retrieve transaction data structures as well. // See GetBlock to retrieve a raw block instead. -func (c *Client) GetBlockVerbose(blockHash *chainhash.Hash) (*btcjson.GetBlockVerboseResult, error) { +func (c *Client) GetBlockVerbose(blockHash *daghash.Hash) (*btcjson.GetBlockVerboseResult, error) { return c.GetBlockVerboseAsync(blockHash).Receive() } @@ -159,7 +159,7 @@ func (c *Client) GetBlockVerbose(blockHash *chainhash.Hash) (*btcjson.GetBlockVe // the returned instance. // // See GetBlockVerboseTx or the blocking version and more details. 
-func (c *Client) GetBlockVerboseTxAsync(blockHash *chainhash.Hash) FutureGetBlockVerboseResult { +func (c *Client) GetBlockVerboseTxAsync(blockHash *daghash.Hash) FutureGetBlockVerboseResult { hash := "" if blockHash != nil { hash = blockHash.String() @@ -174,7 +174,7 @@ func (c *Client) GetBlockVerboseTxAsync(blockHash *chainhash.Hash) FutureGetBloc // // See GetBlockVerbose if only transaction hashes are preferred. // See GetBlock to retrieve a raw block instead. -func (c *Client) GetBlockVerboseTx(blockHash *chainhash.Hash) (*btcjson.GetBlockVerboseResult, error) { +func (c *Client) GetBlockVerboseTx(blockHash *daghash.Hash) (*btcjson.GetBlockVerboseResult, error) { return c.GetBlockVerboseTxAsync(blockHash).Receive() } @@ -183,7 +183,7 @@ func (c *Client) GetBlockVerboseTx(blockHash *chainhash.Hash) (*btcjson.GetBlock type FutureGetBlockCountResult chan *response // Receive waits for the response promised by the future and returns the number -// of blocks in the longest block chain. +// of blocks in the longest block dag. func (r FutureGetBlockCountResult) Receive() (int64, error) { res, err := receiveFuture(r) if err != nil { @@ -209,7 +209,7 @@ func (c *Client) GetBlockCountAsync() FutureGetBlockCountResult { return c.sendCmd(cmd) } -// GetBlockCount returns the number of blocks in the longest block chain. +// GetBlockCount returns the number of blocks in the longest block dag. func (c *Client) GetBlockCount() (int64, error) { return c.GetBlockCountAsync().Receive() } @@ -251,40 +251,40 @@ func (c *Client) GetDifficulty() (float64, error) { return c.GetDifficultyAsync().Receive() } -// FutureGetBlockChainInfoResult is a promise to deliver the result of a -// GetBlockChainInfoAsync RPC invocation (or an applicable error). -type FutureGetBlockChainInfoResult chan *response +// FutureGetBlockDAGInfoResult is a promise to deliver the result of a +// GetBlockDAGInfoAsync RPC invocation (or an applicable error). 
+type FutureGetBlockDAGInfoResult chan *response -// Receive waits for the response promised by the future and returns chain info +// Receive waits for the response promised by the future and returns dag info // result provided by the server. -func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResult, error) { +func (r FutureGetBlockDAGInfoResult) Receive() (*btcjson.GetBlockDAGInfoResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } - var chainInfo btcjson.GetBlockChainInfoResult - if err := json.Unmarshal(res, &chainInfo); err != nil { + var dagInfo btcjson.GetBlockDAGInfoResult + if err := json.Unmarshal(res, &dagInfo); err != nil { return nil, err } - return &chainInfo, nil + return &dagInfo, nil } -// GetBlockChainInfoAsync returns an instance of a type that can be used to get +// GetBlockDAGInfoAsync returns an instance of a type that can be used to get // the result of the RPC at some future time by invoking the Receive function // on the returned instance. // -// See GetBlockChainInfo for the blocking version and more details. -func (c *Client) GetBlockChainInfoAsync() FutureGetBlockChainInfoResult { - cmd := btcjson.NewGetBlockChainInfoCmd() +// See GetBlockDAGInfo for the blocking version and more details. +func (c *Client) GetBlockDAGInfoAsync() FutureGetBlockDAGInfoResult { + cmd := btcjson.NewGetBlockDAGInfoCmd() return c.sendCmd(cmd) } -// GetBlockChainInfo returns information related to the processing state of -// various chain-specific details such as the current difficulty from the tip -// of the main chain. -func (c *Client) GetBlockChainInfo() (*btcjson.GetBlockChainInfoResult, error) { - return c.GetBlockChainInfoAsync().Receive() +// GetBlockDAGInfo returns information related to the processing state of +// various dag-specific details such as the current difficulty from the tip +// of the main dag. 
+func (c *Client) GetBlockDAGInfo() (*btcjson.GetBlockDAGInfoResult, error) { + return c.GetBlockDAGInfoAsync().Receive() } // FutureGetBlockHashResult is a future promise to deliver the result of a @@ -292,8 +292,8 @@ func (c *Client) GetBlockChainInfo() (*btcjson.GetBlockChainInfoResult, error) { type FutureGetBlockHashResult chan *response // Receive waits for the response promised by the future and returns the hash of -// the block in the best block chain at the given height. -func (r FutureGetBlockHashResult) Receive() (*chainhash.Hash, error) { +// the block in the best block dag at the given height. +func (r FutureGetBlockHashResult) Receive() (*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -305,7 +305,7 @@ func (r FutureGetBlockHashResult) Receive() (*chainhash.Hash, error) { if err != nil { return nil, err } - return chainhash.NewHashFromStr(txHashStr) + return daghash.NewHashFromStr(txHashStr) } // GetBlockHashAsync returns an instance of a type that can be used to get the @@ -318,9 +318,9 @@ func (c *Client) GetBlockHashAsync(blockHeight int64) FutureGetBlockHashResult { return c.sendCmd(cmd) } -// GetBlockHash returns the hash of the block in the best block chain at the +// GetBlockHash returns the hash of the block in the best block dag at the // given height. -func (c *Client) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) { +func (c *Client) GetBlockHash(blockHeight int64) (*daghash.Hash, error) { return c.GetBlockHashAsync(blockHeight).Receive() } @@ -363,7 +363,7 @@ func (r FutureGetBlockHeaderResult) Receive() (*wire.BlockHeader, error) { // returned instance. // // See GetBlockHeader for the blocking version and more details. 
-func (c *Client) GetBlockHeaderAsync(blockHash *chainhash.Hash) FutureGetBlockHeaderResult { +func (c *Client) GetBlockHeaderAsync(blockHash *daghash.Hash) FutureGetBlockHeaderResult { hash := "" if blockHash != nil { hash = blockHash.String() @@ -377,7 +377,7 @@ func (c *Client) GetBlockHeaderAsync(blockHash *chainhash.Hash) FutureGetBlockHe // // See GetBlockHeaderVerbose to retrieve a data structure with information about the // block instead. -func (c *Client) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, error) { +func (c *Client) GetBlockHeader(blockHash *daghash.Hash) (*wire.BlockHeader, error) { return c.GetBlockHeaderAsync(blockHash).Receive() } @@ -408,7 +408,7 @@ func (r FutureGetBlockHeaderVerboseResult) Receive() (*btcjson.GetBlockHeaderVer // returned instance. // // See GetBlockHeader for the blocking version and more details. -func (c *Client) GetBlockHeaderVerboseAsync(blockHash *chainhash.Hash) FutureGetBlockHeaderVerboseResult { +func (c *Client) GetBlockHeaderVerboseAsync(blockHash *daghash.Hash) FutureGetBlockHeaderVerboseResult { hash := "" if blockHash != nil { hash = blockHash.String() @@ -422,7 +422,7 @@ func (c *Client) GetBlockHeaderVerboseAsync(blockHash *chainhash.Hash) FutureGet // blockheader from the server given its hash. // // See GetBlockHeader to retrieve a blockheader instead. -func (c *Client) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (*btcjson.GetBlockHeaderVerboseResult, error) { +func (c *Client) GetBlockHeaderVerbose(blockHash *daghash.Hash) (*btcjson.GetBlockHeaderVerboseResult, error) { return c.GetBlockHeaderVerboseAsync(blockHash).Receive() } @@ -471,7 +471,7 @@ type FutureGetRawMempoolResult chan *response // Receive waits for the response promised by the future and returns the hashes // of all transactions in the memory pool. 
-func (r FutureGetRawMempoolResult) Receive() ([]*chainhash.Hash, error) { +func (r FutureGetRawMempoolResult) Receive() ([]*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -485,9 +485,9 @@ func (r FutureGetRawMempoolResult) Receive() ([]*chainhash.Hash, error) { } // Create a slice of ShaHash arrays from the string slice. - txHashes := make([]*chainhash.Hash, 0, len(txHashStrs)) + txHashes := make([]*daghash.Hash, 0, len(txHashStrs)) for _, hashStr := range txHashStrs { - txHash, err := chainhash.NewHashFromStr(hashStr) + txHash, err := daghash.NewHashFromStr(hashStr) if err != nil { return nil, err } @@ -511,7 +511,7 @@ func (c *Client) GetRawMempoolAsync() FutureGetRawMempoolResult { // // See GetRawMempoolVerbose to retrieve data structures with information about // the transactions instead. -func (c *Client) GetRawMempool() ([]*chainhash.Hash, error) { +func (c *Client) GetRawMempool() ([]*daghash.Hash, error) { return c.GetRawMempoolAsync().Receive() } @@ -594,15 +594,15 @@ func (c *Client) EstimateFee(numBlocks int64) (float64, error) { return c.EstimateFeeAsync(numBlocks).Receive() } -// FutureVerifyChainResult is a future promise to deliver the result of a -// VerifyChainAsync, VerifyChainLevelAsyncRPC, or VerifyChainBlocksAsync +// FutureVerifyDAGResult is a future promise to deliver the result of a +// VerifyDAGAsync, VerifyDAGLevelAsyncRPC, or VerifyDAGBlocksAsync // invocation (or an applicable error). -type FutureVerifyChainResult chan *response +type FutureVerifyDAGResult chan *response // Receive waits for the response promised by the future and returns whether -// or not the chain verified based on the check level and number of blocks +// or not the dag verified based on the check level and number of blocks // to verify specified in the original call. 
-func (r FutureVerifyChainResult) Receive() (bool, error) { +func (r FutureVerifyDAGResult) Receive() (bool, error) { res, err := receiveFuture(r) if err != nil { return false, err @@ -617,58 +617,58 @@ func (r FutureVerifyChainResult) Receive() (bool, error) { return verified, nil } -// VerifyChainAsync returns an instance of a type that can be used to get the +// VerifyDAGAsync returns an instance of a type that can be used to get the // result of the RPC at some future time by invoking the Receive function on the // returned instance. // -// See VerifyChain for the blocking version and more details. -func (c *Client) VerifyChainAsync() FutureVerifyChainResult { - cmd := btcjson.NewVerifyChainCmd(nil, nil) +// See VerifyDAG for the blocking version and more details. +func (c *Client) VerifyDAGAsync() FutureVerifyDAGResult { + cmd := btcjson.NewVerifyDAGCmd(nil, nil) return c.sendCmd(cmd) } -// VerifyChain requests the server to verify the block chain database using +// VerifyDAG requests the server to verify the block dag database using // the default check level and number of blocks to verify. // -// See VerifyChainLevel and VerifyChainBlocks to override the defaults. -func (c *Client) VerifyChain() (bool, error) { - return c.VerifyChainAsync().Receive() +// See VerifyDAGLevel and VerifyDAGBlocks to override the defaults. +func (c *Client) VerifyDAG() (bool, error) { + return c.VerifyDAGAsync().Receive() } -// VerifyChainLevelAsync returns an instance of a type that can be used to get +// VerifyDAGLevelAsync returns an instance of a type that can be used to get // the result of the RPC at some future time by invoking the Receive function on // the returned instance. // -// See VerifyChainLevel for the blocking version and more details. -func (c *Client) VerifyChainLevelAsync(checkLevel int32) FutureVerifyChainResult { - cmd := btcjson.NewVerifyChainCmd(&checkLevel, nil) +// See VerifyDAGLevel for the blocking version and more details. 
+func (c *Client) VerifyDAGLevelAsync(checkLevel int32) FutureVerifyDAGResult { + cmd := btcjson.NewVerifyDAGCmd(&checkLevel, nil) return c.sendCmd(cmd) } -// VerifyChainLevel requests the server to verify the block chain database using +// VerifyDAGLevel requests the server to verify the block dag database using // the passed check level and default number of blocks to verify. // // The check level controls how thorough the verification is with higher numbers // increasing the amount of checks done as consequently how long the // verification takes. // -// See VerifyChain to use the default check level and VerifyChainBlocks to +// See VerifyDAG to use the default check level and VerifyDAGBlocks to // override the number of blocks to verify. -func (c *Client) VerifyChainLevel(checkLevel int32) (bool, error) { - return c.VerifyChainLevelAsync(checkLevel).Receive() +func (c *Client) VerifyDAGLevel(checkLevel int32) (bool, error) { + return c.VerifyDAGLevelAsync(checkLevel).Receive() } -// VerifyChainBlocksAsync returns an instance of a type that can be used to get +// VerifyDAGBlocksAsync returns an instance of a type that can be used to get // the result of the RPC at some future time by invoking the Receive function on // the returned instance. // -// See VerifyChainBlocks for the blocking version and more details. -func (c *Client) VerifyChainBlocksAsync(checkLevel, numBlocks int32) FutureVerifyChainResult { - cmd := btcjson.NewVerifyChainCmd(&checkLevel, &numBlocks) +// See VerifyDAGBlocks for the blocking version and more details. +func (c *Client) VerifyDAGBlocksAsync(checkLevel, numBlocks int32) FutureVerifyDAGResult { + cmd := btcjson.NewVerifyDAGCmd(&checkLevel, &numBlocks) return c.sendCmd(cmd) } -// VerifyChainBlocks requests the server to verify the block chain database +// VerifyDAGBlocks requests the server to verify the block dag database // using the passed check level and number of blocks to verify. 
// // The check level controls how thorough the verification is with higher numbers @@ -676,11 +676,11 @@ func (c *Client) VerifyChainBlocksAsync(checkLevel, numBlocks int32) FutureVerif // verification takes. // // The number of blocks refers to the number of blocks from the end of the -// current longest chain. +// current longest dag. // -// See VerifyChain and VerifyChainLevel to use defaults. -func (c *Client) VerifyChainBlocks(checkLevel, numBlocks int32) (bool, error) { - return c.VerifyChainBlocksAsync(checkLevel, numBlocks).Receive() +// See VerifyDAG and VerifyDAGLevel to use defaults. +func (c *Client) VerifyDAGBlocks(checkLevel, numBlocks int32) (bool, error) { + return c.VerifyDAGBlocksAsync(checkLevel, numBlocks).Receive() } // FutureGetTxOutResult is a future promise to deliver the result of a @@ -716,7 +716,7 @@ func (r FutureGetTxOutResult) Receive() (*btcjson.GetTxOutResult, error) { // the returned instance. // // See GetTxOut for the blocking version and more details. -func (c *Client) GetTxOutAsync(txHash *chainhash.Hash, index uint32, mempool bool) FutureGetTxOutResult { +func (c *Client) GetTxOutAsync(txHash *daghash.Hash, index uint32, mempool bool) FutureGetTxOutResult { hash := "" if txHash != nil { hash = txHash.String() @@ -728,7 +728,7 @@ func (c *Client) GetTxOutAsync(txHash *chainhash.Hash, index uint32, mempool boo // GetTxOut returns the transaction output info if it's unspent and // nil, otherwise. -func (c *Client) GetTxOut(txHash *chainhash.Hash, index uint32, mempool bool) (*btcjson.GetTxOutResult, error) { +func (c *Client) GetTxOut(txHash *daghash.Hash, index uint32, mempool bool) (*btcjson.GetTxOutResult, error) { return c.GetTxOutAsync(txHash, index, mempool).Receive() } @@ -767,7 +767,7 @@ func (r FutureRescanBlocksResult) Receive() ([]btcjson.RescannedBlock, error) { // // NOTE: This is a btcsuite extension ported from // github.com/decred/dcrrpcclient. 
-func (c *Client) RescanBlocksAsync(blockHashes []chainhash.Hash) FutureRescanBlocksResult { +func (c *Client) RescanBlocksAsync(blockHashes []daghash.Hash) FutureRescanBlocksResult { strBlockHashes := make([]string, len(blockHashes)) for i := range blockHashes { strBlockHashes[i] = blockHashes[i].String() @@ -779,11 +779,11 @@ func (c *Client) RescanBlocksAsync(blockHashes []chainhash.Hash) FutureRescanBlo // RescanBlocks rescans the blocks identified by blockHashes, in order, using // the client's loaded transaction filter. The blocks do not need to be on the -// main chain, but they do need to be adjacent to each other. +// main dag, but they do need to be adjacent to each other. // // NOTE: This is a btcsuite extension ported from // github.com/decred/dcrrpcclient. -func (c *Client) RescanBlocks(blockHashes []chainhash.Hash) ([]btcjson.RescannedBlock, error) { +func (c *Client) RescanBlocks(blockHashes []daghash.Hash) ([]btcjson.RescannedBlock, error) { return c.RescanBlocksAsync(blockHashes).Receive() } @@ -804,7 +804,7 @@ func (r FutureInvalidateBlockResult) Receive() error { // returned instance. // // See InvalidateBlock for the blocking version and more details. -func (c *Client) InvalidateBlockAsync(blockHash *chainhash.Hash) FutureInvalidateBlockResult { +func (c *Client) InvalidateBlockAsync(blockHash *daghash.Hash) FutureInvalidateBlockResult { hash := "" if blockHash != nil { hash = blockHash.String() @@ -815,7 +815,7 @@ func (c *Client) InvalidateBlockAsync(blockHash *chainhash.Hash) FutureInvalidat } // InvalidateBlock invalidates a specific block. -func (c *Client) InvalidateBlock(blockHash *chainhash.Hash) error { +func (c *Client) InvalidateBlock(blockHash *daghash.Hash) error { return c.InvalidateBlockAsync(blockHash).Receive() } @@ -857,7 +857,7 @@ func (r FutureGetCFilterResult) Receive() (*wire.MsgCFilter, error) { // returned instance. // // See GetCFilter for the blocking version and more details. 
-func (c *Client) GetCFilterAsync(blockHash *chainhash.Hash, +func (c *Client) GetCFilterAsync(blockHash *daghash.Hash, filterType wire.FilterType) FutureGetCFilterResult { hash := "" if blockHash != nil { @@ -869,7 +869,7 @@ func (c *Client) GetCFilterAsync(blockHash *chainhash.Hash, } // GetCFilter returns a raw filter from the server given its block hash. -func (c *Client) GetCFilter(blockHash *chainhash.Hash, +func (c *Client) GetCFilter(blockHash *daghash.Hash, filterType wire.FilterType) (*wire.MsgCFilter, error) { return c.GetCFilterAsync(blockHash, filterType).Receive() } @@ -894,7 +894,7 @@ func (r FutureGetCFilterHeaderResult) Receive() (*wire.MsgCFHeaders, error) { } // Assign the decoded header into a hash - headerHash, err := chainhash.NewHashFromStr(headerHex) + headerHash, err := daghash.NewHashFromStr(headerHex) if err != nil { return nil, err } @@ -910,7 +910,7 @@ func (r FutureGetCFilterHeaderResult) Receive() (*wire.MsgCFHeaders, error) { // on the returned instance. // // See GetCFilterHeader for the blocking version and more details. -func (c *Client) GetCFilterHeaderAsync(blockHash *chainhash.Hash, +func (c *Client) GetCFilterHeaderAsync(blockHash *daghash.Hash, filterType wire.FilterType) FutureGetCFilterHeaderResult { hash := "" if blockHash != nil { @@ -923,7 +923,7 @@ func (c *Client) GetCFilterHeaderAsync(blockHash *chainhash.Hash, // GetCFilterHeader returns a raw filter header from the server given its block // hash. 
-func (c *Client) GetCFilterHeader(blockHash *chainhash.Hash, +func (c *Client) GetCFilterHeader(blockHash *daghash.Hash, filterType wire.FilterType) (*wire.MsgCFHeaders, error) { return c.GetCFilterHeaderAsync(blockHash, filterType).Receive() } diff --git a/rpcclient/extensions.go b/rpcclient/extensions.go index 97f921d80..7d92a1e6b 100644 --- a/rpcclient/extensions.go +++ b/rpcclient/extensions.go @@ -13,7 +13,7 @@ import ( "fmt" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -152,7 +152,7 @@ type FutureGetBestBlockResult chan *response // Receive waits for the response promised by the future and returns the hash // and height of the block in the longest (best) chain. -func (r FutureGetBestBlockResult) Receive() (*chainhash.Hash, int32, error) { +func (r FutureGetBestBlockResult) Receive() (*daghash.Hash, int32, error) { res, err := receiveFuture(r) if err != nil { return nil, 0, err @@ -166,7 +166,7 @@ func (r FutureGetBestBlockResult) Receive() (*chainhash.Hash, int32, error) { } // Convert to hash from string. - hash, err := chainhash.NewHashFromStr(bestBlock.Hash) + hash, err := daghash.NewHashFromStr(bestBlock.Hash) if err != nil { return nil, 0, err } @@ -190,7 +190,7 @@ func (c *Client) GetBestBlockAsync() FutureGetBestBlockResult { // chain. // // NOTE: This is a btcd extension. -func (c *Client) GetBestBlock() (*chainhash.Hash, int32, error) { +func (c *Client) GetBestBlock() (*daghash.Hash, int32, error) { return c.GetBestBlockAsync().Receive() } @@ -282,7 +282,7 @@ func (r FutureGetHeadersResult) Receive() ([]wire.BlockHeader, error) { // // NOTE: This is a btcsuite extension ported from // github.com/decred/dcrrpcclient. 
-func (c *Client) GetHeadersAsync(blockLocators []chainhash.Hash, hashStop *chainhash.Hash) FutureGetHeadersResult { +func (c *Client) GetHeadersAsync(blockLocators []daghash.Hash, hashStop *daghash.Hash) FutureGetHeadersResult { locators := make([]string, len(blockLocators)) for i := range blockLocators { locators[i] = blockLocators[i].String() @@ -301,7 +301,7 @@ func (c *Client) GetHeadersAsync(blockLocators []chainhash.Hash, hashStop *chain // // NOTE: This is a btcsuite extension ported from // github.com/decred/dcrrpcclient. -func (c *Client) GetHeaders(blockLocators []chainhash.Hash, hashStop *chainhash.Hash) ([]wire.BlockHeader, error) { +func (c *Client) GetHeaders(blockLocators []daghash.Hash, hashStop *daghash.Hash) ([]wire.BlockHeader, error) { return c.GetHeadersAsync(blockLocators, hashStop).Receive() } diff --git a/rpcclient/mining.go b/rpcclient/mining.go index 9fa315ae2..4389330ea 100644 --- a/rpcclient/mining.go +++ b/rpcclient/mining.go @@ -10,7 +10,7 @@ import ( "errors" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcutil" ) @@ -20,7 +20,7 @@ type FutureGenerateResult chan *response // Receive waits for the response promised by the future and returns a list of // block hashes generated by the call. -func (r FutureGenerateResult) Receive() ([]*chainhash.Hash, error) { +func (r FutureGenerateResult) Receive() ([]*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -33,11 +33,11 @@ func (r FutureGenerateResult) Receive() ([]*chainhash.Hash, error) { return nil, err } - // Convert each block hash to a chainhash.Hash and store a pointer to + // Convert each block hash to a daghash.Hash and store a pointer to // each. 
- convertedResult := make([]*chainhash.Hash, len(result)) + convertedResult := make([]*daghash.Hash, len(result)) for i, hashString := range result { - convertedResult[i], err = chainhash.NewHashFromStr(hashString) + convertedResult[i], err = daghash.NewHashFromStr(hashString) if err != nil { return nil, err } @@ -57,7 +57,7 @@ func (c *Client) GenerateAsync(numBlocks uint32) FutureGenerateResult { } // Generate generates numBlocks blocks and returns their hashes. -func (c *Client) Generate(numBlocks uint32) ([]*chainhash.Hash, error) { +func (c *Client) Generate(numBlocks uint32) ([]*daghash.Hash, error) { return c.GenerateAsync(numBlocks).Receive() } diff --git a/rpcclient/notify.go b/rpcclient/notify.go index ae274a343..417f3048e 100644 --- a/rpcclient/notify.go +++ b/rpcclient/notify.go @@ -14,7 +14,7 @@ import ( "time" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -96,7 +96,7 @@ type NotificationHandlers struct { // function is non-nil. // // NOTE: Deprecated. Use OnFilteredBlockConnected instead. - OnBlockConnected func(hash *chainhash.Hash, height int32, t time.Time) + OnBlockConnected func(hash *daghash.Hash, height int32, t time.Time) // OnFilteredBlockConnected is invoked when a block is connected to the // longest (best) chain. It will only be invoked if a preceding call to @@ -112,7 +112,7 @@ type NotificationHandlers struct { // function is non-nil. // // NOTE: Deprecated. Use OnFilteredBlockDisconnected instead. - OnBlockDisconnected func(hash *chainhash.Hash, height int32, t time.Time) + OnBlockDisconnected func(hash *daghash.Hash, height int32, t time.Time) // OnFilteredBlockDisconnected is invoked when a block is disconnected // from the longest (best) chain. 
It will only be invoked if a @@ -158,20 +158,20 @@ type NotificationHandlers struct { // notifications after the rescan request has already returned. // // NOTE: Deprecated. Not used with RescanBlocks. - OnRescanFinished func(hash *chainhash.Hash, height int32, blkTime time.Time) + OnRescanFinished func(hash *daghash.Hash, height int32, blkTime time.Time) // OnRescanProgress is invoked periodically when a rescan is underway. // It will only be invoked if a preceding call to Rescan or // RescanEndHeight has been made and the function is non-nil. // // NOTE: Deprecated. Not used with RescanBlocks. - OnRescanProgress func(hash *chainhash.Hash, height int32, blkTime time.Time) + OnRescanProgress func(hash *daghash.Hash, height int32, blkTime time.Time) // OnTxAccepted is invoked when a transaction is accepted into the // memory pool. It will only be invoked if a preceding call to // NotifyNewTransactions with the verbose flag set to false has been // made to register for the notification and the function is non-nil. - OnTxAccepted func(hash *chainhash.Hash, amount btcutil.Amount) + OnTxAccepted func(hash *daghash.Hash, amount btcutil.Amount) // OnTxAccepted is invoked when a transaction is accepted into the // memory pool. It will only be invoked if a preceding call to @@ -485,7 +485,7 @@ func (e wrongNumParams) Error() string { // parseChainNtfnParams parses out the block hash and height from the parameters // of blockconnected and blockdisconnected notifications. -func parseChainNtfnParams(params []json.RawMessage) (*chainhash.Hash, +func parseChainNtfnParams(params []json.RawMessage) (*daghash.Hash, int32, time.Time, error) { if len(params) != 3 { @@ -514,7 +514,7 @@ func parseChainNtfnParams(params []json.RawMessage) (*chainhash.Hash, } // Create hash from block hash string. 
- blockHash, err := chainhash.NewHashFromStr(blockHashStr) + blockHash, err := daghash.NewHashFromStr(blockHashStr) if err != nil { return nil, 0, time.Time{}, err } @@ -674,13 +674,13 @@ func parseChainTxNtfnParams(params []json.RawMessage) (*btcutil.Tx, // TODO: Change recvtx and redeemingtx callback signatures to use // nicer types for details about the block (block hash as a - // chainhash.Hash, block time as a time.Time, etc.). + // daghash.Hash, block time as a time.Time, etc.). return btcutil.NewTx(&msgTx), block, nil } // parseRescanProgressParams parses out the height of the last rescanned block // from the parameters of rescanfinished and rescanprogress notifications. -func parseRescanProgressParams(params []json.RawMessage) (*chainhash.Hash, int32, time.Time, error) { +func parseRescanProgressParams(params []json.RawMessage) (*daghash.Hash, int32, time.Time, error) { if len(params) != 3 { return nil, 0, time.Time{}, wrongNumParams(len(params)) } @@ -707,7 +707,7 @@ func parseRescanProgressParams(params []json.RawMessage) (*chainhash.Hash, int32 } // Decode string encoding of block hash. - hash, err := chainhash.NewHashFromStr(hashStr) + hash, err := daghash.NewHashFromStr(hashStr) if err != nil { return nil, 0, time.Time{}, err } @@ -717,7 +717,7 @@ func parseRescanProgressParams(params []json.RawMessage) (*chainhash.Hash, int32 // parseTxAcceptedNtfnParams parses out the transaction hash and total amount // from the parameters of a txaccepted notification. -func parseTxAcceptedNtfnParams(params []json.RawMessage) (*chainhash.Hash, +func parseTxAcceptedNtfnParams(params []json.RawMessage) (*daghash.Hash, btcutil.Amount, error) { if len(params) != 2 { @@ -745,7 +745,7 @@ func parseTxAcceptedNtfnParams(params []json.RawMessage) (*chainhash.Hash, } // Decode string encoding of transaction sha. 
- txHash, err := chainhash.NewHashFromStr(txHashStr) + txHash, err := daghash.NewHashFromStr(txHashStr) if err != nil { return nil, 0, err } @@ -1151,7 +1151,7 @@ func (r FutureRescanResult) Receive() error { // NOTE: This is a btcd extension and requires a websocket connection. // // NOTE: Deprecated. Use RescanBlocksAsync instead. -func (c *Client) RescanAsync(startBlock *chainhash.Hash, +func (c *Client) RescanAsync(startBlock *daghash.Hash, addresses []btcutil.Address, outpoints []*wire.OutPoint) FutureRescanResult { @@ -1216,7 +1216,7 @@ func (c *Client) RescanAsync(startBlock *chainhash.Hash, // NOTE: This is a btcd extension and requires a websocket connection. // // NOTE: Deprecated. Use RescanBlocks instead. -func (c *Client) Rescan(startBlock *chainhash.Hash, +func (c *Client) Rescan(startBlock *daghash.Hash, addresses []btcutil.Address, outpoints []*wire.OutPoint) error { @@ -1232,9 +1232,9 @@ func (c *Client) Rescan(startBlock *chainhash.Hash, // NOTE: This is a btcd extension and requires a websocket connection. // // NOTE: Deprecated. Use RescanBlocksAsync instead. -func (c *Client) RescanEndBlockAsync(startBlock *chainhash.Hash, +func (c *Client) RescanEndBlockAsync(startBlock *daghash.Hash, addresses []btcutil.Address, outpoints []*wire.OutPoint, - endBlock *chainhash.Hash) FutureRescanResult { + endBlock *daghash.Hash) FutureRescanResult { // Not supported in HTTP POST mode. if c.config.HTTPPostMode { @@ -1294,9 +1294,9 @@ func (c *Client) RescanEndBlockAsync(startBlock *chainhash.Hash, // NOTE: This is a btcd extension and requires a websocket connection. // // NOTE: Deprecated. Use RescanBlocks instead. 
-func (c *Client) RescanEndHeight(startBlock *chainhash.Hash, +func (c *Client) RescanEndHeight(startBlock *daghash.Hash, addresses []btcutil.Address, outpoints []*wire.OutPoint, - endBlock *chainhash.Hash) error { + endBlock *daghash.Hash) error { return c.RescanEndBlockAsync(startBlock, addresses, outpoints, endBlock).Receive() diff --git a/rpcclient/rawtransactions.go b/rpcclient/rawtransactions.go index 759b81f7d..240e99aaa 100644 --- a/rpcclient/rawtransactions.go +++ b/rpcclient/rawtransactions.go @@ -10,7 +10,7 @@ import ( "encoding/json" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -96,7 +96,7 @@ func (r FutureGetRawTransactionResult) Receive() (*btcutil.Tx, error) { // the returned instance. // // See GetRawTransaction for the blocking version and more details. -func (c *Client) GetRawTransactionAsync(txHash *chainhash.Hash) FutureGetRawTransactionResult { +func (c *Client) GetRawTransactionAsync(txHash *daghash.Hash) FutureGetRawTransactionResult { hash := "" if txHash != nil { hash = txHash.String() @@ -110,7 +110,7 @@ func (c *Client) GetRawTransactionAsync(txHash *chainhash.Hash) FutureGetRawTran // // See GetRawTransactionVerbose to obtain additional information about the // transaction. -func (c *Client) GetRawTransaction(txHash *chainhash.Hash) (*btcutil.Tx, error) { +func (c *Client) GetRawTransaction(txHash *daghash.Hash) (*btcutil.Tx, error) { return c.GetRawTransactionAsync(txHash).Receive() } @@ -142,7 +142,7 @@ func (r FutureGetRawTransactionVerboseResult) Receive() (*btcjson.TxRawResult, e // function on the returned instance. // // See GetRawTransactionVerbose for the blocking version and more details. 
-func (c *Client) GetRawTransactionVerboseAsync(txHash *chainhash.Hash) FutureGetRawTransactionVerboseResult { +func (c *Client) GetRawTransactionVerboseAsync(txHash *daghash.Hash) FutureGetRawTransactionVerboseResult { hash := "" if txHash != nil { hash = txHash.String() @@ -156,7 +156,7 @@ func (c *Client) GetRawTransactionVerboseAsync(txHash *chainhash.Hash) FutureGet // its hash. // // See GetRawTransaction to obtain only the transaction already deserialized. -func (c *Client) GetRawTransactionVerbose(txHash *chainhash.Hash) (*btcjson.TxRawResult, error) { +func (c *Client) GetRawTransactionVerbose(txHash *daghash.Hash) (*btcjson.TxRawResult, error) { return c.GetRawTransactionVerboseAsync(txHash).Receive() } @@ -264,7 +264,7 @@ type FutureSendRawTransactionResult chan *response // Receive waits for the response promised by the future and returns the result // of submitting the encoded transaction to the server which then relays it to // the network. -func (r FutureSendRawTransactionResult) Receive() (*chainhash.Hash, error) { +func (r FutureSendRawTransactionResult) Receive() (*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -277,7 +277,7 @@ func (r FutureSendRawTransactionResult) Receive() (*chainhash.Hash, error) { return nil, err } - return chainhash.NewHashFromStr(txHashStr) + return daghash.NewHashFromStr(txHashStr) } // SendRawTransactionAsync returns an instance of a type that can be used to get @@ -302,7 +302,7 @@ func (c *Client) SendRawTransactionAsync(tx *wire.MsgTx, allowHighFees bool) Fut // SendRawTransaction submits the encoded transaction to the server which will // then relay it to the network. 
-func (c *Client) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (*chainhash.Hash, error) { +func (c *Client) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (*daghash.Hash, error) { return c.SendRawTransactionAsync(tx, allowHighFees).Receive() } diff --git a/rpcclient/wallet.go b/rpcclient/wallet.go index 67c9d29ee..1389b31e6 100644 --- a/rpcclient/wallet.go +++ b/rpcclient/wallet.go @@ -9,8 +9,8 @@ import ( "strconv" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -46,7 +46,7 @@ func (r FutureGetTransactionResult) Receive() (*btcjson.GetTransactionResult, er // the returned instance. // // See GetTransaction for the blocking version and more details. -func (c *Client) GetTransactionAsync(txHash *chainhash.Hash) FutureGetTransactionResult { +func (c *Client) GetTransactionAsync(txHash *daghash.Hash) FutureGetTransactionResult { hash := "" if txHash != nil { hash = txHash.String() @@ -59,7 +59,7 @@ func (c *Client) GetTransactionAsync(txHash *chainhash.Hash) FutureGetTransactio // GetTransaction returns detailed information about a wallet transaction. // // See GetRawTransaction to return the raw transaction instead. -func (c *Client) GetTransaction(txHash *chainhash.Hash) (*btcjson.GetTransactionResult, error) { +func (c *Client) GetTransaction(txHash *daghash.Hash) (*btcjson.GetTransactionResult, error) { return c.GetTransactionAsync(txHash).Receive() } @@ -269,7 +269,7 @@ func (r FutureListSinceBlockResult) Receive() (*btcjson.ListSinceBlockResult, er // the returned instance. // // See ListSinceBlock for the blocking version and more details. 
-func (c *Client) ListSinceBlockAsync(blockHash *chainhash.Hash) FutureListSinceBlockResult { +func (c *Client) ListSinceBlockAsync(blockHash *daghash.Hash) FutureListSinceBlockResult { var hash *string if blockHash != nil { hash = btcjson.String(blockHash.String()) @@ -284,7 +284,7 @@ func (c *Client) ListSinceBlockAsync(blockHash *chainhash.Hash) FutureListSinceB // minimum confirmations as a filter. // // See ListSinceBlockMinConf to override the minimum number of confirmations. -func (c *Client) ListSinceBlock(blockHash *chainhash.Hash) (*btcjson.ListSinceBlockResult, error) { +func (c *Client) ListSinceBlock(blockHash *daghash.Hash) (*btcjson.ListSinceBlockResult, error) { return c.ListSinceBlockAsync(blockHash).Receive() } @@ -293,7 +293,7 @@ func (c *Client) ListSinceBlock(blockHash *chainhash.Hash) (*btcjson.ListSinceBl // function on the returned instance. // // See ListSinceBlockMinConf for the blocking version and more details. -func (c *Client) ListSinceBlockMinConfAsync(blockHash *chainhash.Hash, minConfirms int) FutureListSinceBlockResult { +func (c *Client) ListSinceBlockMinConfAsync(blockHash *daghash.Hash, minConfirms int) FutureListSinceBlockResult { var hash *string if blockHash != nil { hash = btcjson.String(blockHash.String()) @@ -308,7 +308,7 @@ func (c *Client) ListSinceBlockMinConfAsync(blockHash *chainhash.Hash, minConfir // number of minimum confirmations as a filter. // // See ListSinceBlock to use the default minimum number of confirmations. -func (c *Client) ListSinceBlockMinConf(blockHash *chainhash.Hash, minConfirms int) (*btcjson.ListSinceBlockResult, error) { +func (c *Client) ListSinceBlockMinConf(blockHash *daghash.Hash, minConfirms int) (*btcjson.ListSinceBlockResult, error) { return c.ListSinceBlockMinConfAsync(blockHash, minConfirms).Receive() } @@ -387,7 +387,7 @@ func (r FutureListLockUnspentResult) Receive() ([]*wire.OutPoint, error) { // Create a slice of outpoints from the transaction input structs. 
ops := make([]*wire.OutPoint, len(inputs)) for i, input := range inputs { - sha, err := chainhash.NewHashFromStr(input.Txid) + sha, err := daghash.NewHashFromStr(input.Txid) if err != nil { return nil, err } @@ -448,7 +448,7 @@ type FutureSendToAddressResult chan *response // Receive waits for the response promised by the future and returns the hash // of the transaction sending the passed amount to the given address. -func (r FutureSendToAddressResult) Receive() (*chainhash.Hash, error) { +func (r FutureSendToAddressResult) Receive() (*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -461,7 +461,7 @@ func (r FutureSendToAddressResult) Receive() (*chainhash.Hash, error) { return nil, err } - return chainhash.NewHashFromStr(txHash) + return daghash.NewHashFromStr(txHash) } // SendToAddressAsync returns an instance of a type that can be used to get the @@ -483,7 +483,7 @@ func (c *Client) SendToAddressAsync(address btcutil.Address, amount btcutil.Amou // // NOTE: This function requires to the wallet to be unlocked. See the // WalletPassphrase function for more details. -func (c *Client) SendToAddress(address btcutil.Address, amount btcutil.Amount) (*chainhash.Hash, error) { +func (c *Client) SendToAddress(address btcutil.Address, amount btcutil.Amount) (*daghash.Hash, error) { return c.SendToAddressAsync(address, amount).Receive() } @@ -514,7 +514,7 @@ func (c *Client) SendToAddressCommentAsync(address btcutil.Address, // // NOTE: This function requires to the wallet to be unlocked. See the // WalletPassphrase function for more details. 
-func (c *Client) SendToAddressComment(address btcutil.Address, amount btcutil.Amount, comment, commentTo string) (*chainhash.Hash, error) { +func (c *Client) SendToAddressComment(address btcutil.Address, amount btcutil.Amount, comment, commentTo string) (*daghash.Hash, error) { return c.SendToAddressCommentAsync(address, amount, comment, commentTo).Receive() } @@ -527,7 +527,7 @@ type FutureSendFromResult chan *response // Receive waits for the response promised by the future and returns the hash // of the transaction sending amount to the given address using the provided // account as a source of funds. -func (r FutureSendFromResult) Receive() (*chainhash.Hash, error) { +func (r FutureSendFromResult) Receive() (*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -540,7 +540,7 @@ func (r FutureSendFromResult) Receive() (*chainhash.Hash, error) { return nil, err } - return chainhash.NewHashFromStr(txHash) + return daghash.NewHashFromStr(txHash) } // SendFromAsync returns an instance of a type that can be used to get the @@ -563,7 +563,7 @@ func (c *Client) SendFromAsync(fromAccount string, toAddress btcutil.Address, am // // NOTE: This function requires to the wallet to be unlocked. See the // WalletPassphrase function for more details. -func (c *Client) SendFrom(fromAccount string, toAddress btcutil.Address, amount btcutil.Amount) (*chainhash.Hash, error) { +func (c *Client) SendFrom(fromAccount string, toAddress btcutil.Address, amount btcutil.Amount) (*daghash.Hash, error) { return c.SendFromAsync(fromAccount, toAddress, amount).Receive() } @@ -588,7 +588,7 @@ func (c *Client) SendFromMinConfAsync(fromAccount string, toAddress btcutil.Addr // // NOTE: This function requires to the wallet to be unlocked. See the // WalletPassphrase function for more details. 
-func (c *Client) SendFromMinConf(fromAccount string, toAddress btcutil.Address, amount btcutil.Amount, minConfirms int) (*chainhash.Hash, error) { +func (c *Client) SendFromMinConf(fromAccount string, toAddress btcutil.Address, amount btcutil.Amount, minConfirms int) (*daghash.Hash, error) { return c.SendFromMinConfAsync(fromAccount, toAddress, amount, minConfirms).Receive() } @@ -621,7 +621,7 @@ func (c *Client) SendFromCommentAsync(fromAccount string, // WalletPassphrase function for more details. func (c *Client) SendFromComment(fromAccount string, toAddress btcutil.Address, amount btcutil.Amount, minConfirms int, - comment, commentTo string) (*chainhash.Hash, error) { + comment, commentTo string) (*daghash.Hash, error) { return c.SendFromCommentAsync(fromAccount, toAddress, amount, minConfirms, comment, commentTo).Receive() @@ -635,7 +635,7 @@ type FutureSendManyResult chan *response // Receive waits for the response promised by the future and returns the hash // of the transaction sending multiple amounts to multiple addresses using the // provided account as a source of funds. -func (r FutureSendManyResult) Receive() (*chainhash.Hash, error) { +func (r FutureSendManyResult) Receive() (*daghash.Hash, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -648,7 +648,7 @@ func (r FutureSendManyResult) Receive() (*chainhash.Hash, error) { return nil, err } - return chainhash.NewHashFromStr(txHash) + return daghash.NewHashFromStr(txHash) } // SendManyAsync returns an instance of a type that can be used to get the @@ -673,7 +673,7 @@ func (c *Client) SendManyAsync(fromAccount string, amounts map[btcutil.Address]b // // NOTE: This function requires to the wallet to be unlocked. See the // WalletPassphrase function for more details. 
-func (c *Client) SendMany(fromAccount string, amounts map[btcutil.Address]btcutil.Amount) (*chainhash.Hash, error) { +func (c *Client) SendMany(fromAccount string, amounts map[btcutil.Address]btcutil.Amount) (*daghash.Hash, error) { return c.SendManyAsync(fromAccount, amounts).Receive() } @@ -706,7 +706,7 @@ func (c *Client) SendManyMinConfAsync(fromAccount string, // WalletPassphrase function for more details. func (c *Client) SendManyMinConf(fromAccount string, amounts map[btcutil.Address]btcutil.Amount, - minConfirms int) (*chainhash.Hash, error) { + minConfirms int) (*daghash.Hash, error) { return c.SendManyMinConfAsync(fromAccount, amounts, minConfirms).Receive() } @@ -741,7 +741,7 @@ func (c *Client) SendManyCommentAsync(fromAccount string, // WalletPassphrase function for more details. func (c *Client) SendManyComment(fromAccount string, amounts map[btcutil.Address]btcutil.Amount, minConfirms int, - comment string) (*chainhash.Hash, error) { + comment string) (*daghash.Hash, error) { return c.SendManyCommentAsync(fromAccount, amounts, minConfirms, comment).Receive() @@ -771,7 +771,7 @@ func (r FutureAddMultisigAddressResult) Receive() (btcutil.Address, error) { return nil, err } - return btcutil.DecodeAddress(addr, &chaincfg.MainNetParams) + return btcutil.DecodeAddress(addr, &dagconfig.MainNetParams) } // AddMultisigAddressAsync returns an instance of a type that can be used to get @@ -885,7 +885,7 @@ func (r FutureGetNewAddressResult) Receive() (btcutil.Address, error) { return nil, err } - return btcutil.DecodeAddress(addr, &chaincfg.MainNetParams) + return btcutil.DecodeAddress(addr, &dagconfig.MainNetParams) } // GetNewAddressAsync returns an instance of a type that can be used to get the @@ -923,7 +923,7 @@ func (r FutureGetRawChangeAddressResult) Receive() (btcutil.Address, error) { return nil, err } - return btcutil.DecodeAddress(addr, &chaincfg.MainNetParams) + return btcutil.DecodeAddress(addr, &dagconfig.MainNetParams) } // 
GetRawChangeAddressAsync returns an instance of a type that can be used to @@ -962,7 +962,7 @@ func (r FutureGetAccountAddressResult) Receive() (btcutil.Address, error) { return nil, err } - return btcutil.DecodeAddress(addr, &chaincfg.MainNetParams) + return btcutil.DecodeAddress(addr, &dagconfig.MainNetParams) } // GetAccountAddressAsync returns an instance of a type that can be used to get @@ -1068,7 +1068,7 @@ func (r FutureGetAddressesByAccountResult) Receive() ([]btcutil.Address, error) addrs := make([]btcutil.Address, 0, len(addrStrings)) for _, addrStr := range addrStrings { addr, err := btcutil.DecodeAddress(addrStr, - &chaincfg.MainNetParams) + &dagconfig.MainNetParams) if err != nil { return nil, err } diff --git a/rpcserver.go b/rpcserver.go index aa013e49a..088d2650c 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -28,12 +28,12 @@ import ( "time" "github.com/btcsuite/websocket" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/indexers" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/indexers" "github.com/daglabs/btcd/btcec" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/mempool" "github.com/daglabs/btcd/mining" @@ -318,7 +318,7 @@ func rpcDecodeHexError(gotHex string) *btcjson.RPCError { // rpcNoTxInfoError is a convenience function for returning a nicely formatted // RPC error which indicates there is no information available for the provided // transaction hash. 
-func rpcNoTxInfoError(txHash *chainhash.Hash) *btcjson.RPCError { +func rpcNoTxInfoError(txHash *daghash.Hash) *btcjson.RPCError { return btcjson.NewRPCError(btcjson.ErrRPCNoTxInfo, fmt.Sprintf("No information available about transaction %v", txHash)) @@ -330,18 +330,18 @@ type gbtWorkState struct { sync.Mutex lastTxUpdate time.Time lastGenerated time.Time - prevHash *chainhash.Hash + prevHash *daghash.Hash minTimestamp time.Time template *mining.BlockTemplate - notifyMap map[chainhash.Hash]map[int64]chan struct{} - timeSource blockchain.MedianTimeSource + notifyMap map[daghash.Hash]map[int64]chan struct{} + timeSource blockdag.MedianTimeSource } // newGbtWorkState returns a new instance of a gbtWorkState with all internal // fields initialized and ready to use. -func newGbtWorkState(timeSource blockchain.MedianTimeSource) *gbtWorkState { +func newGbtWorkState(timeSource blockdag.MedianTimeSource) *gbtWorkState { return &gbtWorkState{ - notifyMap: make(map[chainhash.Hash]map[int64]chan struct{}), + notifyMap: make(map[daghash.Hash]map[int64]chan struct{}), timeSource: timeSource, } } @@ -525,7 +525,7 @@ func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan // some validity checks. mtx := wire.NewMsgTx(wire.TxVersion) for _, input := range c.Inputs { - txHash, err := chainhash.NewHashFromStr(input.Txid) + txHash, err := daghash.NewHashFromStr(input.Txid) if err != nil { return nil, rpcDecodeHexError(input.Txid) } @@ -639,7 +639,7 @@ func handleDebugLevel(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) func createVinList(mtx *wire.MsgTx) []btcjson.Vin { // Coinbase transactions only have a single txin by definition. 
vinList := make([]btcjson.Vin, len(mtx.TxIn)) - if blockchain.IsCoinBaseTx(mtx) { + if blockdag.IsCoinBaseTx(mtx) { txIn := mtx.TxIn[0] vinList[0].Coinbase = hex.EncodeToString(txIn.SignatureScript) vinList[0].Sequence = txIn.Sequence @@ -667,7 +667,7 @@ func createVinList(mtx *wire.MsgTx) []btcjson.Vin { // createVoutList returns a slice of JSON objects for the outputs of the passed // transaction. -func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params, filterAddrMap map[string]struct{}) []btcjson.Vout { +func createVoutList(mtx *wire.MsgTx, chainParams *dagconfig.Params, filterAddrMap map[string]struct{}) []btcjson.Vout { voutList := make([]btcjson.Vout, 0, len(mtx.TxOut)) for i, v := range mtx.TxOut { // The disassembled string will contain [error] inline if the @@ -719,7 +719,7 @@ func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params, filterAddrMap // createTxRawResult converts the passed transaction and associated parameters // to a raw transaction JSON object. -func createTxRawResult(chainParams *chaincfg.Params, mtx *wire.MsgTx, +func createTxRawResult(chainParams *dagconfig.Params, mtx *wire.MsgTx, txHash string, blkHeader *wire.BlockHeader, blkHash string, blkHeight int32, chainHeight int32) (*btcjson.TxRawResult, error) { @@ -1002,29 +1002,29 @@ func handleGetBestBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{} // All other "get block" commands give either the height, the // hash, or both but require the block SHA. This gets both for // the best block. - best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() result := &btcjson.GetBestBlockResult{ - Hash: best.Hash.String(), - Height: best.Height, + Hash: dagState.SelectedTip.Hash.String(), + Height: dagState.SelectedTip.Height, } return result, nil } // handleGetBestBlockHash implements the getbestblockhash command. 
func handleGetBestBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - best := s.cfg.Chain.BestSnapshot() - return best.Hash.String(), nil + dagState := s.cfg.DAG.GetDAGState() + return dagState.SelectedTip.Hash.String(), nil } // getDifficultyRatio returns the proof-of-work difficulty as a multiple of the // minimum difficulty using the passed bits field from the header of a block. -func getDifficultyRatio(bits uint32, params *chaincfg.Params) float64 { +func getDifficultyRatio(bits uint32, params *dagconfig.Params) float64 { // The minimum difficulty is the max possible proof-of-work limit bits // converted back to a number. Note this is not the same as the proof of // work limit directly because the block difficulty is encoded in a block // with the compact form which loses precision. - max := blockchain.CompactToBig(params.PowLimitBits) - target := blockchain.CompactToBig(bits) + max := blockdag.CompactToBig(params.PowLimitBits) + target := blockdag.CompactToBig(bits) difficulty := new(big.Rat).SetFrac(max, target) outString := difficulty.FloatString(8) @@ -1041,7 +1041,7 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i c := cmd.(*btcjson.GetBlockCmd) // Load the raw block bytes from the database. - hash, err := chainhash.NewHashFromStr(c.Hash) + hash, err := daghash.NewHashFromStr(c.Hash) if err != nil { return nil, rpcDecodeHexError(c.Hash) } @@ -1074,18 +1074,18 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i } // Get the block height from chain. - blockHeight, err := s.cfg.Chain.BlockHeightByHash(hash) + blockHeight, err := s.cfg.DAG.BlockHeightByHash(hash) if err != nil { context := "Failed to obtain block height" return nil, internalRPCError(err.Error(), context) } blk.SetHeight(blockHeight) - best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() // Get next block hash unless there are none. 
var nextHashString string - if blockHeight < best.Height { - nextHash, err := s.cfg.Chain.BlockHashByHeight(blockHeight + 1) + if blockHeight < dagState.SelectedTip.Height { + nextHash, err := s.cfg.DAG.BlockHashByHeight(blockHeight + 1) if err != nil { context := "No next block" return nil, internalRPCError(err.Error(), context) @@ -1103,7 +1103,7 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i PreviousHash: blockHeader.PrevBlock.String(), Nonce: blockHeader.Nonce, Time: blockHeader.Timestamp.Unix(), - Confirmations: uint64(1 + best.Height - blockHeight), + Confirmations: uint64(1 + dagState.SelectedTip.Height - blockHeight), Height: int64(blockHeight), Size: int32(len(blkBytes)), Bits: strconv.FormatInt(int64(blockHeader.Bits), 16), @@ -1125,7 +1125,7 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i for i, tx := range txns { rawTxn, err := createTxRawResult(params, tx.MsgTx(), tx.Hash().String(), blockHeader, hash.String(), - blockHeight, best.Height) + blockHeight, dagState.SelectedTip.Height) if err != nil { return nil, err } @@ -1139,17 +1139,17 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i // softForkStatus converts a ThresholdState state into a human readable string // corresponding to the particular state. 
-func softForkStatus(state blockchain.ThresholdState) (string, error) { +func softForkStatus(state blockdag.ThresholdState) (string, error) { switch state { - case blockchain.ThresholdDefined: + case blockdag.ThresholdDefined: return "defined", nil - case blockchain.ThresholdStarted: + case blockdag.ThresholdStarted: return "started", nil - case blockchain.ThresholdLockedIn: + case blockdag.ThresholdLockedIn: return "lockedin", nil - case blockchain.ThresholdActive: + case blockdag.ThresholdActive: return "active", nil - case blockchain.ThresholdFailed: + case blockdag.ThresholdFailed: return "failed", nil default: return "", fmt.Errorf("unknown deployment state: %v", state) @@ -1161,16 +1161,16 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str // Obtain a snapshot of the current best known blockchain state. We'll // populate the response to this call primarily from this snapshot. params := s.cfg.ChainParams - chain := s.cfg.Chain - chainSnapshot := chain.BestSnapshot() + chain := s.cfg.DAG + dagState := chain.GetDAGState() chainInfo := &btcjson.GetBlockChainInfoResult{ Chain: params.Name, - Blocks: chainSnapshot.Height, - Headers: chainSnapshot.Height, - BestBlockHash: chainSnapshot.Hash.String(), - Difficulty: getDifficultyRatio(chainSnapshot.Bits, params), - MedianTime: chainSnapshot.MedianTime.Unix(), + Blocks: dagState.SelectedTip.Height, + Headers: dagState.SelectedTip.Height, + BestBlockHash: dagState.SelectedTip.Hash.String(), + Difficulty: getDifficultyRatio(dagState.SelectedTip.Bits, params), + MedianTime: dagState.SelectedTip.MedianTime.Unix(), Pruned: false, Bip9SoftForks: make(map[string]*btcjson.Bip9SoftForkDescription), } @@ -1178,7 +1178,7 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str // Next, populate the response with information describing the current // status of soft-forks deployed via the super-majority block // signalling mechanism. 
- height := chainSnapshot.Height + height := dagState.SelectedTip.Height chainInfo.SoftForks = []*btcjson.SoftForkDescription{ { ID: "bip34", @@ -1207,7 +1207,7 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str // fork-name. var forkName string switch deployment { - case chaincfg.DeploymentTestDummy: + case dagconfig.DeploymentTestDummy: forkName = "dummy" default: @@ -1253,14 +1253,14 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str // handleGetBlockCount implements the getblockcount command. func handleGetBlockCount(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - best := s.cfg.Chain.BestSnapshot() - return int64(best.Height), nil + dagState := s.cfg.DAG.GetDAGState() + return int64(dagState.SelectedTip.Height), nil } // handleGetBlockHash implements the getblockhash command. func handleGetBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { c := cmd.(*btcjson.GetBlockHashCmd) - hash, err := s.cfg.Chain.BlockHashByHeight(int32(c.Index)) + hash, err := s.cfg.DAG.BlockHashByHeight(int32(c.Index)) if err != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCOutOfRange, @@ -1276,11 +1276,11 @@ func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct c := cmd.(*btcjson.GetBlockHeaderCmd) // Fetch the header from chain. - hash, err := chainhash.NewHashFromStr(c.Hash) + hash, err := daghash.NewHashFromStr(c.Hash) if err != nil { return nil, rpcDecodeHexError(c.Hash) } - blockHeader, err := s.cfg.Chain.FetchHeader(hash) + blockHeader, err := s.cfg.DAG.FetchHeader(hash) if err != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCBlockNotFound, @@ -1303,17 +1303,17 @@ func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct // The verbose flag is set, so generate the JSON object and return it. // Get the block height from chain. 
- blockHeight, err := s.cfg.Chain.BlockHeightByHash(hash) + blockHeight, err := s.cfg.DAG.BlockHeightByHash(hash) if err != nil { context := "Failed to obtain block height" return nil, internalRPCError(err.Error(), context) } - best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() // Get next block hash unless there are none. var nextHashString string - if blockHeight < best.Height { - nextHash, err := s.cfg.Chain.BlockHashByHeight(blockHeight + 1) + if blockHeight < dagState.SelectedTip.Height { + nextHash, err := s.cfg.DAG.BlockHashByHeight(blockHeight + 1) if err != nil { context := "No next block" return nil, internalRPCError(err.Error(), context) @@ -1324,7 +1324,7 @@ func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct params := s.cfg.ChainParams blockHeaderReply := btcjson.GetBlockHeaderVerboseResult{ Hash: c.Hash, - Confirmations: uint64(1 + best.Height - blockHeight), + Confirmations: uint64(1 + dagState.SelectedTip.Height - blockHeight), Height: blockHeight, Version: blockHeader.Version, VersionHex: fmt.Sprintf("%08x", blockHeader.Version), @@ -1341,7 +1341,7 @@ func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct // encodeTemplateID encodes the passed details into an ID that can be used to // uniquely identify a block template. -func encodeTemplateID(prevHash *chainhash.Hash, lastGenerated time.Time) string { +func encodeTemplateID(prevHash *daghash.Hash, lastGenerated time.Time) string { return fmt.Sprintf("%s-%d", prevHash.String(), lastGenerated.Unix()) } @@ -1350,13 +1350,13 @@ func encodeTemplateID(prevHash *chainhash.Hash, lastGenerated time.Time) string // that are using long polling for block templates. The ID consists of the // previous block hash for the associated template and the time the associated // template was generated. 
-func decodeTemplateID(templateID string) (*chainhash.Hash, int64, error) { +func decodeTemplateID(templateID string) (*daghash.Hash, int64, error) { fields := strings.Split(templateID, "-") if len(fields) != 2 { return nil, 0, errors.New("invalid longpollid format") } - prevHash, err := chainhash.NewHashFromStr(fields[0]) + prevHash, err := daghash.NewHashFromStr(fields[0]) if err != nil { return nil, 0, errors.New("invalid longpollid format") } @@ -1372,7 +1372,7 @@ func decodeTemplateID(templateID string) (*chainhash.Hash, int64, error) { // notified when block templates are stale. // // This function MUST be called with the state locked. -func (state *gbtWorkState) notifyLongPollers(latestHash *chainhash.Hash, lastGenerated time.Time) { +func (state *gbtWorkState) notifyLongPollers(latestHash *daghash.Hash, lastGenerated time.Time) { // Notify anything that is waiting for a block template update from a // hash which is not the hash of the tip of the best chain since their // work is now invalid. @@ -1419,7 +1419,7 @@ func (state *gbtWorkState) notifyLongPollers(latestHash *chainhash.Hash, lastGen // NotifyBlockConnected uses the newly-connected block to notify any long poll // clients with a new block template when their existing block template is // stale due to the newly connected block. -func (state *gbtWorkState) NotifyBlockConnected(blockHash *chainhash.Hash) { +func (state *gbtWorkState) NotifyBlockConnected(blockHash *daghash.Hash) { go func() { state.Lock() defer state.Unlock() @@ -1458,7 +1458,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) { // without requiring a different channel for each client. // // This function MUST be called with the state locked. 
-func (state *gbtWorkState) templateUpdateChan(prevHash *chainhash.Hash, lastGenerated int64) chan struct{} { +func (state *gbtWorkState) templateUpdateChan(prevHash *daghash.Hash, lastGenerated int64) chan struct{} { // Either get the current list of channels waiting for updates about // changes to block template for the previous hash or create a new one. channels, ok := state.notifyMap[*prevHash] @@ -1504,7 +1504,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo // generated. var msgBlock *wire.MsgBlock var targetDifficulty string - latestHash := &s.cfg.Chain.BestSnapshot().Hash + latestHash := &s.cfg.DAG.GetDAGState().SelectedTip.Hash template := state.template if template == nil || state.prevHash == nil || !state.prevHash.IsEqual(latestHash) || @@ -1538,13 +1538,13 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo template = blkTemplate msgBlock = template.Block targetDifficulty = fmt.Sprintf("%064x", - blockchain.CompactToBig(msgBlock.Header.Bits)) + blockdag.CompactToBig(msgBlock.Header.Bits)) // Get the minimum allowed timestamp for the block based on the // median timestamp of the last several blocks per the chain // consensus rules. - best := s.cfg.Chain.BestSnapshot() - minTimestamp := mining.MinimumMedianTime(best) + dagState := s.cfg.DAG.GetDAGState() + minTimestamp := mining.MinimumMedianTime(dagState) // Update work state to ensure another block template isn't // generated until needed. @@ -1591,14 +1591,14 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo // Update the merkle root. block := btcutil.NewBlock(template.Block) - merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) + merkles := blockdag.BuildMerkleTreeStore(block.Transactions()) template.Block.Header.MerkleRoot = *merkles[len(merkles)-1] } // Set locals for convenience. 
msgBlock = template.Block targetDifficulty = fmt.Sprintf("%064x", - blockchain.CompactToBig(msgBlock.Header.Bits)) + blockdag.CompactToBig(msgBlock.Header.Bits)) // Update the time of the block template to the current time // while accounting for the median time of the past several @@ -1628,7 +1628,7 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld msgBlock := template.Block header := &msgBlock.Header adjustedTime := state.timeSource.AdjustedTime() - maxTime := adjustedTime.Add(time.Second * blockchain.MaxTimeOffsetSeconds) + maxTime := adjustedTime.Add(time.Second * blockdag.MaxTimeOffsetSeconds) if header.Timestamp.After(maxTime) { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCOutOfRange, @@ -1644,7 +1644,7 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld // the adjustments to the various lengths and indices. numTx := len(msgBlock.Transactions) transactions := make([]btcjson.GetBlockTemplateResultTx, 0, numTx-1) - txIndex := make(map[chainhash.Hash]int64, numTx) + txIndex := make(map[daghash.Hash]int64, numTx) for i, tx := range msgBlock.Transactions { txHash := tx.TxHash() txIndex[txHash] = int64(i) @@ -1692,14 +1692,14 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld // implied by the included or omission of fields: // Including MinTime -> time/decrement // Omitting CoinbaseTxn -> coinbase, generation - targetDifficulty := fmt.Sprintf("%064x", blockchain.CompactToBig(header.Bits)) + targetDifficulty := fmt.Sprintf("%064x", blockdag.CompactToBig(header.Bits)) templateID := encodeTemplateID(state.prevHash, state.lastGenerated) reply := btcjson.GetBlockTemplateResult{ Bits: strconv.FormatInt(int64(header.Bits), 16), CurTime: header.Timestamp.Unix(), Height: int64(template.Height), PreviousHash: header.PrevBlock.String(), - SigOpLimit: blockchain.MaxSigOpsPerBlock, + SigOpLimit: blockdag.MaxSigOpsPerBlock, SizeLimit: wire.MaxBlockPayload, Transactions: 
transactions, Version: header.Version, @@ -1900,7 +1900,7 @@ func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateReques } // No point in generating or accepting work before the chain is synced. - currentHeight := s.cfg.Chain.BestSnapshot().Height + currentHeight := s.cfg.DAG.GetDAGState().SelectedTip.Height if currentHeight != 0 && !s.cfg.SyncMgr.IsCurrent() { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCClientInInitialDownload, @@ -1939,89 +1939,89 @@ func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateReques func chainErrToGBTErrString(err error) string { // When the passed error is not a RuleError, just return a generic // rejected string with the error text. - ruleErr, ok := err.(blockchain.RuleError) + ruleErr, ok := err.(blockdag.RuleError) if !ok { return "rejected: " + err.Error() } switch ruleErr.ErrorCode { - case blockchain.ErrDuplicateBlock: + case blockdag.ErrDuplicateBlock: return "duplicate" - case blockchain.ErrBlockTooBig: + case blockdag.ErrBlockTooBig: return "bad-blk-length" - case blockchain.ErrBlockVersionTooOld: + case blockdag.ErrBlockVersionTooOld: return "bad-version" - case blockchain.ErrInvalidTime: + case blockdag.ErrInvalidTime: return "bad-time" - case blockchain.ErrTimeTooOld: + case blockdag.ErrTimeTooOld: return "time-too-old" - case blockchain.ErrTimeTooNew: + case blockdag.ErrTimeTooNew: return "time-too-new" - case blockchain.ErrDifficultyTooLow: + case blockdag.ErrDifficultyTooLow: return "bad-diffbits" - case blockchain.ErrUnexpectedDifficulty: + case blockdag.ErrUnexpectedDifficulty: return "bad-diffbits" - case blockchain.ErrHighHash: + case blockdag.ErrHighHash: return "high-hash" - case blockchain.ErrBadMerkleRoot: + case blockdag.ErrBadMerkleRoot: return "bad-txnmrklroot" - case blockchain.ErrBadCheckpoint: + case blockdag.ErrBadCheckpoint: return "bad-checkpoint" - case blockchain.ErrForkTooOld: + case blockdag.ErrForkTooOld: return "fork-too-old" - case 
blockchain.ErrCheckpointTimeTooOld: + case blockdag.ErrCheckpointTimeTooOld: return "checkpoint-time-too-old" - case blockchain.ErrNoTransactions: + case blockdag.ErrNoTransactions: return "bad-txns-none" - case blockchain.ErrNoTxInputs: + case blockdag.ErrNoTxInputs: return "bad-txns-noinputs" - case blockchain.ErrNoTxOutputs: + case blockdag.ErrNoTxOutputs: return "bad-txns-nooutputs" - case blockchain.ErrTxTooBig: + case blockdag.ErrTxTooBig: return "bad-txns-size" - case blockchain.ErrBadTxOutValue: + case blockdag.ErrBadTxOutValue: return "bad-txns-outputvalue" - case blockchain.ErrDuplicateTxInputs: + case blockdag.ErrDuplicateTxInputs: return "bad-txns-dupinputs" - case blockchain.ErrBadTxInput: + case blockdag.ErrBadTxInput: return "bad-txns-badinput" - case blockchain.ErrMissingTxOut: + case blockdag.ErrMissingTxOut: return "bad-txns-missinginput" - case blockchain.ErrUnfinalizedTx: + case blockdag.ErrUnfinalizedTx: return "bad-txns-unfinalizedtx" - case blockchain.ErrDuplicateTx: + case blockdag.ErrDuplicateTx: return "bad-txns-duplicate" - case blockchain.ErrOverwriteTx: + case blockdag.ErrOverwriteTx: return "bad-txns-overwrite" - case blockchain.ErrImmatureSpend: + case blockdag.ErrImmatureSpend: return "bad-txns-maturity" - case blockchain.ErrSpendTooHigh: + case blockdag.ErrSpendTooHigh: return "bad-txns-highspend" - case blockchain.ErrBadFees: + case blockdag.ErrBadFees: return "bad-txns-fees" - case blockchain.ErrTooManySigOps: + case blockdag.ErrTooManySigOps: return "high-sigops" - case blockchain.ErrFirstTxNotCoinbase: + case blockdag.ErrFirstTxNotCoinbase: return "bad-txns-nocoinbase" - case blockchain.ErrMultipleCoinbases: + case blockdag.ErrMultipleCoinbases: return "bad-txns-multicoinbase" - case blockchain.ErrBadCoinbaseScriptLen: + case blockdag.ErrBadCoinbaseScriptLen: return "bad-cb-length" - case blockchain.ErrBadCoinbaseValue: + case blockdag.ErrBadCoinbaseValue: return "bad-cb-value" - case blockchain.ErrMissingCoinbaseHeight: + case 
blockdag.ErrMissingCoinbaseHeight: return "bad-cb-height" - case blockchain.ErrBadCoinbaseHeight: + case blockdag.ErrBadCoinbaseHeight: return "bad-cb-height" - case blockchain.ErrScriptMalformed: + case blockdag.ErrScriptMalformed: return "bad-script-malformed" - case blockchain.ErrScriptValidation: + case blockdag.ErrScriptValidation: return "bad-script-validate" - case blockchain.ErrPreviousBlockUnknown: + case blockdag.ErrPreviousBlockUnknown: return "prev-blk-not-found" - case blockchain.ErrInvalidAncestorBlock: + case blockdag.ErrInvalidAncestorBlock: return "bad-prevblk" - case blockchain.ErrPrevBlockNotBest: + case blockdag.ErrPrevBlockNotBest: return "inconclusive-not-best-prvblk" } @@ -2065,14 +2065,14 @@ func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateReque block := btcutil.NewBlock(&msgBlock) // Ensure the block is building from the expected previous block. - expectedPrevHash := s.cfg.Chain.BestSnapshot().Hash + expectedPrevHash := s.cfg.DAG.GetDAGState().SelectedTip.Hash prevHash := &block.MsgBlock().Header.PrevBlock if !expectedPrevHash.IsEqual(prevHash) { return "bad-prevblk", nil } - if err := s.cfg.Chain.CheckConnectBlockTemplate(block); err != nil { - if _, ok := err.(blockchain.RuleError); !ok { + if err := s.cfg.DAG.CheckConnectBlockTemplate(block); err != nil { + if _, ok := err.(blockdag.RuleError); !ok { errStr := fmt.Sprintf("Failed to process block proposal: %v", err) rpcsLog.Error(errStr) return nil, &btcjson.RPCError{ @@ -2125,7 +2125,7 @@ func handleGetCFilter(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) } c := cmd.(*btcjson.GetCFilterCmd) - hash, err := chainhash.NewHashFromStr(c.Hash) + hash, err := daghash.NewHashFromStr(c.Hash) if err != nil { return nil, rpcDecodeHexError(c.Hash) } @@ -2154,7 +2154,7 @@ func handleGetCFilterHeader(s *rpcServer, cmd interface{}, closeChan <-chan stru } c := cmd.(*btcjson.GetCFilterHeaderCmd) - hash, err := chainhash.NewHashFromStr(c.Hash) + hash, err := 
daghash.NewHashFromStr(c.Hash) if err != nil { return nil, rpcDecodeHexError(c.Hash) } @@ -2187,8 +2187,8 @@ func handleGetCurrentNet(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // handleGetDifficulty implements the getdifficulty command. func handleGetDifficulty(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - best := s.cfg.Chain.BestSnapshot() - return getDifficultyRatio(best.Bits, s.cfg.ChainParams), nil + dagState := s.cfg.DAG.GetDAGState() + return getDifficultyRatio(dagState.SelectedTip.Bits, s.cfg.ChainParams), nil } // handleGetGenerate implements the getgenerate command. @@ -2210,17 +2210,17 @@ func handleGetHeaders(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // Fetch the requested headers from chain while respecting the provided // block locators and stop hash. - blockLocators := make([]*chainhash.Hash, len(c.BlockLocators)) + blockLocators := make([]*daghash.Hash, len(c.BlockLocators)) for i := range c.BlockLocators { - blockLocator, err := chainhash.NewHashFromStr(c.BlockLocators[i]) + blockLocator, err := daghash.NewHashFromStr(c.BlockLocators[i]) if err != nil { return nil, rpcDecodeHexError(c.BlockLocators[i]) } blockLocators[i] = blockLocator } - var hashStop chainhash.Hash + var hashStop daghash.Hash if c.HashStop != "" { - err := chainhash.Decode(&hashStop, c.HashStop) + err := daghash.Decode(&hashStop, c.HashStop) if err != nil { return nil, rpcDecodeHexError(c.HashStop) } @@ -2245,15 +2245,15 @@ func handleGetHeaders(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // handleGetInfo implements the getinfo command. We only return the fields // that are not related to wallet functionality. 
func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() ret := &btcjson.InfoChainResult{ Version: int32(1000000*appMajor + 10000*appMinor + 100*appPatch), ProtocolVersion: int32(maxProtocolVersion), - Blocks: best.Height, + Blocks: dagState.SelectedTip.Height, TimeOffset: int64(s.cfg.TimeSource.Offset().Seconds()), Connections: s.cfg.ConnMgr.ConnectedCount(), Proxy: cfg.Proxy, - Difficulty: getDifficultyRatio(best.Bits, s.cfg.ChainParams), + Difficulty: getDifficultyRatio(dagState.SelectedTip.Bits, s.cfg.ChainParams), TestNet: cfg.TestNet3, RelayFee: cfg.minRelayTxFee.ToBTC(), } @@ -2297,12 +2297,12 @@ func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{ } } - best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() result := btcjson.GetMiningInfoResult{ - Blocks: int64(best.Height), - CurrentBlockSize: best.BlockSize, - CurrentBlockTx: best.NumTxns, - Difficulty: getDifficultyRatio(best.Bits, s.cfg.ChainParams), + Blocks: int64(dagState.SelectedTip.Height), + CurrentBlockSize: dagState.SelectedTip.BlockSize, + CurrentBlockTx: dagState.SelectedTip.NumTxs, + Difficulty: getDifficultyRatio(dagState.SelectedTip.Bits, s.cfg.ChainParams), Generate: s.cfg.CPUMiner.IsMining(), GenProcLimit: s.cfg.CPUMiner.NumWorkers(), HashesPerSec: int64(s.cfg.CPUMiner.HashesPerSecond()), @@ -2336,16 +2336,16 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru // since we can't reasonably calculate the number of network hashes // per second from invalid values. When it's negative, use the current // best block height. 
- best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() endHeight := int32(-1) if c.Height != nil { endHeight = int32(*c.Height) } - if endHeight > best.Height || endHeight == 0 { + if endHeight > dagState.SelectedTip.Height || endHeight == 0 { return int64(0), nil } if endHeight < 0 { - endHeight = best.Height + endHeight = dagState.SelectedTip.Height } // Calculate the number of blocks per retarget interval based on the @@ -2378,14 +2378,14 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru var minTimestamp, maxTimestamp time.Time totalWork := big.NewInt(0) for curHeight := startHeight; curHeight <= endHeight; curHeight++ { - hash, err := s.cfg.Chain.BlockHashByHeight(curHeight) + hash, err := s.cfg.DAG.BlockHashByHeight(curHeight) if err != nil { context := "Failed to fetch block hash" return nil, internalRPCError(err.Error(), context) } // Fetch the header from chain. - header, err := s.cfg.Chain.FetchHeader(hash) + header, err := s.cfg.DAG.FetchHeader(hash) if err != nil { context := "Failed to fetch block header" return nil, internalRPCError(err.Error(), context) @@ -2395,7 +2395,7 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru minTimestamp = header.Timestamp maxTimestamp = minTimestamp } else { - totalWork.Add(totalWork, blockchain.CalcWork(header.Bits)) + totalWork.Add(totalWork, blockdag.CalcWork(header.Bits)) if minTimestamp.After(header.Timestamp) { minTimestamp = header.Timestamp @@ -2482,7 +2482,7 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str c := cmd.(*btcjson.GetRawTransactionCmd) // Convert the provided transaction hash hex to a Hash. 
- txHash, err := chainhash.NewHashFromStr(c.Txid) + txHash, err := daghash.NewHashFromStr(c.Txid) if err != nil { return nil, rpcDecodeHexError(c.Txid) } @@ -2495,7 +2495,7 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str // Try to fetch the transaction from the memory pool and if that fails, // try the block database. var mtx *wire.MsgTx - var blkHash *chainhash.Hash + var blkHash *daghash.Hash var blkHeight int32 tx, err := s.cfg.TxMemPool.FetchTransaction(txHash) if err != nil { @@ -2538,7 +2538,7 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str // Grab the block height. blkHash = blockRegion.Hash - blkHeight, err = s.cfg.Chain.BlockHeightByHash(blkHash) + blkHeight, err = s.cfg.DAG.BlockHeightByHash(blkHash) if err != nil { context := "Failed to retrieve block height" return nil, internalRPCError(err.Error(), context) @@ -2577,7 +2577,7 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str var chainHeight int32 if blkHash != nil { // Fetch the header from chain. - header, err := s.cfg.Chain.FetchHeader(blkHash) + header, err := s.cfg.DAG.FetchHeader(blkHash) if err != nil { context := "Failed to fetch block header" return nil, internalRPCError(err.Error(), context) @@ -2585,7 +2585,7 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str blkHeader = &header blkHashStr = blkHash.String() - chainHeight = s.cfg.Chain.BestSnapshot().Height + chainHeight = s.cfg.DAG.GetDAGState().SelectedTip.Height } rawTxn, err := createTxRawResult(s.cfg.ChainParams, mtx, txHash.String(), @@ -2601,7 +2601,7 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i c := cmd.(*btcjson.GetTxOutCmd) // Convert the provided transaction hash hex to a Hash. 
- txHash, err := chainhash.NewHashFromStr(c.Txid) + txHash, err := daghash.NewHashFromStr(c.Txid) if err != nil { return nil, rpcDecodeHexError(c.Txid) } @@ -2641,15 +2641,15 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i return nil, internalRPCError(errStr, "") } - best := s.cfg.Chain.BestSnapshot() - bestBlockHash = best.Hash.String() + dagState := s.cfg.DAG.GetDAGState() + bestBlockHash = dagState.SelectedTip.Hash.String() confirmations = 0 value = txOut.Value pkScript = txOut.PkScript - isCoinbase = blockchain.IsCoinBaseTx(mtx) + isCoinbase = blockdag.IsCoinBaseTx(mtx) } else { out := wire.OutPoint{Hash: *txHash, Index: c.Vout} - entry, err := s.cfg.Chain.FetchUtxoEntry(out) + entry, err := s.cfg.DAG.FetchUtxoEntry(out) if err != nil { return nil, rpcNoTxInfoError(txHash) } @@ -2663,9 +2663,9 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i return nil, nil } - best := s.cfg.Chain.BestSnapshot() - bestBlockHash = best.Hash.String() - confirmations = 1 + best.Height - entry.BlockHeight() + dagState := s.cfg.DAG.GetDAGState() + bestBlockHash = dagState.SelectedTip.Hash.String() + confirmations = 1 + dagState.SelectedTip.Height - entry.BlockHeight() value = entry.Amount() pkScript = entry.PkScript() isCoinbase = entry.IsCoinBase() @@ -2763,7 +2763,7 @@ func handlePing(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter // possible. type retrievedTx struct { txBytes []byte - blkHash *chainhash.Hash // Only set when transaction is in a block. + blkHash *daghash.Hash // Only set when transaction is in a block. tx *btcutil.Tx } @@ -2835,9 +2835,9 @@ func fetchInputTxos(s *rpcServer, tx *wire.MsgTx) (map[wire.OutPoint]wire.TxOut, // createVinListPrevOut returns a slice of JSON objects for the inputs of the // passed transaction. 
-func createVinListPrevOut(s *rpcServer, mtx *wire.MsgTx, chainParams *chaincfg.Params, vinExtra bool, filterAddrMap map[string]struct{}) ([]btcjson.VinPrevOut, error) { +func createVinListPrevOut(s *rpcServer, mtx *wire.MsgTx, chainParams *dagconfig.Params, vinExtra bool, filterAddrMap map[string]struct{}) ([]btcjson.VinPrevOut, error) { // Coinbase transactions only have a single txin by definition. - if blockchain.IsCoinBaseTx(mtx) { + if blockdag.IsCoinBaseTx(mtx) { // Only include the transaction if the filter map is empty // because a coinbase input has no addresses and so would never // match a non-empty filter. @@ -3158,7 +3158,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan } // The verbose flag is set, so generate the JSON object and return it. - best := s.cfg.Chain.BestSnapshot() + dagState := s.cfg.DAG.GetDAGState() srtList := make([]btcjson.SearchRawTransactionsResult, len(addressTxns)) for i := range addressTxns { // The deserialized transaction is needed, so deserialize the @@ -3201,7 +3201,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan var blkHeight int32 if blkHash := rtx.blkHash; blkHash != nil { // Fetch the header from chain. - header, err := s.cfg.Chain.FetchHeader(blkHash) + header, err := s.cfg.DAG.FetchHeader(blkHash) if err != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCBlockNotFound, @@ -3210,7 +3210,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan } // Get the block height from chain. 
- height, err := s.cfg.Chain.BlockHeightByHash(blkHash) + height, err := s.cfg.DAG.BlockHeightByHash(blkHash) if err != nil { context := "Failed to obtain block height" return nil, internalRPCError(err.Error(), context) @@ -3228,7 +3228,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan result.Time = uint64(blkHeader.Timestamp.Unix()) result.Blocktime = uint64(blkHeader.Timestamp.Unix()) result.BlockHash = blkHashStr - result.Confirmations = uint64(1 + best.Height - blkHeight) + result.Confirmations = uint64(1 + dagState.SelectedTip.Height - blkHeight) } } @@ -3381,7 +3381,7 @@ func handleSubmitBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // Process this block using the same rules as blocks coming from other // nodes. This will in turn relay it to the network like normal. - _, err = s.cfg.SyncMgr.SubmitBlock(block, blockchain.BFNone) + _, err = s.cfg.SyncMgr.SubmitBlock(block, blockdag.BFNone) if err != nil { return fmt.Sprintf("rejected: %s", err.Error()), nil } @@ -3413,17 +3413,17 @@ func handleValidateAddress(s *rpcServer, cmd interface{}, closeChan <-chan struc } func verifyChain(s *rpcServer, level, depth int32) error { - best := s.cfg.Chain.BestSnapshot() - finishHeight := best.Height - depth + dagState := s.cfg.DAG.GetDAGState() + finishHeight := dagState.SelectedTip.Height - depth if finishHeight < 0 { finishHeight = 0 } rpcsLog.Infof("Verifying chain for %d blocks at level %d", - best.Height-finishHeight, level) + dagState.SelectedTip.Height-finishHeight, level) - for height := best.Height; height > finishHeight; height-- { + for height := dagState.SelectedTip.Height; height > finishHeight; height-- { // Level 0 just looks up the block. 
- block, err := s.cfg.Chain.BlockByHeight(height) + block, err := s.cfg.DAG.BlockByHeight(height) if err != nil { rpcsLog.Errorf("Verify is unable to fetch block at "+ "height %d: %v", height, err) @@ -3432,7 +3432,7 @@ func verifyChain(s *rpcServer, level, depth int32) error { // Level 1 does basic chain sanity checks. if level > 0 { - err := blockchain.CheckBlockSanity(block, + err := blockdag.CheckBlockSanity(block, s.cfg.ChainParams.PowLimit, s.cfg.TimeSource) if err != nil { rpcsLog.Errorf("Verify is unable to validate "+ @@ -3499,7 +3499,7 @@ func handleVerifyMessage(s *rpcServer, cmd interface{}, closeChan <-chan struct{ var buf bytes.Buffer wire.WriteVarString(&buf, 0, "Bitcoin Signed Message:\n") wire.WriteVarString(&buf, 0, c.Message) - expectedMessageHash := chainhash.DoubleHashB(buf.Bytes()) + expectedMessageHash := daghash.DoubleHashB(buf.Bytes()) pk, wasCompressed, err := btcec.RecoverCompact(btcec.S256(), sig, expectedMessageHash) if err != nil { @@ -4146,7 +4146,7 @@ type rpcserverSyncManager interface { // SubmitBlock submits the provided block to the network after // processing it locally. - SubmitBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) + SubmitBlock(block *btcutil.Block, flags blockdag.BehaviorFlags) (bool, error) // Pause pauses the sync manager until the returned channel is closed. Pause() chan<- struct{} @@ -4159,7 +4159,7 @@ type rpcserverSyncManager interface { // block in the provided locators until the provided stop hash or the // current tip is reached, up to a max of wire.MaxBlockHeadersPerMsg // hashes. - LocateHeaders(locators []*chainhash.Hash, hashStop *chainhash.Hash) []wire.BlockHeader + LocateHeaders(locators []*daghash.Hash, hashStop *daghash.Hash) []wire.BlockHeader } // rpcserverConfig is a descriptor containing the RPC server configuration. @@ -4185,9 +4185,9 @@ type rpcserverConfig struct { // These fields allow the RPC server to interface with the local block // chain data and state. 
- TimeSource blockchain.MedianTimeSource - Chain *blockchain.BlockChain - ChainParams *chaincfg.Params + TimeSource blockdag.MedianTimeSource + DAG *blockdag.BlockDAG + ChainParams *dagconfig.Params DB database.DB // TxMemPool defines the transaction memory pool to interact with. @@ -4233,16 +4233,16 @@ func newRPCServer(config *rpcserverConfig) (*rpcServer, error) { rpc.limitauthsha = sha256.Sum256([]byte(auth)) } rpc.ntfnMgr = newWsNotificationManager(&rpc) - rpc.cfg.Chain.Subscribe(rpc.handleBlockchainNotification) + rpc.cfg.DAG.Subscribe(rpc.handleBlockchainNotification) return &rpc, nil } // Callback for notifications from blockchain. It notifies clients that are // long polling for changes or subscribed to websockets notifications. -func (s *rpcServer) handleBlockchainNotification(notification *blockchain.Notification) { +func (s *rpcServer) handleBlockchainNotification(notification *blockdag.Notification) { switch notification.Type { - case blockchain.NTBlockAccepted: + case blockdag.NTBlockAccepted: block, ok := notification.Data.(*btcutil.Block) if !ok { rpcsLog.Warnf("Chain accepted notification is not a block.") @@ -4254,7 +4254,7 @@ func (s *rpcServer) handleBlockchainNotification(notification *blockchain.Notifi // their old block template to become stale. s.gbtWorkState.NotifyBlockConnected(block.Hash()) - case blockchain.NTBlockConnected: + case blockdag.NTBlockConnected: block, ok := notification.Data.(*btcutil.Block) if !ok { rpcsLog.Warnf("Chain connected notification is not a block.") @@ -4264,7 +4264,7 @@ func (s *rpcServer) handleBlockchainNotification(notification *blockchain.Notifi // Notify registered websocket clients of incoming block. 
s.ntfnMgr.NotifyBlockConnected(block) - case blockchain.NTBlockDisconnected: + case blockdag.NTBlockDisconnected: block, ok := notification.Data.(*btcutil.Block) if !ok { rpcsLog.Warnf("Chain disconnected notification is not a block.") diff --git a/rpcwebsocket.go b/rpcwebsocket.go index 9eba21665..f69069951 100644 --- a/rpcwebsocket.go +++ b/rpcwebsocket.go @@ -22,10 +22,10 @@ import ( "golang.org/x/crypto/ripemd160" - "github.com/daglabs/btcd/blockchain" + "github.com/daglabs/btcd/blockdag" "github.com/daglabs/btcd/btcjson" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" @@ -273,7 +273,7 @@ type wsClientFilter struct { // for a websocket client. // // NOTE: This extension was ported from github.com/decred/dcrd -func newWSClientFilter(addresses []string, unspentOutPoints []wire.OutPoint, params *chaincfg.Params) *wsClientFilter { +func newWSClientFilter(addresses []string, unspentOutPoints []wire.OutPoint, params *dagconfig.Params) *wsClientFilter { filter := &wsClientFilter{ pubKeyHashes: map[[ripemd160.Size]byte]struct{}{}, scriptHashes: map[[ripemd160.Size]byte]struct{}{}, @@ -328,7 +328,7 @@ func (f *wsClientFilter) addAddress(a btcutil.Address) { // wsClientFilter using addAddress. // // NOTE: This extension was ported from github.com/decred/dcrd -func (f *wsClientFilter) addAddressStr(s string, params *chaincfg.Params) { +func (f *wsClientFilter) addAddressStr(s string, params *dagconfig.Params) { // If address can't be decoded, no point in saving it since it should also // impossible to create the address from an inspected transaction output // script. @@ -412,7 +412,7 @@ func (f *wsClientFilter) removeAddress(a btcutil.Address) { // wsClientFilter using removeAddress. 
// // NOTE: This extension was ported from github.com/decred/dcrd -func (f *wsClientFilter) removeAddressStr(s string, params *chaincfg.Params) { +func (f *wsClientFilter) removeAddressStr(s string, params *dagconfig.Params) { a, err := btcutil.DecodeAddress(s, params) if err == nil { f.removeAddress(a) @@ -903,7 +903,7 @@ func (m *wsNotificationManager) addSpentRequests(opMap map[wire.OutPoint]map[cha // Check if any transactions spending these outputs already exists in // the mempool, if so send the notification immediately. - spends := make(map[chainhash.Hash]*btcutil.Tx) + spends := make(map[daghash.Hash]*btcutil.Tx) for _, op := range ops { spend := m.server.cfg.TxMemPool.CheckSpend(*op) if spend != nil { @@ -1790,7 +1790,7 @@ func handleLoadTxFilter(wsc *wsClient, icmd interface{}) (interface{}, error) { outPoints := make([]wire.OutPoint, len(cmd.OutPoints)) for i := range cmd.OutPoints { - hash, err := chainhash.NewHashFromStr(cmd.OutPoints[i].Hash) + hash, err := daghash.NewHashFromStr(cmd.OutPoints[i].Hash) if err != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCInvalidParameter, @@ -1948,7 +1948,7 @@ func handleStopNotifyReceived(wsc *wsClient, icmd interface{}) (interface{}, err // string slice. It does this by attempting to decode each address using the // current active network parameters. If any single address fails to decode // properly, the function returns an error. Otherwise, nil is returned. 
-func checkAddressValidity(addrs []string, params *chaincfg.Params) error { +func checkAddressValidity(addrs []string, params *dagconfig.Params) error { for _, addr := range addrs { _, err := btcutil.DecodeAddress(addr, params) if err != nil { @@ -1966,7 +1966,7 @@ func checkAddressValidity(addrs []string, params *chaincfg.Params) error { func deserializeOutpoints(serializedOuts []btcjson.OutPoint) ([]*wire.OutPoint, error) { outpoints := make([]*wire.OutPoint, 0, len(serializedOuts)) for i := range serializedOuts { - blockHash, err := chainhash.NewHashFromStr(serializedOuts[i].Hash) + blockHash, err := daghash.NewHashFromStr(serializedOuts[i].Hash) if err != nil { return nil, rpcDecodeHexError(serializedOuts[i].Hash) } @@ -2144,7 +2144,7 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { // a string slice. // // NOTE: This extension is ported from github.com/decred/dcrd -func rescanBlockFilter(filter *wsClientFilter, block *btcutil.Block, params *chaincfg.Params) []string { +func rescanBlockFilter(filter *wsClientFilter, block *btcutil.Block, params *dagconfig.Params) []string { var transactions []string filter.mu.Lock() @@ -2156,7 +2156,7 @@ func rescanBlockFilter(filter *wsClientFilter, block *btcutil.Block, params *cha added := false // Scan inputs if not a coinbase transaction. 
- if !blockchain.IsCoinBaseTx(msgTx) { + if !blockdag.IsCoinBaseTx(msgTx) { for _, input := range msgTx.TxIn { if !filter.existsUnspentOutPoint(&input.PreviousOutPoint) { continue @@ -2223,10 +2223,10 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { } } - blockHashes := make([]*chainhash.Hash, len(cmd.BlockHashes)) + blockHashes := make([]*daghash.Hash, len(cmd.BlockHashes)) for i := range cmd.BlockHashes { - hash, err := chainhash.NewHashFromStr(cmd.BlockHashes[i]) + hash, err := daghash.NewHashFromStr(cmd.BlockHashes[i]) if err != nil { return nil, err } @@ -2237,9 +2237,9 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { // Iterate over each block in the request and rescan. When a block // contains relevant transactions, add it to the response. - bc := wsc.server.cfg.Chain + bc := wsc.server.cfg.DAG params := wsc.server.cfg.ChainParams - var lastBlockHash *chainhash.Hash + var lastBlockHash *daghash.Hash for i := range blockHashes { block, err := bc.BlockByHash(blockHashes[i]) if err != nil { @@ -2274,10 +2274,10 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { // verifies that the new range of blocks is on the same fork as a previous // range of blocks. If this condition does not hold true, the JSON-RPC error // for an unrecoverable reorganize is returned. 
-func recoverFromReorg(chain *blockchain.BlockChain, minBlock, maxBlock int32, - lastBlock *chainhash.Hash) ([]chainhash.Hash, error) { +func recoverFromReorg(dag *blockdag.BlockDAG, minBlock, maxBlock int32, + lastBlock *daghash.Hash) ([]daghash.Hash, error) { - hashList, err := chain.HeightRange(minBlock, maxBlock) + hashList, err := dag.HeightRange(minBlock, maxBlock) if err != nil { rpcsLog.Errorf("Error looking up block range: %v", err) return nil, &btcjson.RPCError{ @@ -2289,7 +2289,7 @@ func recoverFromReorg(chain *blockchain.BlockChain, minBlock, maxBlock int32, return hashList, nil } - blk, err := chain.BlockByHash(&hashList[0]) + blk, err := dag.BlockByHash(&hashList[0]) if err != nil { rpcsLog.Errorf("Error looking up possibly reorged block: %v", err) @@ -2307,7 +2307,7 @@ func recoverFromReorg(chain *blockchain.BlockChain, minBlock, maxBlock int32, // descendantBlock returns the appropriate JSON-RPC error if a current block // fetched during a reorganize is not a direct child of the parent block hash. 
-func descendantBlock(prevHash *chainhash.Hash, curBlock *btcutil.Block) error { +func descendantBlock(prevHash *daghash.Hash, curBlock *btcutil.Block) error { curHash := &curBlock.MsgBlock().Header.PrevBlock if !prevHash.IsEqual(curHash) { rpcsLog.Errorf("Stopping rescan for reorged block %v "+ @@ -2336,7 +2336,7 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { outpoints := make([]*wire.OutPoint, 0, len(cmd.OutPoints)) for i := range cmd.OutPoints { cmdOutpoint := &cmd.OutPoints[i] - blockHash, err := chainhash.NewHashFromStr(cmdOutpoint.Hash) + blockHash, err := daghash.NewHashFromStr(cmdOutpoint.Hash) if err != nil { return nil, rpcDecodeHexError(cmdOutpoint.Hash) } @@ -2410,13 +2410,13 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { lookups.unspent[*outpoint] = struct{}{} } - chain := wsc.server.cfg.Chain + dag := wsc.server.cfg.DAG - minBlockHash, err := chainhash.NewHashFromStr(cmd.BeginBlock) + minBlockHash, err := daghash.NewHashFromStr(cmd.BeginBlock) if err != nil { return nil, rpcDecodeHexError(cmd.BeginBlock) } - minBlock, err := chain.BlockHeightByHash(minBlockHash) + minBlock, err := dag.BlockHeightByHash(minBlockHash) if err != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCBlockNotFound, @@ -2426,11 +2426,11 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { maxBlock := int32(math.MaxInt32) if cmd.EndBlock != nil { - maxBlockHash, err := chainhash.NewHashFromStr(*cmd.EndBlock) + maxBlockHash, err := daghash.NewHashFromStr(*cmd.EndBlock) if err != nil { return nil, rpcDecodeHexError(*cmd.EndBlock) } - maxBlock, err = chain.BlockHeightByHash(maxBlockHash) + maxBlock, err = dag.BlockHeightByHash(maxBlockHash) if err != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCBlockNotFound, @@ -2442,7 +2442,7 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { // lastBlock and lastBlockHash track the previously-rescanned block. 
// They equal nil when no previous blocks have been rescanned. var lastBlock *btcutil.Block - var lastBlockHash *chainhash.Hash + var lastBlockHash *daghash.Hash // A ticker is created to wait at least 10 seconds before notifying the // websocket client of the current progress completed by the rescan. @@ -2462,7 +2462,7 @@ fetchRange: if maxLoopBlock-minBlock > wire.MaxInvPerMsg { maxLoopBlock = minBlock + wire.MaxInvPerMsg } - hashList, err := chain.HeightRange(minBlock, maxLoopBlock) + hashList, err := dag.HeightRange(minBlock, maxLoopBlock) if err != nil { rpcsLog.Errorf("Error looking up block range: %v", err) return nil, &btcjson.RPCError{ @@ -2491,8 +2491,8 @@ fetchRange: // continue the fetch loop again to rescan the new // blocks (or error due to an irrecoverable reorganize). pauseGuard := wsc.server.cfg.SyncMgr.Pause() - best := wsc.server.cfg.Chain.BestSnapshot() - curHash := &best.Hash + dagState := wsc.server.cfg.DAG.GetDAGState() + curHash := &dagState.SelectedTip.Hash again := true if lastBlockHash == nil || *lastBlockHash == *curHash { again = false @@ -2518,7 +2518,7 @@ fetchRange: loopHashList: for i := range hashList { - blk, err := chain.BlockByHash(&hashList[i]) + blk, err := dag.BlockByHash(&hashList[i]) if err != nil { // Only handle reorgs if a block could not be // found for the hash. @@ -2554,7 +2554,7 @@ fetchRange: // before the range was evaluated, as it must be // reevaluated for the new hashList. 
minBlock += int32(i) - hashList, err = recoverFromReorg(chain, + hashList, err = recoverFromReorg(dag, minBlock, maxBlock, lastBlockHash) if err != nil { return nil, err diff --git a/server.go b/server.go index 9af8518bc..281562870 100644 --- a/server.go +++ b/server.go @@ -23,10 +23,10 @@ import ( "time" "github.com/daglabs/btcd/addrmgr" - "github.com/daglabs/btcd/blockchain" - "github.com/daglabs/btcd/blockchain/indexers" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/blockdag" + "github.com/daglabs/btcd/blockdag/indexers" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/connmgr" "github.com/daglabs/btcd/database" "github.com/daglabs/btcd/mempool" @@ -69,7 +69,7 @@ var ( ) // zeroHash is the zero value hash (all zeros). It is defined as a convenience. -var zeroHash chainhash.Hash +var zeroHash daghash.Hash // onionAddr implements the net.Addr interface and represents a tor address. type onionAddr struct { @@ -144,7 +144,7 @@ type relayMsg struct { // updates, peer heights will be kept up to date, allowing for fresh data when // selecting sync peer candidacy. type updatePeerHeightsMsg struct { - newHash *chainhash.Hash + newHash *daghash.Hash newHeight int32 originPeer *peer.Peer } @@ -188,8 +188,8 @@ func (ps *peerState) forAllPeers(closure func(sp *serverPeer)) { // cfHeaderKV is a tuple of a filter header and its associated block hash. The // struct is used to cache cfcheckpt responses. 
type cfHeaderKV struct { - blockHash chainhash.Hash - filterHeader chainhash.Hash + blockHash daghash.Hash + filterHeader daghash.Hash } // server provides a bitcoin server for handling communications to and from @@ -204,13 +204,13 @@ type server struct { shutdownSched int32 startupTime int64 - chainParams *chaincfg.Params + chainParams *dagconfig.Params addrManager *addrmgr.AddrManager connManager *connmgr.ConnManager sigCache *txscript.SigCache rpcServer *rpcServer syncManager *netsync.SyncManager - chain *blockchain.BlockChain + dag *blockdag.BlockDAG txMemPool *mempool.TxPool cpuMiner *cpuminer.CPUMiner modifyRebroadcastInv chan interface{} @@ -225,7 +225,7 @@ type server struct { quit chan struct{} nat NAT db database.DB - timeSource blockchain.MedianTimeSource + timeSource blockdag.MedianTimeSource services wire.ServiceFlag // The following fields are used for optional indexes. They will be nil @@ -257,7 +257,7 @@ type serverPeer struct { connReq *connmgr.ConnReq server *server persistent bool - continueHash *chainhash.Hash + continueHash *daghash.Hash relayMtx sync.Mutex disableRelayTx bool sentAddrs bool @@ -287,9 +287,9 @@ func newServerPeer(s *server, isPersistent bool) *serverPeer { // newestBlock returns the current best block hash and height using the format // required by the configuration for the peer package. -func (sp *serverPeer) newestBlock() (*chainhash.Hash, int32, error) { - best := sp.server.chain.BestSnapshot() - return &best.Hash, best.Height, nil +func (sp *serverPeer) newestBlock() (*daghash.Hash, int32, error) { + dagState := sp.server.dag.GetDAGState() + return &dagState.SelectedTip.Hash, dagState.SelectedTip.Height, nil } // addKnownAddresses adds the given addresses to the set of known addresses to @@ -669,7 +669,7 @@ func (sp *serverPeer) OnGetBlocks(_ *peer.Peer, msg *wire.MsgGetBlocks) { // over with the genesis block if unknown block locators are provided. // // This mirrors the behavior in the reference implementation. 
- chain := sp.server.chain + chain := sp.server.dag hashList := chain.LocateBlocks(msg.BlockLocatorHashes, &msg.HashStop, wire.MaxBlocksPerMsg) @@ -713,7 +713,7 @@ func (sp *serverPeer) OnGetHeaders(_ *peer.Peer, msg *wire.MsgGetHeaders) { // over with the genesis block if unknown block locators are provided. // // This mirrors the behavior in the reference implementation. - chain := sp.server.chain + chain := sp.server.dag headers := chain.LocateHeaders(msg.BlockLocatorHashes, &msg.HashStop) if len(headers) == 0 { // Nothing to send. @@ -735,16 +735,16 @@ func (sp *serverPeer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) { return } - hashes, err := sp.server.chain.HeightToHashRange(int32(msg.StartHeight), + hashes, err := sp.server.dag.HeightToHashRange(int32(msg.StartHeight), &msg.StopHash, wire.MaxGetCFiltersReqRange) if err != nil { peerLog.Debugf("Invalid getcfilters request: %v", err) return } - // Create []*chainhash.Hash from []chainhash.Hash to pass to + // Create []*daghash.Hash from []daghash.Hash to pass to // FiltersByBlockHashes. - hashPtrs := make([]*chainhash.Hash, len(hashes)) + hashPtrs := make([]*daghash.Hash, len(hashes)) for i := range hashes { hashPtrs[i] = &hashes[i] } @@ -784,7 +784,7 @@ func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { } // Fetch the hashes from the block index. - hashList, err := sp.server.chain.HeightToHashRange(startHeight, + hashList, err := sp.server.dag.HeightToHashRange(startHeight, &msg.StopHash, maxResults) if err != nil { peerLog.Debugf("Invalid getcfheaders request: %v", err) @@ -798,9 +798,9 @@ func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { return } - // Create []*chainhash.Hash from []chainhash.Hash to pass to + // Create []*daghash.Hash from []daghash.Hash to pass to // FilterHeadersByBlockHashes. 
- hashPtrs := make([]*chainhash.Hash, len(hashList)) + hashPtrs := make([]*daghash.Hash, len(hashList)) for i := range hashList { hashPtrs[i] = &hashList[i] } @@ -853,7 +853,7 @@ func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { } // Deserialize the hash. - filterHash, err := chainhash.NewHash(hashBytes) + filterHash, err := daghash.NewHash(hashBytes) if err != nil { peerLog.Warnf("Committed filter hash deserialize "+ "failed: %v", err) @@ -875,7 +875,7 @@ func (sp *serverPeer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) { return } - blockHashes, err := sp.server.chain.IntervalBlockHashes(&msg.StopHash, + blockHashes, err := sp.server.dag.IntervalBlockHashes(&msg.StopHash, wire.CFCheckptInterval) if err != nil { peerLog.Debugf("Invalid getcfilters request: %v", err) @@ -926,7 +926,7 @@ func (sp *serverPeer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) { } // Look up any filter headers that aren't cached. - blockHashPtrs := make([]*chainhash.Hash, 0, len(blockHashes)-forkIdx) + blockHashPtrs := make([]*daghash.Hash, 0, len(blockHashes)-forkIdx) for i := forkIdx; i < len(blockHashes); i++ { blockHashPtrs = append(blockHashPtrs, &blockHashes[i]) } @@ -944,7 +944,7 @@ func (sp *serverPeer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) { return } - filterHeader, err := chainhash.NewHash(filterHeaderBytes) + filterHeader, err := daghash.NewHash(filterHeaderBytes) if err != nil { peerLog.Warnf("Committed filter header deserialize "+ "failed: %v", err) @@ -1254,7 +1254,7 @@ func (s *server) TransactionConfirmed(tx *btcutil.Tx) { // pushTxMsg sends a tx message for the provided transaction hash to the // connected peer. An error is returned if the transaction hash is not known. 
-func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{}, +func (s *server) pushTxMsg(sp *serverPeer, hash *daghash.Hash, doneChan chan<- struct{}, waitChan <-chan struct{}) error { // Attempt to fetch the requested transaction from the pool. A @@ -1283,7 +1283,7 @@ func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- // pushBlockMsg sends a block message for the provided block hash to the // connected peer. An error is returned if the block hash is not known. -func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{}, +func (s *server) pushBlockMsg(sp *serverPeer, hash *daghash.Hash, doneChan chan<- struct{}, waitChan <-chan struct{}) error { // Fetch the raw block bytes from the database. @@ -1337,9 +1337,9 @@ func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan cha // to trigger it to issue another getblocks message for the next // batch of inventory. if sendInv { - best := sp.server.chain.BestSnapshot() + dagState := sp.server.dag.GetDAGState() invMsg := wire.NewMsgInvSizeHint(1) - iv := wire.NewInvVect(wire.InvTypeBlock, &best.Hash) + iv := wire.NewInvVect(wire.InvTypeBlock, &dagState.SelectedTip.Hash) invMsg.AddInvVect(iv) sp.QueueMessage(invMsg, doneChan) sp.continueHash = nil @@ -1351,7 +1351,7 @@ func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan cha // the connected peer. Since a merkle block requires the peer to have a filter // loaded, this call will simply be ignored if there is no filter loaded. An // error is returned if the block hash is not known. -func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *chainhash.Hash, +func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *daghash.Hash, doneChan chan<- struct{}, waitChan <-chan struct{}) error { // Do not send a response if the peer doesn't have a filter loaded. 
@@ -1363,7 +1363,7 @@ func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *chainhash.Hash, } // Fetch the raw block bytes from the database. - blk, err := sp.server.chain.BlockByHash(hash) + blk, err := sp.server.dag.BlockByHash(hash) if err != nil { peerLog.Tracef("Unable to fetch requested block hash %v: %v", hash, err) @@ -2062,7 +2062,7 @@ func (s *server) NetTotals() (uint64, uint64) { // the latest connected main chain block, or a recognized orphan. These height // updates allow us to dynamically refresh peer heights, ensuring sync peer // selection has access to the latest block heights for each peer. -func (s *server) UpdatePeerHeights(latestBlkHash *chainhash.Hash, latestHeight int32, updateSource *peer.Peer) { +func (s *server) UpdatePeerHeights(latestBlkHash *daghash.Hash, latestHeight int32, updateSource *peer.Peer) { s.peerHeightsUpdate <- updatePeerHeightsMsg{ newHash: latestBlkHash, newHeight: latestHeight, @@ -2393,7 +2393,7 @@ func setupRPCListeners() ([]net.Listener, error) { // newServer returns a new btcd server configured to listen on addr for the // bitcoin network type specified by chainParams. Use start to begin accepting // connections from peers. 
-func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Params, interrupt <-chan struct{}) (*server, error) { +func newServer(listenAddrs []string, db database.DB, chainParams *dagconfig.Params, interrupt <-chan struct{}) (*server, error) { services := defaultServices if cfg.NoPeerBloomFilters { services &^= wire.SFNodeBloom @@ -2431,7 +2431,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param peerHeightsUpdate: make(chan updatePeerHeightsMsg), nat: nat, db: db, - timeSource: blockchain.NewMedianTime(), + timeSource: blockdag.NewMedianTime(), services: services, sigCache: txscript.NewSigCache(cfg.SigCacheMaxSize), cfCheckptCaches: make(map[wire.FilterType][]cfHeaderKV), @@ -2470,23 +2470,23 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param } // Create an index manager if any of the optional indexes are enabled. - var indexManager blockchain.IndexManager + var indexManager blockdag.IndexManager if len(indexes) > 0 { indexManager = indexers.NewManager(db, indexes) } // Merge given checkpoints with the default ones unless they are disabled. - var checkpoints []chaincfg.Checkpoint + var checkpoints []dagconfig.Checkpoint if !cfg.DisableCheckpoints { checkpoints = mergeCheckpoints(s.chainParams.Checkpoints, cfg.addCheckpoints) } // Create a new block chain instance with the appropriate configuration. var err error - s.chain, err = blockchain.New(&blockchain.Config{ + s.dag, err = blockdag.New(&blockdag.Config{ DB: s.db, Interrupt: interrupt, - ChainParams: s.chainParams, + DAGParams: s.chainParams, Checkpoints: checkpoints, TimeSource: s.timeSource, SigCache: s.sigCache, @@ -2520,7 +2520,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param // If no feeEstimator has been found, or if the one that has been found // is behind somehow, create a new one and start over. 
- if s.feeEstimator == nil || s.feeEstimator.LastKnownHeight() != s.chain.BestSnapshot().Height { + if s.feeEstimator == nil || s.feeEstimator.LastKnownHeight() != s.dag.GetDAGState().SelectedTip.Height { s.feeEstimator = mempool.NewFeeEstimator( mempool.DefaultEstimateFeeMaxRollback, mempool.DefaultEstimateFeeMinRegisteredBlocks) @@ -2533,18 +2533,18 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param FreeTxRelayLimit: cfg.FreeTxRelayLimit, MaxOrphanTxs: cfg.MaxOrphanTxs, MaxOrphanTxSize: defaultMaxOrphanTxSize, - MaxSigOpsPerTx: blockchain.MaxSigOpsPerBlock / 5, + MaxSigOpsPerTx: blockdag.MaxSigOpsPerBlock / 5, MinRelayTxFee: cfg.minRelayTxFee, MaxTxVersion: 2, }, ChainParams: chainParams, - FetchUtxoView: s.chain.FetchUtxoView, - BestHeight: func() int32 { return s.chain.BestSnapshot().Height }, - MedianTimePast: func() time.Time { return s.chain.BestSnapshot().MedianTime }, - CalcSequenceLock: func(tx *btcutil.Tx, view *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) { - return s.chain.CalcSequenceLock(tx, view, true) + FetchUtxoView: s.dag.FetchUtxoView, + BestHeight: func() int32 { return s.dag.GetDAGState().SelectedTip.Height }, + MedianTimePast: func() time.Time { return s.dag.GetDAGState().SelectedTip.MedianTime }, + CalcSequenceLock: func(tx *btcutil.Tx, view *blockdag.UtxoViewpoint) (*blockdag.SequenceLock, error) { + return s.dag.CalcSequenceLock(tx, view, true) }, - IsDeploymentActive: s.chain.IsDeploymentActive, + IsDeploymentActive: s.dag.IsDeploymentActive, SigCache: s.sigCache, AddrIndex: s.addrIndex, FeeEstimator: s.feeEstimator, @@ -2553,7 +2553,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param s.syncManager, err = netsync.New(&netsync.Config{ PeerNotifier: &s, - Chain: s.chain, + DAG: s.dag, TxMemPool: s.txMemPool, ChainParams: s.chainParams, DisableCheckpoints: cfg.DisableCheckpoints, @@ -2576,7 +2576,7 @@ func newServer(listenAddrs []string, db database.DB, 
chainParams *chaincfg.Param TxMinFreeFee: cfg.minRelayTxFee, } blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, - s.chainParams, s.txMemPool, s.chain, s.timeSource, s.sigCache) + s.chainParams, s.txMemPool, s.dag, s.timeSource, s.sigCache) s.cpuMiner = cpuminer.New(&cpuminer.Config{ ChainParams: chainParams, BlockTemplateGenerator: blockTemplateGenerator, @@ -2685,7 +2685,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param ConnMgr: &rpcConnManager{&s}, SyncMgr: &rpcSyncMgr{&s, s.syncManager}, TimeSource: s.timeSource, - Chain: s.chain, + DAG: s.dag, ChainParams: chainParams, DB: db, TxMemPool: s.txMemPool, @@ -2932,7 +2932,7 @@ func isWhitelisted(addr net.Addr) bool { // checkpointSorter implements sort.Interface to allow a slice of checkpoints to // be sorted. -type checkpointSorter []chaincfg.Checkpoint +type checkpointSorter []dagconfig.Checkpoint // Len returns the number of checkpoints in the slice. It is part of the // sort.Interface implementation. @@ -2957,10 +2957,10 @@ func (s checkpointSorter) Less(i, j int) bool { // checkpoints contain a checkpoint with the same height as a checkpoint in the // default checkpoints, the additional checkpoint will take precedence and // overwrite the default one. -func mergeCheckpoints(defaultCheckpoints, additional []chaincfg.Checkpoint) []chaincfg.Checkpoint { +func mergeCheckpoints(defaultCheckpoints, additional []dagconfig.Checkpoint) []dagconfig.Checkpoint { // Create a map of the additional checkpoints to remove duplicates while // leaving the most recently-specified checkpoint. - extra := make(map[int32]chaincfg.Checkpoint) + extra := make(map[int32]dagconfig.Checkpoint) for _, checkpoint := range additional { extra[checkpoint.Height] = checkpoint } @@ -2968,7 +2968,7 @@ func mergeCheckpoints(defaultCheckpoints, additional []chaincfg.Checkpoint) []ch // Add all default checkpoints that do not have an override in the // additional checkpoints. 
numDefault := len(defaultCheckpoints) - checkpoints := make([]chaincfg.Checkpoint, 0, numDefault+len(extra)) + checkpoints := make([]dagconfig.Checkpoint, 0, numDefault+len(extra)) for _, checkpoint := range defaultCheckpoints { if _, exists := extra[checkpoint.Height]; !exists { checkpoints = append(checkpoints, checkpoint) diff --git a/txscript/engine_test.go b/txscript/engine_test.go index 3d5179cb2..687dd31bf 100644 --- a/txscript/engine_test.go +++ b/txscript/engine_test.go @@ -7,7 +7,7 @@ package txscript import ( "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -29,7 +29,7 @@ func TestBadPC(t *testing.T) { TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash([32]byte{ + Hash: daghash.Hash([32]byte{ 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02, 0xfa, 0x20, 0x9c, 0x6a, @@ -87,7 +87,7 @@ func TestCheckErrorCondition(t *testing.T) { Version: 1, TxIn: []*wire.TxIn{{ PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash([32]byte{ + Hash: daghash.Hash([32]byte{ 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02, 0xfa, 0x20, 0x9c, 0x6a, @@ -160,7 +160,7 @@ func TestInvalidFlagCombinations(t *testing.T) { TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash([32]byte{ + Hash: daghash.Hash([32]byte{ 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02, 0xfa, 0x20, 0x9c, 0x6a, diff --git a/txscript/example_test.go b/txscript/example_test.go index ace3d96b4..1922b7beb 100644 --- a/txscript/example_test.go +++ b/txscript/example_test.go @@ -9,8 +9,8 @@ import ( "fmt" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/txscript" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" @@ -25,7 +25,7 @@ func ExamplePayToAddrScript() { // the address type. 
It is also required for the upcoming call to // PayToAddrScript. addressStr := "dagcoin:qqfgqp8l9l90zwetj84k2jcac2m8falvvy9uastr55" - address, err := btcutil.DecodeAddress(addressStr, &chaincfg.MainNetParams) + address, err := btcutil.DecodeAddress(addressStr, &dagconfig.MainNetParams) if err != nil { fmt.Println(err) return @@ -64,7 +64,7 @@ func ExampleExtractPkScriptAddrs() { // Extract and print details from the script. scriptClass, addresses, reqSigs, err := txscript.ExtractPkScriptAddrs( - script, &chaincfg.MainNetParams) + script, &dagconfig.MainNetParams) if err != nil { fmt.Println(err) return @@ -92,7 +92,7 @@ func ExampleSignTxOutput() { privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed()) addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, - &chaincfg.MainNetParams) + &dagconfig.MainNetParams) if err != nil { fmt.Println(err) return @@ -102,7 +102,7 @@ func ExampleSignTxOutput() { // would ordinarily be the real transaction that is being spent. It // contains a single output that pays to address in the amount of 1 BTC. originTx := wire.NewMsgTx(wire.TxVersion) - prevOut := wire.NewOutPoint(&chainhash.Hash{}, ^uint32(0)) + prevOut := wire.NewOutPoint(&daghash.Hash{}, ^uint32(0)) txIn := wire.NewTxIn(prevOut, []byte{txscript.Op0, txscript.Op0}) originTx.AddTxIn(txIn) pkScript, err := txscript.PayToAddrScript(addr) @@ -151,7 +151,7 @@ func ExampleSignTxOutput() { // Notice that the script database parameter is nil here since it isn't // used. It must be specified when pay-to-script-hash transactions are // being signed. 
- sigScript, err := txscript.SignTxOutput(&chaincfg.MainNetParams, + sigScript, err := txscript.SignTxOutput(&dagconfig.MainNetParams, redeemTx, 0, originTx.TxOut[0].PkScript, txscript.SigHashAll, txscript.KeyClosure(lookupKey), nil, nil) if err != nil { diff --git a/txscript/opcode.go b/txscript/opcode.go index 359036cea..069b91ac5 100644 --- a/txscript/opcode.go +++ b/txscript/opcode.go @@ -15,7 +15,7 @@ import ( "golang.org/x/crypto/ripemd160" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -1090,7 +1090,7 @@ func verifyLockTime(txLockTime, threshold, lockTime uint64) error { // LockTime field of the transaction containing the script signature // validating if the transaction outputs are spendable yet. func opcodeCheckLockTimeVerify(op *parsedOpcode, vm *Engine) error { - // The current transaction locktime is a int64 resulting in a maximum + // The current transaction locktime is a uint64 resulting in a maximum // locktime of 2^63-1 (the year 292278994). However, scriptNums are signed // and therefore a standard 4-byte scriptNum would only support up to a // maximum of 2^31-1 (the year 2038). Thus, a 5-byte scriptNum is used @@ -1152,7 +1152,7 @@ func opcodeCheckLockTimeVerify(op *parsedOpcode, vm *Engine) error { func opcodeCheckSequenceVerify(op *parsedOpcode, vm *Engine) error { // The current transaction sequence is a uint64 resulting in a maximum - // sequence of 2^32-1. However, scriptNums are signed and therefore a + // sequence of 2^63-1. However, scriptNums are signed and therefore a // standard 4-byte scriptNum would only support up to a maximum of // 2^31-1. 
Thus, a 5-byte scriptNum is used here since it will support // up to 2^39-1 which allows sequences beyond the current sequence @@ -1949,7 +1949,7 @@ func opcodeHash256(op *parsedOpcode, vm *Engine) error { return err } - vm.dstack.PushByteArray(chainhash.DoubleHashB(buf)) + vm.dstack.PushByteArray(daghash.DoubleHashB(buf)) return nil } @@ -2038,7 +2038,7 @@ func opcodeCheckSig(op *parsedOpcode, vm *Engine) error { var valid bool if vm.sigCache != nil { - var sigHash chainhash.Hash + var sigHash daghash.Hash copy(sigHash[:], hash) valid = vm.sigCache.Exists(sigHash, signature, pubKey) @@ -2242,7 +2242,7 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { var valid bool if vm.sigCache != nil { - var sigHash chainhash.Hash + var sigHash daghash.Hash copy(sigHash[:], hash) valid = vm.sigCache.Exists(sigHash, parsedSig, parsedPubKey) diff --git a/txscript/reference_test.go b/txscript/reference_test.go index 3fac40084..eb40a4c31 100644 --- a/txscript/reference_test.go +++ b/txscript/reference_test.go @@ -15,7 +15,7 @@ import ( "strings" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -233,7 +233,7 @@ func parseExpectedResult(expected string) ([]ErrorCode, error) { func createSpendingTx(sigScript, pkScript []byte) *wire.MsgTx { coinbaseTx := wire.NewMsgTx(wire.TxVersion) - outPoint := wire.NewOutPoint(&chainhash.Hash{}, ^uint32(0)) + outPoint := wire.NewOutPoint(&daghash.Hash{}, ^uint32(0)) txIn := wire.NewTxIn(outPoint, []byte{Op0, Op0}) txOut := wire.NewTxOut(0, pkScript) coinbaseTx.AddTxIn(txIn) @@ -489,7 +489,7 @@ testloop: continue testloop } - prevhash, err := chainhash.NewHashFromStr(previoustx) + prevhash, err := daghash.NewHashFromStr(previoustx) if err != nil { t.Errorf("bad test (%dth input hash not hash %v)"+ "%d: %v", j, err, i, test) @@ -630,7 +630,7 @@ testloop: continue } - prevhash, err := 
chainhash.NewHashFromStr(previoustx) + prevhash, err := daghash.NewHashFromStr(previoustx) if err != nil { t.Errorf("bad test (%dth input hash not hash %v)"+ "%d: %v", j, err, i, test) @@ -737,7 +737,7 @@ func TestCalcSignatureHash(t *testing.T) { "Failed calculating signature hash: %s", i, err) } - expectedHash, _ := chainhash.NewHashFromStr(test[4].(string)) + expectedHash, _ := daghash.NewHashFromStr(test[4].(string)) if !bytes.Equal(hash, expectedHash[:]) { t.Errorf("TestCalcSignatureHash failed test #%d: "+ "Signature hash mismatch.", i) diff --git a/txscript/script.go b/txscript/script.go index 420c7c230..0130dfdcd 100644 --- a/txscript/script.go +++ b/txscript/script.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" ) @@ -365,7 +365,7 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, tx *wire.Msg wbuf := bytes.NewBuffer(make([]byte, 0, txCopy.SerializeSize()+4)) txCopy.Serialize(wbuf) binary.Write(wbuf, binary.LittleEndian, hashType) - return chainhash.DoubleHashB(wbuf.Bytes()), nil + return daghash.DoubleHashB(wbuf.Bytes()), nil } // asSmallInt returns the passed opcode, which must be true according to diff --git a/txscript/sigcache.go b/txscript/sigcache.go index ccb7f642a..384babda0 100644 --- a/txscript/sigcache.go +++ b/txscript/sigcache.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // sigCacheEntry represents an entry in the SigCache. Entries within the @@ -34,7 +34,7 @@ type sigCacheEntry struct { // if they've already been seen and verified within the mempool. type SigCache struct { sync.RWMutex - validSigs map[chainhash.Hash]sigCacheEntry + validSigs map[daghash.Hash]sigCacheEntry maxEntries uint } @@ -45,7 +45,7 @@ type SigCache struct { // cache to exceed the max. 
func NewSigCache(maxEntries uint) *SigCache { return &SigCache{ - validSigs: make(map[chainhash.Hash]sigCacheEntry, maxEntries), + validSigs: make(map[daghash.Hash]sigCacheEntry, maxEntries), maxEntries: maxEntries, } } @@ -55,7 +55,7 @@ func NewSigCache(maxEntries uint) *SigCache { // // NOTE: This function is safe for concurrent access. Readers won't be blocked // unless there exists a writer, adding an entry to the SigCache. -func (s *SigCache) Exists(sigHash chainhash.Hash, sig *btcec.Signature, pubKey *btcec.PublicKey) bool { +func (s *SigCache) Exists(sigHash daghash.Hash, sig *btcec.Signature, pubKey *btcec.PublicKey) bool { s.RLock() entry, ok := s.validSigs[sigHash] s.RUnlock() @@ -70,7 +70,7 @@ func (s *SigCache) Exists(sigHash chainhash.Hash, sig *btcec.Signature, pubKey * // // NOTE: This function is safe for concurrent access. Writers will block // simultaneous readers until function execution has concluded. -func (s *SigCache) Add(sigHash chainhash.Hash, sig *btcec.Signature, pubKey *btcec.PublicKey) { +func (s *SigCache) Add(sigHash daghash.Hash, sig *btcec.Signature, pubKey *btcec.PublicKey) { s.Lock() defer s.Unlock() diff --git a/txscript/sigcache_test.go b/txscript/sigcache_test.go index 04def36ba..440204d81 100644 --- a/txscript/sigcache_test.go +++ b/txscript/sigcache_test.go @@ -9,19 +9,19 @@ import ( "testing" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // genRandomSig returns a random message, a signature of the message under the // public key and the public key. This function is used to generate randomized // test data. 
-func genRandomSig() (*chainhash.Hash, *btcec.Signature, *btcec.PublicKey, error) { +func genRandomSig() (*daghash.Hash, *btcec.Signature, *btcec.PublicKey, error) { privKey, err := btcec.NewPrivateKey(btcec.S256()) if err != nil { return nil, nil, nil, err } - var msgHash chainhash.Hash + var msgHash daghash.Hash if _, err := rand.Read(msgHash[:]); err != nil { return nil, nil, nil, err } diff --git a/txscript/sign.go b/txscript/sign.go index 2bbbde14d..e9e0b43f1 100644 --- a/txscript/sign.go +++ b/txscript/sign.go @@ -9,7 +9,7 @@ import ( "fmt" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -96,7 +96,7 @@ func signMultiSig(tx *wire.MsgTx, idx int, script []byte, hashType SigHashType, return signedScript, signedCount == nRequired } -func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, +func sign(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int, script []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB) ([]byte, ScriptClass, []btcutil.Address, int, error) { @@ -160,8 +160,8 @@ func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, // and nrequired are the result of extracting the addresses from pkscript. // The return value is the best effort merging of the two scripts. Calling this // function with addresses, class and nrequired that do not match pkScript is -// an error. -func mergeScripts(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, +// an error and results in undefined behaviour. +func mergeScripts(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int, pkScript []byte, class ScriptClass, addresses []btcutil.Address, nRequired int, sigScript, prevScript []byte) ([]byte, error) { @@ -376,7 +376,7 @@ func (sc ScriptClosure) GetScript(address btcutil.Address) ([]byte, error) { // getScript. 
If previousScript is provided then the results in previousScript // will be merged in a type-dependent manner with the newly generated. // signature script. -func SignTxOutput(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, +func SignTxOutput(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int, pkScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB, previousScript []byte) ([]byte, error) { diff --git a/txscript/sign_test.go b/txscript/sign_test.go index b20dd7573..4195182aa 100644 --- a/txscript/sign_test.go +++ b/txscript/sign_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/daglabs/btcd/btcec" - "github.com/daglabs/btcd/chaincfg" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/daglabs/btcd/wire" "github.com/daglabs/btcutil" ) @@ -75,7 +75,7 @@ func signAndCheck(msg string, tx *wire.MsgTx, idx int, pkScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB, previousScript []byte) error { - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, tx, idx, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, idx, pkScript, hashType, kdb, sdb, nil) if err != nil { return fmt.Errorf("failed to sign output %s: %v", msg, err) @@ -104,21 +104,21 @@ func TestSignTxOutput(t *testing.T) { TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0, }, Sequence: 4294967295, }, { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 1, }, Sequence: 4294967295, }, { PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 2, }, Sequence: 4294967295, @@ -152,7 +152,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeUncompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -189,7 +189,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeUncompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -202,7 +202,7 @@ func TestSignTxOutput(t *testing.T) { "for %s: %v", msg, err) } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -215,7 +215,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -250,7 +250,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeCompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -288,7 +288,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeCompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -301,7 +301,7 @@ func TestSignTxOutput(t *testing.T) { "for %s: %v", msg, err) } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -314,7 +314,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -348,7 +348,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -385,7 +385,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -398,7 +398,7 @@ func TestSignTxOutput(t *testing.T) { "for %s: %v", msg, err) } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -411,7 +411,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -445,7 +445,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -482,7 +482,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -495,7 +495,7 @@ func TestSignTxOutput(t *testing.T) { "for %s: %v", msg, err) } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -508,7 +508,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -543,7 +543,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeUncompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -558,7 +558,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -599,7 +599,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeUncompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -614,7 +614,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -629,7 +629,7 @@ func TestSignTxOutput(t *testing.T) { break } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -644,7 +644,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -681,7 +681,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeCompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -695,7 +695,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -737,7 +737,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeCompressed() address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) + btcutil.Hash160(pk), &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -751,7 +751,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -766,7 +766,7 @@ func TestSignTxOutput(t *testing.T) { break } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -781,7 +781,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -817,7 +817,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -831,7 +831,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -872,7 +872,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -886,7 +886,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -900,7 +900,7 @@ func TestSignTxOutput(t *testing.T) { break } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -915,7 +915,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, false}, @@ -951,7 +951,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -965,7 +965,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1005,7 +1005,7 @@ func TestSignTxOutput(t *testing.T) { pk := (*btcec.PublicKey)(&key.PublicKey). 
SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, &chaincfg.TestNet3Params) + address, err := btcutil.NewAddressPubKey(pk, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -1019,7 +1019,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1033,7 +1033,7 @@ func TestSignTxOutput(t *testing.T) { break } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -1048,7 +1048,7 @@ func TestSignTxOutput(t *testing.T) { // by the above loop, this should be valid, now sign // again and merge. - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address.EncodeAddress(): {key, true}, @@ -1084,7 +1084,7 @@ func TestSignTxOutput(t *testing.T) { pk1 := (*btcec.PublicKey)(&key1.PublicKey). SerializeCompressed() - address1, err := btcutil.NewAddressPubKey(pk1, &chaincfg.TestNet3Params) + address1, err := btcutil.NewAddressPubKey(pk1, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -1100,7 +1100,7 @@ func TestSignTxOutput(t *testing.T) { pk2 := (*btcec.PublicKey)(&key2.PublicKey). 
SerializeCompressed() - address2, err := btcutil.NewAddressPubKey(pk2, &chaincfg.TestNet3Params) + address2, err := btcutil.NewAddressPubKey(pk2, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address 2 for %s: %v", msg, err) @@ -1116,7 +1116,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1157,7 +1157,7 @@ func TestSignTxOutput(t *testing.T) { pk1 := (*btcec.PublicKey)(&key1.PublicKey). SerializeCompressed() - address1, err := btcutil.NewAddressPubKey(pk1, &chaincfg.TestNet3Params) + address1, err := btcutil.NewAddressPubKey(pk1, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -1173,7 +1173,7 @@ func TestSignTxOutput(t *testing.T) { pk2 := (*btcec.PublicKey)(&key2.PublicKey). SerializeCompressed() - address2, err := btcutil.NewAddressPubKey(pk2, &chaincfg.TestNet3Params) + address2, err := btcutil.NewAddressPubKey(pk2, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address 2 for %s: %v", msg, err) @@ -1189,7 +1189,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1203,7 +1203,7 @@ func TestSignTxOutput(t *testing.T) { break } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address1.EncodeAddress(): {key1, true}, @@ -1223,7 +1223,7 @@ func TestSignTxOutput(t *testing.T) { } // Sign with the other key and merge - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, 
scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address2.EncodeAddress(): {key2, true}, @@ -1259,7 +1259,7 @@ func TestSignTxOutput(t *testing.T) { pk1 := (*btcec.PublicKey)(&key1.PublicKey). SerializeCompressed() - address1, err := btcutil.NewAddressPubKey(pk1, &chaincfg.TestNet3Params) + address1, err := btcutil.NewAddressPubKey(pk1, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) @@ -1275,7 +1275,7 @@ func TestSignTxOutput(t *testing.T) { pk2 := (*btcec.PublicKey)(&key2.PublicKey). SerializeCompressed() - address2, err := btcutil.NewAddressPubKey(pk2, &chaincfg.TestNet3Params) + address2, err := btcutil.NewAddressPubKey(pk2, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make address 2 for %s: %v", msg, err) @@ -1291,7 +1291,7 @@ func TestSignTxOutput(t *testing.T) { } scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + pkScript, &dagconfig.TestNet3Params) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1305,7 +1305,7 @@ func TestSignTxOutput(t *testing.T) { break } - sigScript, err := SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err := SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address1.EncodeAddress(): {key1, true}, @@ -1325,7 +1325,7 @@ func TestSignTxOutput(t *testing.T) { } // Sign with the other key and merge - sigScript, err = SignTxOutput(&chaincfg.TestNet3Params, + sigScript, err = SignTxOutput(&dagconfig.TestNet3Params, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ address1.EncodeAddress(): {key1, true}, diff --git a/txscript/standard.go b/txscript/standard.go index 818a40b8c..ba9b2ad3b 100644 --- a/txscript/standard.go +++ b/txscript/standard.go @@ -7,7 +7,7 @@ package txscript import ( "fmt" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcutil" ) @@ -417,7 +417,7 @@ 
func PushedData(script []byte) ([][]byte, error) { // signatures associated with the passed PkScript. Note that it only works for // 'standard' transaction script types. Any data such as public keys which are // invalid are omitted from the results. -func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (ScriptClass, []btcutil.Address, int, error) { +func ExtractPkScriptAddrs(pkScript []byte, chainParams *dagconfig.Params) (ScriptClass, []btcutil.Address, int, error) { var addrs []btcutil.Address var requiredSigs int diff --git a/txscript/standard_test.go b/txscript/standard_test.go index ff27f3055..8e6d55767 100644 --- a/txscript/standard_test.go +++ b/txscript/standard_test.go @@ -9,7 +9,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg" + "github.com/daglabs/btcd/dagconfig" "github.com/daglabs/btcutil" ) @@ -32,7 +32,7 @@ func mustParseShortForm(script string) []byte { // the tests as a helper since the only way it can fail is if there is an error // in the test source code. func newAddressPubKey(serializedPubKey []byte) btcutil.Address { - addr, err := btcutil.NewAddressPubKey(serializedPubKey, &chaincfg.MainNetParams) + addr, err := btcutil.NewAddressPubKey(serializedPubKey, &dagconfig.MainNetParams) if err != nil { panic("invalid public key in test source") } @@ -45,7 +45,7 @@ func newAddressPubKey(serializedPubKey []byte) btcutil.Address { // as a helper since the only way it can fail is if there is an error in the // test source code. func newAddressPubKeyHash(pkHash []byte) btcutil.Address { - addr, err := btcutil.NewAddressPubKeyHash(pkHash, &chaincfg.MainNetParams) + addr, err := btcutil.NewAddressPubKeyHash(pkHash, &dagconfig.MainNetParams) if err != nil { panic("invalid public key hash in test source") } @@ -59,7 +59,7 @@ func newAddressPubKeyHash(pkHash []byte) btcutil.Address { // test source code. 
func newAddressScriptHash(scriptHash []byte) btcutil.Address { addr, err := btcutil.NewAddressScriptHashFromHash(scriptHash, - &chaincfg.MainNetParams) + &dagconfig.MainNetParams) if err != nil { panic("invalid script hash in test source") } @@ -342,7 +342,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { t.Logf("Running %d tests.", len(tests)) for i, test := range tests { class, addrs, reqSigs, err := ExtractPkScriptAddrs( - test.script, &chaincfg.MainNetParams) + test.script, &dagconfig.MainNetParams) if err != nil { } @@ -498,7 +498,7 @@ func (b *bogusAddress) ScriptAddress() []byte { } // IsForNet lies blatantly to satisfy the btcutil.Address interface. -func (b *bogusAddress) IsForNet(chainParams *chaincfg.Params) bool { +func (b *bogusAddress) IsForNet(chainParams *dagconfig.Params) bool { return true // why not? } @@ -515,7 +515,7 @@ func TestPayToAddrScript(t *testing.T) { // 1MirQ9bwyQcGVJPwKUgapu5ouK2E2Ey4gX p2pkhMain, err := btcutil.NewAddressPubKeyHash(hexToBytes("e34cce70c86"+ - "373273efcc54ce7d2a491bb4a0e84"), &chaincfg.MainNetParams) + "373273efcc54ce7d2a491bb4a0e84"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create public key hash address: %v", err) } @@ -523,20 +523,20 @@ func TestPayToAddrScript(t *testing.T) { // Taken from transaction: // b0539a45de13b3e0403909b8bd1a555b8cbe45fd4e3f3fda76f3a5f52835c29d p2shMain, _ := btcutil.NewAddressScriptHashFromHash(hexToBytes("e8c300"+ - "c87986efa84c37c0519929019ef86eb5b4"), &chaincfg.MainNetParams) + "c87986efa84c37c0519929019ef86eb5b4"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create script hash address: %v", err) } // mainnet p2pk 13CG6SJ3yHUXo4Cr2RY4THLLJrNFuG3gUg p2pkCompressedMain, err := btcutil.NewAddressPubKey(hexToBytes("02192d"+ - "74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"), &chaincfg.MainNetParams) + "74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to 
create pubkey address (compressed): %v", err) } p2pkCompressed2Main, err := btcutil.NewAddressPubKey(hexToBytes("03b0b"+ - "d634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"), &chaincfg.MainNetParams) + "d634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create pubkey address (compressed 2): %v", err) @@ -545,7 +545,7 @@ func TestPayToAddrScript(t *testing.T) { p2pkUncompressedMain, err := btcutil.NewAddressPubKey(hexToBytes("0411"+ "db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5"+ "cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b4"+ - "12a3"), &chaincfg.MainNetParams) + "12a3"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create pubkey address (uncompressed): %v", err) @@ -632,13 +632,13 @@ func TestMultiSigScript(t *testing.T) { // mainnet p2pk 13CG6SJ3yHUXo4Cr2RY4THLLJrNFuG3gUg p2pkCompressedMain, err := btcutil.NewAddressPubKey(hexToBytes("02192d"+ - "74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"), &chaincfg.MainNetParams) + "74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create pubkey address (compressed): %v", err) } p2pkCompressed2Main, err := btcutil.NewAddressPubKey(hexToBytes("03b0b"+ - "d634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"), &chaincfg.MainNetParams) + "d634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create pubkey address (compressed 2): %v", err) @@ -647,7 +647,7 @@ func TestMultiSigScript(t *testing.T) { p2pkUncompressedMain, err := btcutil.NewAddressPubKey(hexToBytes("0411"+ "db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5"+ "cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b4"+ - "12a3"), &chaincfg.MainNetParams) + "12a3"), &dagconfig.MainNetParams) if err != nil { t.Fatalf("Unable to create pubkey 
address (uncompressed): %v", err) diff --git a/wire/bench_test.go b/wire/bench_test.go index 4b2f76838..2ce7978ef 100644 --- a/wire/bench_test.go +++ b/wire/bench_test.go @@ -14,7 +14,7 @@ import ( "os" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // genesisCoinbaseTx is the coinbase transaction for the genesis blocks for @@ -24,7 +24,7 @@ var genesisCoinbaseTx = MsgTx{ TxIn: []*TxIn{ { PreviousOutPoint: OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{ @@ -197,7 +197,7 @@ func BenchmarkReadOutPoint(b *testing.B) { // transaction output point. func BenchmarkWriteOutPoint(b *testing.B) { op := &OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0, } for i := 0; i < b.N; i++ { @@ -386,7 +386,7 @@ func BenchmarkDecodeGetHeaders(b *testing.B) { pver := ProtocolVersion var m MsgGetHeaders for i := 0; i < MaxBlockLocatorsPerMsg; i++ { - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } @@ -410,17 +410,26 @@ func BenchmarkDecodeGetHeaders(b *testing.B) { } // BenchmarkDecodeHeaders performs a benchmark on how long it takes to -// decode a headers message with the maximum number of headers. +// decode a headers message with the maximum number of headers and maximum number of +// previous hashes per header. func BenchmarkDecodeHeaders(b *testing.B) { // Create a message with the maximum number of headers. 
pver := ProtocolVersion var m MsgHeaders for i := 0; i < MaxBlockHeadersPerMsg; i++ { - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } - m.AddBlockHeader(NewBlockHeader(1, hash, hash, 0, uint32(i))) + prevBlocks := make([]daghash.Hash, MaxNumPrevBlocks) + for j := byte(0); j < MaxNumPrevBlocks; j++ { + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j)) + if err != nil { + b.Fatalf("NewHashFromStr: unexpected error: %v", err) + } + prevBlocks[j] = *hash + } + m.AddBlockHeader(NewBlockHeader(1, prevBlocks, hash, 0, uint32(i))) } // Serialize it so the bytes are available to test the decode below. @@ -446,7 +455,7 @@ func BenchmarkDecodeGetBlocks(b *testing.B) { pver := ProtocolVersion var m MsgGetBlocks for i := 0; i < MaxBlockLocatorsPerMsg; i++ { - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } @@ -503,7 +512,7 @@ func BenchmarkDecodeInv(b *testing.B) { pver := ProtocolVersion var m MsgInv for i := 0; i < MaxInvPerMsg; i++ { - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } @@ -533,7 +542,7 @@ func BenchmarkDecodeNotFound(b *testing.B) { pver := ProtocolVersion var m MsgNotFound for i := 0; i < MaxInvPerMsg; i++ { - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } @@ -562,13 +571,13 @@ func BenchmarkDecodeMerkleBlock(b *testing.B) { // Create a message with random data. 
pver := ProtocolVersion var m MsgMerkleBlock - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", 10000)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", 10000)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } - m.Header = *NewBlockHeader(1, hash, hash, 0, uint32(10000)) + m.Header = *NewBlockHeader(1, []daghash.Hash{*hash}, hash, 0, uint32(10000)) for i := 0; i < 105; i++ { - hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) + hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } @@ -614,12 +623,12 @@ func BenchmarkDoubleHashB(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = chainhash.DoubleHashB(txBytes) + _ = daghash.DoubleHashB(txBytes) } } // BenchmarkDoubleHashH performs a benchmark on how long it takes to perform -// a double hash returning a chainhash.Hash. +// a double hash returning a daghash.Hash. func BenchmarkDoubleHashH(b *testing.B) { var buf bytes.Buffer if err := genesisCoinbaseTx.Serialize(&buf); err != nil { @@ -630,6 +639,6 @@ func BenchmarkDoubleHashH(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = chainhash.DoubleHashH(txBytes) + _ = daghash.DoubleHashH(txBytes) } } diff --git a/wire/blockheader.go b/wire/blockheader.go index 9007f560a..739b6d699 100644 --- a/wire/blockheader.go +++ b/wire/blockheader.go @@ -9,25 +9,39 @@ import ( "io" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) +// BaseBlockHeaderPayload is the base number of bytes a block header can be, +// not including the list of previous block headers. +// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 4 bytes + +// + NumPrevBlocks 1 byte + MerkleRoot hash. 
+// To get total size of block header len(PrevBlocks) * daghash.HashSize should be +// added to this value +const BaseBlockHeaderPayload = 21 + (daghash.HashSize) + +// MaxNumPrevBlocks is the maximum number of previous blocks a block can reference. +// Currently set to 255 as the maximum number NumPrevBlocks can be due to it being a byte +const MaxNumPrevBlocks = 255 + // MaxBlockHeaderPayload is the maximum number of bytes a block header can be. -// Version 4 bytes + Timestamp 4 bytes + Bits 4 bytes + Nonce 4 bytes + -// PrevBlock and MerkleRoot hashes. -const MaxBlockHeaderPayload = 16 + (chainhash.HashSize * 2) +// BaseBlockHeaderPayload + up to MaxNumPrevBlocks hashes of previous blocks +const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumPrevBlocks * daghash.HashSize) // BlockHeader defines information about a block and is used in the bitcoin -// block (MsgBlock) and headers (MsgHeaders) messages. +// block (MsgBlock) and headers (MsgHeader) messages. type BlockHeader struct { // Version of the block. This is not the same as the protocol version. Version int32 - // Hash of the previous block header in the block chain. - PrevBlock chainhash.Hash + // Number of entries in PrevBlocks + NumPrevBlocks byte + + // Hashes of the previous block headers in the blockDAG. + PrevBlocks []daghash.Hash // Merkle tree reference to hash of all transactions for the block. - MerkleRoot chainhash.Hash + MerkleRoot daghash.Hash // Time the block was created. This is, unfortunately, encoded as a // uint32 on the wire and therefore is limited to 2106. @@ -40,20 +54,25 @@ type BlockHeader struct { Nonce uint32 } -// blockHeaderLen is a constant that represents the number of bytes for a block -// header. -const blockHeaderLen = 84 - // BlockHash computes the block identifier hash for the given block header. 
-func (h *BlockHeader) BlockHash() chainhash.Hash { +func (h *BlockHeader) BlockHash() daghash.Hash { // Encode the header and double sha256 everything prior to the number of // transactions. Ignore the error returns since there is no way the // encode could fail except being out of memory which would cause a // run-time panic. - buf := bytes.NewBuffer(make([]byte, 0, MaxBlockHeaderPayload)) + buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+len(h.PrevBlocks)*daghash.HashSize)) _ = writeBlockHeader(buf, 0, h) - return chainhash.DoubleHashH(buf.Bytes()) + return daghash.DoubleHashH(buf.Bytes()) +} + +// SelectedPrevBlock returns the hash of the selected block header. +func (header *BlockHeader) SelectedPrevBlock() *daghash.Hash { + if header.NumPrevBlocks == 0 { + return nil + } + + return &header.PrevBlocks[0] } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. @@ -92,21 +111,28 @@ func (h *BlockHeader) Serialize(w io.Writer) error { return writeBlockHeader(w, 0, h) } +// SerializeSize returns the number of bytes it would take to serialize the +// block header. +func (h *BlockHeader) SerializeSize() int { + return BaseBlockHeaderPayload + int(h.NumPrevBlocks)*daghash.HashSize +} + // NewBlockHeader returns a new BlockHeader using the provided version, previous // block hash, merkle root hash, difficulty bits, and nonce used to generate the -// block with defaults for the remaining fields. -func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash, +// block with defaults or calculated values for the remaining fields. +func NewBlockHeader(version int32, prevHashes []daghash.Hash, merkleRootHash *daghash.Hash, bits uint32, nonce uint32) *BlockHeader { // Limit the timestamp to one second precision since the protocol // doesn't support better. 
return &BlockHeader{ - Version: version, - PrevBlock: *prevHash, - MerkleRoot: *merkleRootHash, - Timestamp: time.Unix(time.Now().Unix(), 0), - Bits: bits, - Nonce: nonce, + Version: version, + NumPrevBlocks: byte(len(prevHashes)), + PrevBlocks: prevHashes, + MerkleRoot: *merkleRootHash, + Timestamp: time.Unix(time.Now().Unix(), 0), + Bits: bits, + Nonce: nonce, } } @@ -114,8 +140,19 @@ func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash, // decoding block headers stored to disk, such as in a database, as opposed to // decoding from the wire. func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { - return readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot, - (*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce) + err := readElements(r, &bh.Version, &bh.NumPrevBlocks) + if err != nil { + return err + } + + bh.PrevBlocks = make([]daghash.Hash, bh.NumPrevBlocks) + for i := byte(0); i < bh.NumPrevBlocks; i++ { + err := readElement(r, &bh.PrevBlocks[i]) + if err != nil { + return err + } + } + return readElements(r, &bh.MerkleRoot, (*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce) } // writeBlockHeader writes a bitcoin block header to w. See Serialize for @@ -123,6 +160,6 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { // opposed to encoding for the wire. 
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error { sec := int64(bh.Timestamp.Unix()) - return writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot, + return writeElements(w, bh.Version, bh.NumPrevBlocks, &bh.PrevBlocks, &bh.MerkleRoot, sec, bh.Bits, bh.Nonce) } diff --git a/wire/blockheader_test.go b/wire/blockheader_test.go index 39570f181..bed9bbae6 100644 --- a/wire/blockheader_test.go +++ b/wire/blockheader_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -21,15 +22,16 @@ func TestBlockHeader(t *testing.T) { } nonce := uint32(nonce64) - hash := mainNetGenesisHash + hashes := []daghash.Hash{mainNetGenesisHash, simNetGenesisHash} + merkleHash := mainNetGenesisMerkleRoot bits := uint32(0x1d00ffff) - bh := NewBlockHeader(1, &hash, &merkleHash, bits, nonce) + bh := NewBlockHeader(1, hashes, &merkleHash, bits, nonce) // Ensure we get the same data back out. - if !bh.PrevBlock.IsEqual(&hash) { - t.Errorf("NewBlockHeader: wrong prev hash - got %v, want %v", - spew.Sprint(bh.PrevBlock), spew.Sprint(hash)) + if !reflect.DeepEqual(bh.PrevBlocks, hashes) { + t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v", + spew.Sprint(bh.PrevBlocks), spew.Sprint(hashes)) } if !bh.MerkleRoot.IsEqual(&merkleHash) { t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v", @@ -54,25 +56,31 @@ func TestBlockHeaderWire(t *testing.T) { // baseBlockHdr is used in the various tests as a baseline BlockHeader. 
bits := uint32(0x1d00ffff) baseBlockHdr := &BlockHeader{ - Version: 1, - PrevBlock: mainNetGenesisHash, - MerkleRoot: mainNetGenesisMerkleRoot, - Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST - Bits: bits, - Nonce: nonce, + Version: 1, + NumPrevBlocks: 2, + PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, + MerkleRoot: mainNetGenesisMerkleRoot, + Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST + Bits: bits, + Nonce: nonce, } // baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr. baseBlockHdrEncoded := []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, - 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0xf3, 0xe0, 0x01, 0x00, // Nonce @@ -187,25 +195,31 @@ func TestBlockHeaderSerialize(t *testing.T) { // baseBlockHdr is used in the various tests as a baseline BlockHeader. 
bits := uint32(0x1d00ffff) baseBlockHdr := &BlockHeader{ - Version: 1, - PrevBlock: mainNetGenesisHash, - MerkleRoot: mainNetGenesisMerkleRoot, - Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST - Bits: bits, - Nonce: nonce, + Version: 1, + NumPrevBlocks: 2, + PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, + MerkleRoot: mainNetGenesisMerkleRoot, + Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST + Bits: bits, + Nonce: nonce, } // baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr. baseBlockHdrEncoded := []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, - 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0xf3, 0xe0, 0x01, 0x00, // Nonce @@ -253,3 +267,51 @@ func TestBlockHeaderSerialize(t *testing.T) { } } } + +// TestBlockHeaderSerializeSize performs tests to ensure the serialize size for +// various block headers is accurate. 
+func TestBlockHeaderSerializeSize(t *testing.T) { + nonce := uint32(123123) // 0x1e0f3 + bits := uint32(0x1d00ffff) + timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST + baseBlockHdr := &BlockHeader{ + Version: 1, + NumPrevBlocks: 2, + PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, + MerkleRoot: mainNetGenesisMerkleRoot, + Timestamp: timestamp, + Bits: bits, + Nonce: nonce, + } + + genesisBlockHdr := &BlockHeader{ + Version: 1, + NumPrevBlocks: 0, + PrevBlocks: []daghash.Hash{}, + MerkleRoot: mainNetGenesisMerkleRoot, + Timestamp: timestamp, + Bits: bits, + Nonce: nonce, + } + tests := []struct { + in *BlockHeader // Block header to encode + size int // Expected serialized size + }{ + // Block with no transactions. + {genesisBlockHdr, 49}, + + // First block in the mainnet block chain. + {baseBlockHdr, 113}, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + serializedSize := test.in.SerializeSize() + if serializedSize != test.size { + t.Errorf("BlockHeader.SerializeSize: #%d got: %d, want: "+ + "%d", i, serializedSize, test.size) + + continue + } + } +} diff --git a/wire/common.go b/wire/common.go index 2e814c7ad..e84b381d6 100644 --- a/wire/common.go +++ b/wire/common.go @@ -12,7 +12,7 @@ import ( "math" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) const ( @@ -264,7 +264,7 @@ func readElement(r io.Reader, element interface{}) error { } return nil - case *chainhash.Hash: + case *daghash.Hash: _, err := io.ReadFull(r, e[:]) if err != nil { return err @@ -398,7 +398,7 @@ func writeElement(w io.Writer, element interface{}) error { } return nil - case *chainhash.Hash: + case *daghash.Hash: _, err := w.Write(e[:]) if err != nil { return err diff --git a/wire/common_test.go b/wire/common_test.go index afd81665f..2bb078a0c 100644 --- a/wire/common_test.go +++ b/wire/common_test.go @@ -12,22 +12,31 @@ import ( "strings" "testing" - 
"github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) // mainNetGenesisHash is the hash of the first block in the block chain for the // main network (genesis block). -var mainNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. +var mainNetGenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, }) +// simNetGenesisHash is the hash of the first block in the block chain for the +// simulation test network. +var simNetGenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, +}) + // mainNetGenesisMerkleRoot is the hash of the first transaction in the genesis // block for the main network. -var mainNetGenesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. +var mainNetGenesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, @@ -104,7 +113,7 @@ func TestElementWire(t *testing.T) { }, }, { - (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. + (*daghash.Hash)(&[daghash.HashSize]byte{ // Make go vet happy. 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -203,7 +212,7 @@ func TestElementWireErrors(t *testing.T) { 0, io.ErrShortWrite, io.EOF, }, { - (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. 
+ (*daghash.Hash)(&[daghash.HashSize]byte{ // Make go vet happy. 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, diff --git a/wire/invvect.go b/wire/invvect.go index 1d1fb425d..d0577aa2c 100644 --- a/wire/invvect.go +++ b/wire/invvect.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) const ( @@ -17,7 +17,7 @@ const ( MaxInvPerMsg = 50000 // Maximum payload size for an inventory vector. - maxInvVectPayload = 4 + chainhash.HashSize + maxInvVectPayload = 4 + daghash.HashSize ) // InvType represents the allowed types of inventory vectors. See InvVect. @@ -52,12 +52,12 @@ func (invtype InvType) String() string { // as specified by the Type field, that a peer wants, has, or does not have to // another peer. type InvVect struct { - Type InvType // Type of data - Hash chainhash.Hash // Hash of the data + Type InvType // Type of data + Hash daghash.Hash // Hash of the data } // NewInvVect returns a new InvVect using the provided type and hash. -func NewInvVect(typ InvType, hash *chainhash.Hash) *InvVect { +func NewInvVect(typ InvType, hash *daghash.Hash) *InvVect { return &InvVect{ Type: typ, Hash: *hash, diff --git a/wire/invvect_test.go b/wire/invvect_test.go index ec74273ce..22a69889d 100644 --- a/wire/invvect_test.go +++ b/wire/invvect_test.go @@ -9,7 +9,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -40,7 +40,7 @@ func TestInvTypeStringer(t *testing.T) { // TestInvVect tests the InvVect API. func TestInvVect(t *testing.T) { ivType := InvTypeBlock - hash := chainhash.Hash{} + hash := daghash.Hash{} // Ensure we get the same payload and signature back out. 
iv := NewInvVect(ivType, &hash) @@ -60,7 +60,7 @@ func TestInvVect(t *testing.T) { func TestInvVectWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - baseHash, err := chainhash.NewHashFromStr(hashStr) + baseHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -68,7 +68,7 @@ func TestInvVectWire(t *testing.T) { // errInvVect is an inventory vector with an error. errInvVect := InvVect{ Type: InvTypeError, - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, } // errInvVectEncoded is the wire encoded bytes of errInvVect. diff --git a/wire/message.go b/wire/message.go index 74c189d81..b459a9022 100644 --- a/wire/message.go +++ b/wire/message.go @@ -10,7 +10,7 @@ import ( "io" "unicode/utf8" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // MessageHeaderSize is the number of bytes in a bitcoin message header. @@ -267,7 +267,7 @@ func WriteMessageN(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) (in hdr.magic = btcnet hdr.command = cmd hdr.length = uint32(lenp) - copy(hdr.checksum[:], chainhash.DoubleHashB(payload)[0:4]) + copy(hdr.checksum[:], daghash.DoubleHashB(payload)[0:4]) // Encode the header for the message. This is done to a buffer // rather than directly to the writer since writeElements doesn't @@ -364,7 +364,7 @@ func ReadMessageN(r io.Reader, pver uint32, btcnet BitcoinNet) (int, Message, [] } // Test checksum. 
- checksum := chainhash.DoubleHashB(payload)[0:4] + checksum := daghash.DoubleHashB(payload)[0:4] if !bytes.Equal(checksum[:], hdr.checksum[:]) { str := fmt.Sprintf("payload checksum failed - header "+ "indicates %v, but actual checksum is %v.", diff --git a/wire/message_test.go b/wire/message_test.go index 2d0d6c293..8fe726d39 100644 --- a/wire/message_test.go +++ b/wire/message_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -51,7 +51,7 @@ func TestMessage(t *testing.T) { msgVerack := NewMsgVerAck() msgGetAddr := NewMsgGetAddr() msgAddr := NewMsgAddr() - msgGetBlocks := NewMsgGetBlocks(&chainhash.Hash{}) + msgGetBlocks := NewMsgGetBlocks(&daghash.Hash{}) msgBlock := &blockOne msgInv := NewMsgInv() msgGetData := NewMsgGetData() @@ -66,16 +66,16 @@ func TestMessage(t *testing.T) { msgFilterAdd := NewMsgFilterAdd([]byte{0x01}) msgFilterClear := NewMsgFilterClear() msgFilterLoad := NewMsgFilterLoad([]byte{0x01}, 10, 0, BloomUpdateNone) - bh := NewBlockHeader(1, &chainhash.Hash{}, &chainhash.Hash{}, 0, 0) + bh := NewBlockHeader(1, []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, &daghash.Hash{}, 0, 0) msgMerkleBlock := NewMsgMerkleBlock(bh) msgReject := NewMsgReject("block", RejectDuplicate, "duplicate block") - msgGetCFilters := NewMsgGetCFilters(GCSFilterExtended, 0, &chainhash.Hash{}) - msgGetCFHeaders := NewMsgGetCFHeaders(GCSFilterExtended, 0, &chainhash.Hash{}) - msgGetCFCheckpt := NewMsgGetCFCheckpt(GCSFilterExtended, &chainhash.Hash{}) - msgCFilter := NewMsgCFilter(GCSFilterExtended, &chainhash.Hash{}, + msgGetCFilters := NewMsgGetCFilters(GCSFilterExtended, 0, &daghash.Hash{}) + msgGetCFHeaders := NewMsgGetCFHeaders(GCSFilterExtended, 0, &daghash.Hash{}) + msgGetCFCheckpt := NewMsgGetCFCheckpt(GCSFilterExtended, &daghash.Hash{}) + msgCFilter := NewMsgCFilter(GCSFilterExtended, &daghash.Hash{}, []byte("payload")) 
msgCFHeaders := NewMsgCFHeaders() - msgCFCheckpt := NewMsgCFCheckpt(GCSFilterExtended, &chainhash.Hash{}, 0) + msgCFCheckpt := NewMsgCFCheckpt(GCSFilterExtended, &daghash.Hash{}, 0) tests := []struct { in Message // Value to encode @@ -89,7 +89,7 @@ func TestMessage(t *testing.T) { {msgGetAddr, msgGetAddr, pver, MainNet, 24}, {msgAddr, msgAddr, pver, MainNet, 25}, {msgGetBlocks, msgGetBlocks, pver, MainNet, 61}, - {msgBlock, msgBlock, pver, MainNet, 251}, + {msgBlock, msgBlock, pver, MainNet, 284}, {msgInv, msgInv, pver, MainNet, 25}, {msgGetData, msgGetData, pver, MainNet, 25}, {msgNotFound, msgNotFound, pver, MainNet, 25}, @@ -103,7 +103,7 @@ func TestMessage(t *testing.T) { {msgFilterAdd, msgFilterAdd, pver, MainNet, 26}, {msgFilterClear, msgFilterClear, pver, MainNet, 24}, {msgFilterLoad, msgFilterLoad, pver, MainNet, 35}, - {msgMerkleBlock, msgMerkleBlock, pver, MainNet, 114}, + {msgMerkleBlock, msgMerkleBlock, pver, MainNet, 147}, {msgReject, msgReject, pver, MainNet, 79}, {msgGetCFilters, msgGetCFilters, pver, MainNet, 61}, {msgGetCFHeaders, msgGetCFHeaders, pver, MainNet, 61}, diff --git a/wire/msgblock.go b/wire/msgblock.go index d94e6313d..8571406e3 100644 --- a/wire/msgblock.go +++ b/wire/msgblock.go @@ -9,7 +9,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // defaultTransactionAlloc is the default size used for the backing array @@ -202,7 +202,7 @@ func (msg *MsgBlock) Serialize(w io.Writer) error { func (msg *MsgBlock) SerializeSize() int { // Block header bytes + Serialized varint size for the number of // transactions. 
- n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions))) + n := msg.Header.SerializeSize() + VarIntSerializeSize(uint64(len(msg.Transactions))) for _, tx := range msg.Transactions { n += tx.SerializeSize() @@ -227,13 +227,13 @@ func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 { } // BlockHash computes the block identifier hash for this block. -func (msg *MsgBlock) BlockHash() chainhash.Hash { +func (msg *MsgBlock) BlockHash() daghash.Hash { return msg.Header.BlockHash() } // TxHashes returns a slice of hashes of all of transactions in this block. -func (msg *MsgBlock) TxHashes() ([]chainhash.Hash, error) { - hashList := make([]chainhash.Hash, 0, len(msg.Transactions)) +func (msg *MsgBlock) TxHashes() ([]daghash.Hash, error) { + hashList := make([]daghash.Hash, 0, len(msg.Transactions)) for _, tx := range msg.Transactions { hashList = append(hashList, tx.TxHash()) } diff --git a/wire/msgblock_test.go b/wire/msgblock_test.go index fa5aa08db..a7e2fe311 100644 --- a/wire/msgblock_test.go +++ b/wire/msgblock_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -21,11 +21,11 @@ func TestBlock(t *testing.T) { pver := ProtocolVersion // Block 1 header. - prevHash := &blockOne.Header.PrevBlock + prevHashes := blockOne.Header.PrevBlocks merkleHash := &blockOne.Header.MerkleRoot bits := blockOne.Header.Bits nonce := blockOne.Header.Nonce - bh := NewBlockHeader(1, prevHash, merkleHash, bits, nonce) + bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce) // Ensure the command is expected value. wantCmd := "block" @@ -72,14 +72,14 @@ func TestBlock(t *testing.T) { // hashes from a block accurately. func TestBlockTxHashes(t *testing.T) { // Block 1, transaction 1 hash. 
- hashStr := "b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f" - wantHash, err := chainhash.NewHashFromStr(hashStr) + hashStr := "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098" + wantHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) return } - wantHashes := []chainhash.Hash{*wantHash} + wantHashes := []daghash.Hash{*wantHash} hashes, err := blockOne.TxHashes() if err != nil { t.Errorf("TxHashes: %v", err) @@ -93,8 +93,8 @@ func TestBlockTxHashes(t *testing.T) { // TestBlockHash tests the ability to generate the hash of a block accurately. func TestBlockHash(t *testing.T) { // Block 1 hash. - hashStr := "ec85da8297525c2a2a5f3e826510ea1a48ee741e13a18b93ceeb2fb6c9848925" - wantHash, err := chainhash.NewHashFromStr(hashStr) + hashStr := "2357979742c556c68e90bf624a1139af8c85cafb4ac98d6d1dc367cd661ef67d" + wantHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -212,20 +212,24 @@ func TestBlockWireErrors(t *testing.T) { }{ // Force error in version. {&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF}, - // Force error in prev block hash. + // Force error in num block hashes. {&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF}, + // Force error in prev block hash #1. + {&blockOne, blockOneBytes, pver, 5, io.ErrShortWrite, io.EOF}, + // Force error in prev block hash #2. + {&blockOne, blockOneBytes, pver, 37, io.ErrShortWrite, io.EOF}, // Force error in merkle root. - {&blockOne, blockOneBytes, pver, 36, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, pver, 69, io.ErrShortWrite, io.EOF}, // Force error in timestamp. - {&blockOne, blockOneBytes, pver, 68, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, pver, 101, io.ErrShortWrite, io.EOF}, // Force error in difficulty bits. 
- {&blockOne, blockOneBytes, pver, 76, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, pver, 109, io.ErrShortWrite, io.EOF}, // Force error in header nonce. - {&blockOne, blockOneBytes, pver, 80, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, pver, 113, io.ErrShortWrite, io.EOF}, // Force error in transaction count. - {&blockOne, blockOneBytes, pver, 84, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, pver, 117, io.ErrShortWrite, io.EOF}, // Force error in transactions. - {&blockOne, blockOneBytes, pver, 85, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, pver, 118, io.ErrShortWrite, io.EOF}, } t.Logf("Running %d tests", len(tests)) @@ -330,20 +334,24 @@ func TestBlockSerializeErrors(t *testing.T) { }{ // Force error in version. {&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF}, - // Force error in prev block hash. + // Force error in numPrevBlocks. {&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF}, + // Force error in prev block hash #1. + {&blockOne, blockOneBytes, 5, io.ErrShortWrite, io.EOF}, + // Force error in prev block hash #2. + {&blockOne, blockOneBytes, 37, io.ErrShortWrite, io.EOF}, // Force error in merkle root. - {&blockOne, blockOneBytes, 36, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, 69, io.ErrShortWrite, io.EOF}, // Force error in timestamp. - {&blockOne, blockOneBytes, 68, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, 101, io.ErrShortWrite, io.EOF}, // Force error in difficulty bits. - {&blockOne, blockOneBytes, 76, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, 109, io.ErrShortWrite, io.EOF}, // Force error in header nonce. - {&blockOne, blockOneBytes, 80, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, 113, io.ErrShortWrite, io.EOF}, // Force error in transaction count. - {&blockOne, blockOneBytes, 84, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, 117, io.ErrShortWrite, io.EOF}, // Force error in transactions. 
- {&blockOne, blockOneBytes, 85, io.ErrShortWrite, io.EOF}, + {&blockOne, blockOneBytes, 118, io.ErrShortWrite, io.EOF}, } t.Logf("Running %d tests", len(tests)) @@ -397,14 +405,19 @@ func TestBlockOverflowErrors(t *testing.T) { { []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, - 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, - 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0x01, 0xe3, 0x62, 0x99, // Nonce @@ -457,7 +470,7 @@ func TestBlockSerializeSize(t *testing.T) { size int // Expected serialized size }{ // Block with no transactions. - {noTxBlock, 85}, + {noTxBlock, 118}, // First block in the mainnet block chain. 
{&blockOne, len(blockOneBytes)}, @@ -469,6 +482,7 @@ func TestBlockSerializeSize(t *testing.T) { if serializedSize != test.size { t.Errorf("MsgBlock.SerializeSize: #%d got: %d, want: "+ "%d", i, serializedSize, test.size) + continue } } @@ -477,19 +491,10 @@ func TestBlockSerializeSize(t *testing.T) { // blockOne is the first block in the mainnet block chain. var blockOne = MsgBlock{ Header: BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, - 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, - 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, - }), - MerkleRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, - 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, - 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, - }), + Version: 1, + NumPrevBlocks: 2, + PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, + MerkleRoot: daghash.Hash(mainNetGenesisMerkleRoot), Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST Bits: 0x1d00ffff, // 486604799 @@ -501,7 +506,7 @@ var blockOne = MsgBlock{ TxIn: []*TxIn{ { PreviousOutPoint: OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{ @@ -536,14 +541,19 @@ var blockOne = MsgBlock{ // Block one serialized bytes. 
var blockOneBytes = []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, - 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, - 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0x01, 0xe3, 0x62, 0x99, // Nonce @@ -577,5 +587,5 @@ var blockOneBytes = []byte{ // Transaction location information for block one transactions. var blockOneTxLocs = []TxLoc{ - {TxStart: 85, TxLen: 142}, + {TxStart: 118, TxLen: 138}, } diff --git a/wire/msgcfcheckpt.go b/wire/msgcfcheckpt.go index c9f8626c7..73218a5ab 100644 --- a/wire/msgcfcheckpt.go +++ b/wire/msgcfcheckpt.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) const ( @@ -23,12 +23,12 @@ const ( // for details on requesting the headers. 
type MsgCFCheckpt struct { FilterType FilterType - StopHash chainhash.Hash - FilterHeaders []*chainhash.Hash + StopHash daghash.Hash + FilterHeaders []*daghash.Hash } // AddCFHeader adds a new committed filter header to the message. -func (msg *MsgCFCheckpt) AddCFHeader(header *chainhash.Hash) error { +func (msg *MsgCFCheckpt) AddCFHeader(header *daghash.Hash) error { if len(msg.FilterHeaders) == cap(msg.FilterHeaders) { str := fmt.Sprintf("FilterHeaders has insufficient capacity for "+ "additional header: len = %d", len(msg.FilterHeaders)) @@ -62,9 +62,9 @@ func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32) error { // Create a contiguous slice of hashes to deserialize into in order to // reduce the number of allocations. - msg.FilterHeaders = make([]*chainhash.Hash, count) + msg.FilterHeaders = make([]*daghash.Hash, count) for i := uint64(0); i < count; i++ { - var cfh chainhash.Hash + var cfh daghash.Hash err := readElement(r, &cfh) if err != nil { return err @@ -139,11 +139,11 @@ func (msg *MsgCFCheckpt) MaxPayloadLength(pver uint32) uint32 { // NewMsgCFCheckpt returns a new bitcoin cfheaders message that conforms to // the Message interface. See MsgCFCheckpt for details. -func NewMsgCFCheckpt(filterType FilterType, stopHash *chainhash.Hash, +func NewMsgCFCheckpt(filterType FilterType, stopHash *daghash.Hash, headersCount int) *MsgCFCheckpt { return &MsgCFCheckpt{ FilterType: filterType, StopHash: *stopHash, - FilterHeaders: make([]*chainhash.Hash, 0, headersCount), + FilterHeaders: make([]*daghash.Hash, 0, headersCount), } } diff --git a/wire/msgcfheaders.go b/wire/msgcfheaders.go index 9986bf696..a66fb49c0 100644 --- a/wire/msgcfheaders.go +++ b/wire/msgcfheaders.go @@ -8,13 +8,13 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) const ( // MaxCFHeaderPayload is the maximum byte size of a committed // filter header. 
- MaxCFHeaderPayload = chainhash.HashSize + MaxCFHeaderPayload = daghash.HashSize // MaxCFHeadersPerMsg is the maximum number of committed filter headers // that can be in a single bitcoin cfheaders message. @@ -28,13 +28,13 @@ const ( // MsgGetCFHeaders for details on requesting the headers. type MsgCFHeaders struct { FilterType FilterType - StopHash chainhash.Hash - PrevFilterHeader chainhash.Hash - FilterHashes []*chainhash.Hash + StopHash daghash.Hash + PrevFilterHeader daghash.Hash + FilterHashes []*daghash.Hash } // AddCFHash adds a new filter hash to the message. -func (msg *MsgCFHeaders) AddCFHash(hash *chainhash.Hash) error { +func (msg *MsgCFHeaders) AddCFHash(hash *daghash.Hash) error { if len(msg.FilterHashes)+1 > MaxCFHeadersPerMsg { str := fmt.Sprintf("too many block headers in message [max %v]", MaxBlockHeadersPerMsg) @@ -82,9 +82,9 @@ func (msg *MsgCFHeaders) BtcDecode(r io.Reader, pver uint32) error { // Create a contiguous slice of hashes to deserialize into in order to // reduce the number of allocations. - msg.FilterHashes = make([]*chainhash.Hash, 0, count) + msg.FilterHashes = make([]*daghash.Hash, 0, count) for i := uint64(0); i < count; i++ { - var cfh chainhash.Hash + var cfh daghash.Hash err := readElement(r, &cfh) if err != nil { return err @@ -167,7 +167,7 @@ func (msg *MsgCFHeaders) Command() string { func (msg *MsgCFHeaders) MaxPayloadLength(pver uint32) uint32 { // Hash size + filter type + num headers (varInt) + // (header size * max headers). - return 1 + chainhash.HashSize + chainhash.HashSize + MaxVarIntPayload + + return 1 + daghash.HashSize + daghash.HashSize + MaxVarIntPayload + (MaxCFHeaderPayload * MaxCFHeadersPerMsg) } @@ -175,6 +175,6 @@ func (msg *MsgCFHeaders) MaxPayloadLength(pver uint32) uint32 { // the Message interface. See MsgCFHeaders for details. 
func NewMsgCFHeaders() *MsgCFHeaders { return &MsgCFHeaders{ - FilterHashes: make([]*chainhash.Hash, 0, MaxCFHeadersPerMsg), + FilterHashes: make([]*daghash.Hash, 0, MaxCFHeadersPerMsg), } } diff --git a/wire/msgcfilter.go b/wire/msgcfilter.go index a48bd2fe9..771e44efb 100644 --- a/wire/msgcfilter.go +++ b/wire/msgcfilter.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // FilterType is used to represent a filter type. @@ -33,7 +33,7 @@ const ( // getcfilters (MsgGetCFilters) message. type MsgCFilter struct { FilterType FilterType - BlockHash chainhash.Hash + BlockHash daghash.Hash Data []byte } @@ -107,12 +107,12 @@ func (msg *MsgCFilter) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgCFilter) MaxPayloadLength(pver uint32) uint32 { return uint32(VarIntSerializeSize(MaxCFilterDataSize)) + - MaxCFilterDataSize + chainhash.HashSize + 1 + MaxCFilterDataSize + daghash.HashSize + 1 } // NewMsgCFilter returns a new bitcoin cfilter message that conforms to the // Message interface. See MsgCFilter for details. -func NewMsgCFilter(filterType FilterType, blockHash *chainhash.Hash, +func NewMsgCFilter(filterType FilterType, blockHash *daghash.Hash, data []byte) *MsgCFilter { return &MsgCFilter{ FilterType: filterType, diff --git a/wire/msggetblocks.go b/wire/msggetblocks.go index c8ed4d192..895ee81ec 100644 --- a/wire/msggetblocks.go +++ b/wire/msggetblocks.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed @@ -32,12 +32,12 @@ const MaxBlockLocatorsPerMsg = 500 // closer to the genesis block you get. 
type MsgGetBlocks struct { ProtocolVersion uint32 - BlockLocatorHashes []*chainhash.Hash - HashStop chainhash.Hash + BlockLocatorHashes []*daghash.Hash + HashStop daghash.Hash } // AddBlockLocatorHash adds a new block locator hash to the message. -func (msg *MsgGetBlocks) AddBlockLocatorHash(hash *chainhash.Hash) error { +func (msg *MsgGetBlocks) AddBlockLocatorHash(hash *daghash.Hash) error { if len(msg.BlockLocatorHashes)+1 > MaxBlockLocatorsPerMsg { str := fmt.Sprintf("too many block locator hashes for message [max %v]", MaxBlockLocatorsPerMsg) @@ -69,8 +69,8 @@ func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32) error { // Create a contiguous slice of hashes to deserialize into in order to // reduce the number of allocations. - locatorHashes := make([]chainhash.Hash, count) - msg.BlockLocatorHashes = make([]*chainhash.Hash, 0, count) + locatorHashes := make([]daghash.Hash, count) + msg.BlockLocatorHashes = make([]*daghash.Hash, 0, count) for i := uint64(0); i < count; i++ { hash := &locatorHashes[i] err := readElement(r, hash) @@ -124,16 +124,16 @@ func (msg *MsgGetBlocks) Command() string { func (msg *MsgGetBlocks) MaxPayloadLength(pver uint32) uint32 { // Protocol version 4 bytes + num hashes (varInt) + max block locator // hashes + hash stop. - return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * chainhash.HashSize) + chainhash.HashSize + return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * daghash.HashSize) + daghash.HashSize } // NewMsgGetBlocks returns a new bitcoin getblocks message that conforms to the // Message interface using the passed parameters and defaults for the remaining // fields. 
-func NewMsgGetBlocks(hashStop *chainhash.Hash) *MsgGetBlocks { +func NewMsgGetBlocks(hashStop *daghash.Hash) *MsgGetBlocks { return &MsgGetBlocks{ ProtocolVersion: ProtocolVersion, - BlockLocatorHashes: make([]*chainhash.Hash, 0, MaxBlockLocatorsPerMsg), + BlockLocatorHashes: make([]*daghash.Hash, 0, MaxBlockLocatorsPerMsg), HashStop: *hashStop, } } diff --git a/wire/msggetblocks_test.go b/wire/msggetblocks_test.go index 2feac8c7d..d7335640c 100644 --- a/wire/msggetblocks_test.go +++ b/wire/msggetblocks_test.go @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -20,14 +20,14 @@ func TestGetBlocks(t *testing.T) { // Block 99500 hash. hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - locatorHash, err := chainhash.NewHashFromStr(hashStr) + locatorHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := chainhash.NewHashFromStr(hashStr) + hashStop, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -88,27 +88,27 @@ func TestGetBlocksWire(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := chainhash.NewHashFromStr(hashStr) + hashLocator, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := chainhash.NewHashFromStr(hashStr) + hashLocator2, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 100000 hash. 
hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := chainhash.NewHashFromStr(hashStr) + hashStop, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // MsgGetBlocks message with no block locators or stop hash. - noLocators := NewMsgGetBlocks(&chainhash.Hash{}) + noLocators := NewMsgGetBlocks(&daghash.Hash{}) noLocators.ProtocolVersion = pver noLocatorsEncoded := []byte{ 0x62, 0xea, 0x00, 0x00, // Protocol version 60002 @@ -270,21 +270,21 @@ func TestGetBlocksWireErrors(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := chainhash.NewHashFromStr(hashStr) + hashLocator, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := chainhash.NewHashFromStr(hashStr) + hashLocator2, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := chainhash.NewHashFromStr(hashStr) + hashStop, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } diff --git a/wire/msggetcfcheckpt.go b/wire/msggetcfcheckpt.go index f3620730f..e9cf1497e 100644 --- a/wire/msggetcfcheckpt.go +++ b/wire/msggetcfcheckpt.go @@ -7,7 +7,7 @@ package wire import ( "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // MsgGetCFCheckpt is a request for filter headers at evenly spaced intervals @@ -15,7 +15,7 @@ import ( // get headers in the chain of basic (0x00) or extended (0x01) headers. type MsgGetCFCheckpt struct { FilterType FilterType - StopHash chainhash.Hash + StopHash daghash.Hash } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. 
@@ -50,13 +50,13 @@ func (msg *MsgGetCFCheckpt) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgGetCFCheckpt) MaxPayloadLength(pver uint32) uint32 { // Filter type + uint32 + block hash - return 1 + chainhash.HashSize + return 1 + daghash.HashSize } // NewMsgGetCFCheckpt returns a new bitcoin getcfcheckpt message that conforms // to the Message interface using the passed parameters and defaults for the // remaining fields. -func NewMsgGetCFCheckpt(filterType FilterType, stopHash *chainhash.Hash) *MsgGetCFCheckpt { +func NewMsgGetCFCheckpt(filterType FilterType, stopHash *daghash.Hash) *MsgGetCFCheckpt { return &MsgGetCFCheckpt{ FilterType: filterType, StopHash: *stopHash, diff --git a/wire/msggetcfheaders.go b/wire/msggetcfheaders.go index dbfa0cdfa..4d43cd4d1 100644 --- a/wire/msggetcfheaders.go +++ b/wire/msggetcfheaders.go @@ -7,7 +7,7 @@ package wire import ( "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // MsgGetCFHeaders is a message similar to MsgGetHeaders, but for committed @@ -16,7 +16,7 @@ import ( type MsgGetCFHeaders struct { FilterType FilterType StartHeight uint32 - StopHash chainhash.Hash + StopHash daghash.Hash } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. @@ -61,14 +61,14 @@ func (msg *MsgGetCFHeaders) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgGetCFHeaders) MaxPayloadLength(pver uint32) uint32 { // Filter type + uint32 + block hash - return 1 + 4 + chainhash.HashSize + return 1 + 4 + daghash.HashSize } // NewMsgGetCFHeaders returns a new bitcoin getcfheader message that conforms to // the Message interface using the passed parameters and defaults for the // remaining fields. 
func NewMsgGetCFHeaders(filterType FilterType, startHeight uint32, - stopHash *chainhash.Hash) *MsgGetCFHeaders { + stopHash *daghash.Hash) *MsgGetCFHeaders { return &MsgGetCFHeaders{ FilterType: filterType, StartHeight: startHeight, diff --git a/wire/msggetcfilters.go b/wire/msggetcfilters.go index 04d1c4a5f..bc686b9c1 100644 --- a/wire/msggetcfilters.go +++ b/wire/msggetcfilters.go @@ -7,7 +7,7 @@ package wire import ( "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // MaxGetCFiltersReqRange the maximum number of filters that may be requested in @@ -20,7 +20,7 @@ const MaxGetCFiltersReqRange = 1000 type MsgGetCFilters struct { FilterType FilterType StartHeight uint32 - StopHash chainhash.Hash + StopHash daghash.Hash } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. @@ -65,14 +65,14 @@ func (msg *MsgGetCFilters) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgGetCFilters) MaxPayloadLength(pver uint32) uint32 { // Filter type + uint32 + block hash - return 1 + 4 + chainhash.HashSize + return 1 + 4 + daghash.HashSize } // NewMsgGetCFilters returns a new bitcoin getcfilters message that conforms to // the Message interface using the passed parameters and defaults for the // remaining fields. 
func NewMsgGetCFilters(filterType FilterType, startHeight uint32, - stopHash *chainhash.Hash) *MsgGetCFilters { + stopHash *daghash.Hash) *MsgGetCFilters { return &MsgGetCFilters{ FilterType: filterType, StartHeight: startHeight, diff --git a/wire/msggetdata_test.go b/wire/msggetdata_test.go index eade88286..946d289ac 100644 --- a/wire/msggetdata_test.go +++ b/wire/msggetdata_test.go @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -37,7 +37,7 @@ func TestGetData(t *testing.T) { } // Ensure inventory vectors are added properly. - hash := chainhash.Hash{} + hash := daghash.Hash{} iv := NewInvVect(InvTypeBlock, &hash) err := msg.AddInvVect(iv) if err != nil { @@ -73,14 +73,14 @@ func TestGetData(t *testing.T) { func TestGetDataWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := chainhash.NewHashFromStr(hashStr) + blockHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Transaction 1 of Block 203707 hash. hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0" - txHash, err := chainhash.NewHashFromStr(hashStr) + txHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -238,7 +238,7 @@ func TestGetDataWireErrors(t *testing.T) { // Block 203707 hash. 
hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := chainhash.NewHashFromStr(hashStr) + blockHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } diff --git a/wire/msggetheaders.go b/wire/msggetheaders.go index 58b50f74c..3c2ebff02 100644 --- a/wire/msggetheaders.go +++ b/wire/msggetheaders.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // MsgGetHeaders implements the Message interface and represents a bitcoin @@ -29,12 +29,12 @@ import ( // closer to the genesis block you get. type MsgGetHeaders struct { ProtocolVersion uint32 - BlockLocatorHashes []*chainhash.Hash - HashStop chainhash.Hash + BlockLocatorHashes []*daghash.Hash + HashStop daghash.Hash } // AddBlockLocatorHash adds a new block locator hash to the message. -func (msg *MsgGetHeaders) AddBlockLocatorHash(hash *chainhash.Hash) error { +func (msg *MsgGetHeaders) AddBlockLocatorHash(hash *daghash.Hash) error { if len(msg.BlockLocatorHashes)+1 > MaxBlockLocatorsPerMsg { str := fmt.Sprintf("too many block locator hashes for message [max %v]", MaxBlockLocatorsPerMsg) @@ -66,8 +66,8 @@ func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32) error { // Create a contiguous slice of hashes to deserialize into in order to // reduce the number of allocations. - locatorHashes := make([]chainhash.Hash, count) - msg.BlockLocatorHashes = make([]*chainhash.Hash, 0, count) + locatorHashes := make([]daghash.Hash, count) + msg.BlockLocatorHashes = make([]*daghash.Hash, 0, count) for i := uint64(0); i < count; i++ { hash := &locatorHashes[i] err := readElement(r, hash) @@ -123,14 +123,14 @@ func (msg *MsgGetHeaders) MaxPayloadLength(pver uint32) uint32 { // Version 4 bytes + num block locator hashes (varInt) + max allowed block // locators + hash stop. 
return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * - chainhash.HashSize) + chainhash.HashSize + daghash.HashSize) + daghash.HashSize } // NewMsgGetHeaders returns a new bitcoin getheaders message that conforms to // the Message interface. See MsgGetHeaders for details. func NewMsgGetHeaders() *MsgGetHeaders { return &MsgGetHeaders{ - BlockLocatorHashes: make([]*chainhash.Hash, 0, + BlockLocatorHashes: make([]*daghash.Hash, 0, MaxBlockLocatorsPerMsg), } } diff --git a/wire/msggetheaders_test.go b/wire/msggetheaders_test.go index a10eb9f18..3bdd246c1 100644 --- a/wire/msggetheaders_test.go +++ b/wire/msggetheaders_test.go @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -20,7 +20,7 @@ func TestGetHeaders(t *testing.T) { // Block 99500 hash. hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - locatorHash, err := chainhash.NewHashFromStr(hashStr) + locatorHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -77,21 +77,21 @@ func TestGetHeadersWire(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := chainhash.NewHashFromStr(hashStr) + hashLocator, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := chainhash.NewHashFromStr(hashStr) + hashLocator2, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 100000 hash. 
hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := chainhash.NewHashFromStr(hashStr) + hashStop, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -260,21 +260,21 @@ func TestGetHeadersWireErrors(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := chainhash.NewHashFromStr(hashStr) + hashLocator, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := chainhash.NewHashFromStr(hashStr) + hashLocator2, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := chainhash.NewHashFromStr(hashStr) + hashStop, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } diff --git a/wire/msgheaders_test.go b/wire/msgheaders_test.go index fbc6adf4a..2ceb9d21f 100644 --- a/wire/msgheaders_test.go +++ b/wire/msgheaders_test.go @@ -10,6 +10,7 @@ import ( "reflect" "testing" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -28,7 +29,7 @@ func TestHeaders(t *testing.T) { // Ensure max payload is expected value for latest protocol version. // Num headers (varInt) + max allowed headers (header length + 1 byte // for the number of transactions which is always 0). - wantPayload := uint32(162009) + wantPayload := uint32(16420009) maxPayload := msg.MaxPayloadLength(pver) if maxPayload != wantPayload { t.Errorf("MaxPayloadLength: wrong max payload length for "+ @@ -60,11 +61,11 @@ func TestHeaders(t *testing.T) { // TestHeadersWire tests the MsgHeaders wire encode and decode for various // numbers of headers and protocol versions. 
func TestHeadersWire(t *testing.T) { - hash := mainNetGenesisHash + hashes := []daghash.Hash{mainNetGenesisHash, simNetGenesisHash} merkleHash := blockOne.Header.MerkleRoot bits := uint32(0x1d00ffff) nonce := uint32(0x9962e301) - bh := NewBlockHeader(1, &hash, &merkleHash, bits, nonce) + bh := NewBlockHeader(1, hashes, &merkleHash, bits, nonce) bh.Version = blockOne.Header.Version bh.Timestamp = blockOne.Header.Timestamp @@ -80,14 +81,19 @@ func TestHeadersWire(t *testing.T) { oneHeaderEncoded := []byte{ 0x01, // VarInt for number of headers. 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, - 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, - 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0x01, 0xe3, 0x62, 0x99, // Nonce @@ -217,11 +223,11 @@ func TestHeadersWireErrors(t *testing.T) { pver := ProtocolVersion wireErr := &MessageError{} - hash := mainNetGenesisHash + hashes := []daghash.Hash{mainNetGenesisHash, simNetGenesisHash} merkleHash := blockOne.Header.MerkleRoot bits := 
uint32(0x1d00ffff) nonce := uint32(0x9962e301) - bh := NewBlockHeader(1, &hash, &merkleHash, bits, nonce) + bh := NewBlockHeader(1, hashes, &merkleHash, bits, nonce) bh.Version = blockOne.Header.Version bh.Timestamp = blockOne.Header.Timestamp @@ -231,14 +237,19 @@ func TestHeadersWireErrors(t *testing.T) { oneHeaderEncoded := []byte{ 0x01, // VarInt for number of headers. 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, - 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, - 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0x01, 0xe3, 0x62, 0x99, // Nonce @@ -258,7 +269,7 @@ func TestHeadersWireErrors(t *testing.T) { // Intentionally invalid block header that has a transaction count used // to force errors. 
- bhTrans := NewBlockHeader(1, &hash, &merkleHash, bits, nonce) + bhTrans := NewBlockHeader(1, hashes, &merkleHash, bits, nonce) bhTrans.Version = blockOne.Header.Version bhTrans.Timestamp = blockOne.Header.Timestamp @@ -267,14 +278,19 @@ func TestHeadersWireErrors(t *testing.T) { transHeaderEncoded := []byte{ 0x01, // VarInt for number of headers. 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, - 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, - 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // MerkleRoot + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, 0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp 0xff, 0xff, 0x00, 0x1d, // Bits 0x01, 0xe3, 0x62, 0x99, // Nonce @@ -297,7 +313,7 @@ func TestHeadersWireErrors(t *testing.T) { // Force error with greater than max headers. {maxHeaders, maxHeadersEncoded, pver, 3, wireErr, wireErr}, // Force error with number of transactions. - {transHeader, transHeaderEncoded, pver, 81, io.ErrShortWrite, io.EOF}, + {transHeader, transHeaderEncoded, pver, 114, io.ErrShortWrite, io.EOF}, // Force error with included transactions. 
{transHeader, transHeaderEncoded, pver, len(transHeaderEncoded), nil, wireErr}, } diff --git a/wire/msginv_test.go b/wire/msginv_test.go index 1950127e4..0ddb2353e 100644 --- a/wire/msginv_test.go +++ b/wire/msginv_test.go @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -37,7 +37,7 @@ func TestInv(t *testing.T) { } // Ensure inventory vectors are added properly. - hash := chainhash.Hash{} + hash := daghash.Hash{} iv := NewInvVect(InvTypeBlock, &hash) err := msg.AddInvVect(iv) if err != nil { @@ -73,14 +73,14 @@ func TestInv(t *testing.T) { func TestInvWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := chainhash.NewHashFromStr(hashStr) + blockHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Transaction 1 of Block 203707 hash. hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0" - txHash, err := chainhash.NewHashFromStr(hashStr) + txHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -238,7 +238,7 @@ func TestInvWireErrors(t *testing.T) { // Block 203707 hash. 
hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := chainhash.NewHashFromStr(hashStr) + blockHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } diff --git a/wire/msgmerkleblock.go b/wire/msgmerkleblock.go index b82b7b24f..bdfa76afc 100644 --- a/wire/msgmerkleblock.go +++ b/wire/msgmerkleblock.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // maxFlagsPerMerkleBlock is the maximum number of flag bytes that could @@ -24,12 +24,12 @@ const maxFlagsPerMerkleBlock = maxTxPerBlock / 8 type MsgMerkleBlock struct { Header BlockHeader Transactions uint32 - Hashes []*chainhash.Hash + Hashes []*daghash.Hash Flags []byte } // AddTxHash adds a new transaction hash to the message. -func (msg *MsgMerkleBlock) AddTxHash(hash *chainhash.Hash) error { +func (msg *MsgMerkleBlock) AddTxHash(hash *daghash.Hash) error { if len(msg.Hashes)+1 > maxTxPerBlock { str := fmt.Sprintf("too many tx hashes for message [max %v]", maxTxPerBlock) @@ -72,8 +72,8 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error { // Create a contiguous slice of hashes to deserialize into in order to // reduce the number of allocations. 
- hashes := make([]chainhash.Hash, count) - msg.Hashes = make([]*chainhash.Hash, 0, count) + hashes := make([]daghash.Hash, count) + msg.Hashes = make([]*daghash.Hash, 0, count) for i := uint64(0); i < count; i++ { hash := &hashes[i] err := readElement(r, hash) @@ -153,7 +153,7 @@ func NewMsgMerkleBlock(bh *BlockHeader) *MsgMerkleBlock { return &MsgMerkleBlock{ Header: *bh, Transactions: 0, - Hashes: make([]*chainhash.Hash, 0), + Hashes: make([]*daghash.Hash, 0), Flags: make([]byte, 0), } } diff --git a/wire/msgmerkleblock_test.go b/wire/msgmerkleblock_test.go index 13597870a..b03be6dc0 100644 --- a/wire/msgmerkleblock_test.go +++ b/wire/msgmerkleblock_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -21,11 +21,11 @@ func TestMerkleBlock(t *testing.T) { pver := ProtocolVersion // Block 1 header. - prevHash := &blockOne.Header.PrevBlock + prevHashes := blockOne.Header.PrevBlocks merkleHash := &blockOne.Header.MerkleRoot bits := blockOne.Header.Bits nonce := blockOne.Header.Nonce - bh := NewBlockHeader(1, prevHash, merkleHash, bits, nonce) + bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce) // Ensure the command is expected value. wantCmd := "merkleblock" @@ -49,7 +49,7 @@ func TestMerkleBlock(t *testing.T) { data := make([]byte, 32) for i := 0; i < maxTxPerBlock; i++ { rand.Read(data) - hash, err := chainhash.NewHash(data) + hash, err := daghash.NewHash(data) if err != nil { t.Errorf("NewHash failed: %v\n", err) return @@ -63,7 +63,7 @@ func TestMerkleBlock(t *testing.T) { // Add one more Tx to test failure. rand.Read(data) - hash, err := chainhash.NewHash(data) + hash, err := daghash.NewHash(data) if err != nil { t.Errorf("NewHash failed: %v\n", err) return @@ -113,11 +113,11 @@ func TestMerkleBlock(t *testing.T) { // the latest protocol version and decoding with BIP0031Version. 
func TestMerkleBlockCrossProtocol(t *testing.T) { // Block 1 header. - prevHash := &blockOne.Header.PrevBlock + prevHashes := blockOne.Header.PrevBlocks merkleHash := &blockOne.Header.MerkleRoot bits := blockOne.Header.Bits nonce := blockOne.Header.Nonce - bh := NewBlockHeader(1, prevHash, merkleHash, bits, nonce) + bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce) msg := NewMsgMerkleBlock(bh) @@ -208,65 +208,33 @@ func TestMerkleBlockWireErrors(t *testing.T) { readErr error // Expected read error }{ // Force error in version. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 0, - io.ErrShortWrite, io.EOF, - }, - // Force error in prev block hash. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 4, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 0, io.ErrShortWrite, io.EOF}, + // Force error in num prev hashes. + {&merkleBlockOne, merkleBlockOneBytes, pver, 4, io.ErrShortWrite, io.EOF}, + // Force error in prev block hash #1. + {&merkleBlockOne, merkleBlockOneBytes, pver, 5, io.ErrShortWrite, io.EOF}, + // Force error in prev block hash #2. + {&merkleBlockOne, merkleBlockOneBytes, pver, 37, io.ErrShortWrite, io.EOF}, // Force error in merkle root. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 36, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 69, io.ErrShortWrite, io.EOF}, // Force error in timestamp. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 68, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 101, io.ErrShortWrite, io.EOF}, // Force error in difficulty bits. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 76, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 109, io.ErrShortWrite, io.EOF}, // Force error in header nonce. 
- { - &merkleBlockOne, merkleBlockOneBytes, pver, 80, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 113, io.ErrShortWrite, io.EOF}, // Force error in transaction count. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 84, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 117, io.ErrShortWrite, io.EOF}, // Force error in num hashes. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 88, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 121, io.ErrShortWrite, io.EOF}, // Force error in hashes. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 89, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 122, io.ErrShortWrite, io.EOF}, // Force error in num flag bytes. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 121, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 154, io.ErrShortWrite, io.EOF}, // Force error in flag bytes. - { - &merkleBlockOne, merkleBlockOneBytes, pver, 122, - io.ErrShortWrite, io.EOF, - }, + {&merkleBlockOne, merkleBlockOneBytes, pver, 155, io.ErrShortWrite, io.EOF}, // Force error due to unsupported protocol version. - { - &merkleBlockOne, merkleBlockOneBytes, pverNoMerkleBlock, - 123, wireErr, wireErr, - }, + {&merkleBlockOne, merkleBlockOneBytes, pverNoMerkleBlock, 155, wireErr, wireErr}, } t.Logf("Running %d tests", len(tests)) @@ -326,7 +294,7 @@ func TestMerkleBlockOverflowErrors(t *testing.T) { // allowed tx hashes. var buf bytes.Buffer WriteVarInt(&buf, pver, maxTxPerBlock+1) - numHashesOffset := 88 + numHashesOffset := 121 exceedMaxHashes := make([]byte, numHashesOffset) copy(exceedMaxHashes, merkleBlockOneBytes[:numHashesOffset]) exceedMaxHashes = append(exceedMaxHashes, buf.Bytes()...) @@ -335,7 +303,7 @@ func TestMerkleBlockOverflowErrors(t *testing.T) { // allowed flag bytes. 
buf.Reset() WriteVarInt(&buf, pver, maxFlagsPerMerkleBlock+1) - numFlagBytesOffset := 121 + numFlagBytesOffset := 154 exceedMaxFlagBytes := make([]byte, numFlagBytesOffset) copy(exceedMaxFlagBytes, merkleBlockOneBytes[:numFlagBytesOffset]) exceedMaxFlagBytes = append(exceedMaxFlagBytes, buf.Bytes()...) @@ -369,14 +337,10 @@ func TestMerkleBlockOverflowErrors(t *testing.T) { // where the first transaction matches. var merkleBlockOne = MsgMerkleBlock{ Header: BlockHeader{ - Version: 1, - PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, - 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, - 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, - }), - MerkleRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. + Version: 1, + NumPrevBlocks: 2, + PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, + MerkleRoot: daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, @@ -387,8 +351,8 @@ var merkleBlockOne = MsgMerkleBlock{ Nonce: 0x9962e301, // 2573394689 }, Transactions: 1, - Hashes: []*chainhash.Hash{ - (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. + Hashes: []*daghash.Hash{ + (*daghash.Hash)(&[daghash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, @@ -402,11 +366,16 @@ var merkleBlockOne = MsgMerkleBlock{ // block one of the block chain where the first transaction matches. 
var merkleBlockOneBytes = []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0x02, // NumPrevBlocks + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock - 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, // MerkleRoot 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot diff --git a/wire/msgnotfound_test.go b/wire/msgnotfound_test.go index b840f87d0..2dfabbea9 100644 --- a/wire/msgnotfound_test.go +++ b/wire/msgnotfound_test.go @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -37,7 +37,7 @@ func TestNotFound(t *testing.T) { } // Ensure inventory vectors are added properly. - hash := chainhash.Hash{} + hash := daghash.Hash{} iv := NewInvVect(InvTypeBlock, &hash) err := msg.AddInvVect(iv) if err != nil { @@ -64,14 +64,14 @@ func TestNotFound(t *testing.T) { func TestNotFoundWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := chainhash.NewHashFromStr(hashStr) + blockHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Transaction 1 of Block 203707 hash. 
hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0" - txHash, err := chainhash.NewHashFromStr(hashStr) + txHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -229,7 +229,7 @@ func TestNotFoundWireErrors(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := chainhash.NewHashFromStr(hashStr) + blockHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } diff --git a/wire/msgreject.go b/wire/msgreject.go index 5077970c3..d998ee79a 100644 --- a/wire/msgreject.go +++ b/wire/msgreject.go @@ -8,7 +8,7 @@ import ( "fmt" "io" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) // RejectCode represents a numeric value by which a remote peer indicates @@ -68,7 +68,7 @@ type MsgReject struct { // Hash identifies a specific block or transaction that was rejected // and therefore only applies the MsgBlock and MsgTx messages. - Hash chainhash.Hash + Hash daghash.Hash } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. diff --git a/wire/msgtx.go b/wire/msgtx.go index 98fa072dc..5fae96193 100644 --- a/wire/msgtx.go +++ b/wire/msgtx.go @@ -11,7 +11,7 @@ import ( "math" "strconv" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" ) const ( @@ -57,7 +57,7 @@ const ( // minTxInPayload is the minimum payload size for a transaction input. // PreviousOutPoint.Hash + PreviousOutPoint.Index 4 bytes + Varint for // SignatureScript length 1 byte + Sequence 4 bytes. - minTxInPayload = 9 + chainhash.HashSize + minTxInPayload = 9 + daghash.HashSize // maxTxInPerMessage is the maximum number of transactions inputs that // a transaction which fits into a message could possibly have. 
@@ -156,13 +156,13 @@ var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems) // OutPoint defines a bitcoin data type that is used to track previous // transaction outputs. type OutPoint struct { - Hash chainhash.Hash + Hash daghash.Hash Index uint32 } // NewOutPoint returns a new bitcoin transaction outpoint point with the // provided hash and index. -func NewOutPoint(hash *chainhash.Hash, index uint32) *OutPoint { +func NewOutPoint(hash *daghash.Hash, index uint32) *OutPoint { return &OutPoint{ Hash: *hash, Index: index, @@ -177,9 +177,9 @@ func (o OutPoint) String() string { // maximum message payload may increase in the future and this // optimization may go unnoticed, so allocate space for 10 decimal // digits, which will fit any uint32. - buf := make([]byte, 2*chainhash.HashSize+1, 2*chainhash.HashSize+1+10) + buf := make([]byte, 2*daghash.HashSize+1, 2*daghash.HashSize+1+10) copy(buf, o.Hash.String()) - buf[2*chainhash.HashSize] = ':' + buf[2*daghash.HashSize] = ':' buf = strconv.AppendUint(buf, uint64(o.Index), 10) return string(buf) } @@ -259,14 +259,14 @@ func (msg *MsgTx) AddTxOut(to *TxOut) { } // TxHash generates the Hash for the transaction. -func (msg *MsgTx) TxHash() chainhash.Hash { +func (msg *MsgTx) TxHash() daghash.Hash { // Encode the transaction and calculate double sha256 on the result. // Ignore the error returns since the only way the encode could fail // is being out of memory or due to nil pointers, both of which would // cause a run-time panic. 
buf := bytes.NewBuffer(make([]byte, 0, msg.SerializeSize())) _ = msg.Serialize(buf) - return chainhash.DoubleHashH(buf.Bytes()) + return daghash.DoubleHashH(buf.Bytes()) } // Copy creates a deep copy of a transaction so that the original does not get @@ -431,8 +431,8 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error { totalScriptSize += uint64(len(to.PkScript)) } - uint64LockTime, err := binarySerializer.Uint64(r, littleEndian) - msg.LockTime = uint64LockTime + lockTime, err := binarySerializer.Uint64(r, littleEndian) + msg.LockTime = uint64(lockTime) if err != nil { returnScriptBuffers() return err @@ -543,7 +543,7 @@ func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error { } } - return binarySerializer.PutUint64(w, littleEndian, uint64(msg.LockTime)) + return binarySerializer.PutUint64(w, littleEndian, msg.LockTime) } // Serialize encodes the transaction to w using a format that suitable for @@ -728,7 +728,7 @@ func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn) error { return err } - return binarySerializer.PutUint64(w, littleEndian, ti.Sequence) + return binarySerializer.PutUint64(w, littleEndian, uint64(ti.Sequence)) } // readTxOut reads the next sequence of bytes from r as a transaction output diff --git a/wire/msgtx_test.go b/wire/msgtx_test.go index e3018c812..602c16442 100644 --- a/wire/msgtx_test.go +++ b/wire/msgtx_test.go @@ -12,7 +12,7 @@ import ( "reflect" "testing" - "github.com/daglabs/btcd/chaincfg/chainhash" + "github.com/daglabs/btcd/dagconfig/daghash" "github.com/davecgh/go-spew/spew" ) @@ -22,7 +22,7 @@ func TestTx(t *testing.T) { // Block 100000 hash. hashStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hash, err := chainhash.NewHashFromStr(hashStr) + hash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) } @@ -130,7 +130,7 @@ func TestTx(t *testing.T) { func TestTxHash(t *testing.T) { // Hash of first transaction from block 113875. 
hashStr := "768f7e5de1e0a209c9f4e89a5b610d15e888dfe8f32be7f92462edc5815fc025" - wantHash, err := chainhash.NewHashFromStr(hashStr) + wantHash, err := daghash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewHashFromStr: %v", err) return @@ -140,7 +140,7 @@ func TestTxHash(t *testing.T) { msgTx := NewMsgTx(1) txIn := TxIn{ PreviousOutPoint: OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, @@ -643,7 +643,7 @@ var multiTx = &MsgTx{ TxIn: []*TxIn{ { PreviousOutPoint: OutPoint{ - Hash: chainhash.Hash{}, + Hash: daghash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{