[DEV-234] Add to txindex a method to return the block in which a tx was accepted (or an indication it was not accepted) (#116)

* [DEV-234] add TxAcceptedInBlock and TxBlocks

* [DEV-234] test TxAcceptedInBlock and TxBlocks

* [DEV-234] test TxAcceptedInBlock and TxFirstBlockRegion

* [DEV-234] rename selectedPathSet to selectedPathChain

* [DEV-234] set indexers db as part of index manager initialization

* [DEV-234] remove redundant dag instance in txindex

* [DEV-234] fix TestTxIndexConnectBlock and add DAGParams as part of config in DAGSetup

* [DEV-234] TestTxIndexConnectBlock make K=1 to make calculations easier

* [DEV-234] rename TxAcceptingBlock to BlockThatAcceptedTx

* [DEV-234] update block fields names in txindex_test.go

* [DEV-234] rename selectedPathChain -> selectedPathChainSet
This commit is contained in:
Ori Newman 2018-11-05 16:11:54 +02:00 committed by stasatdaglabs
parent 4c2ce469aa
commit 7093155c3a
22 changed files with 694 additions and 247 deletions

View File

@ -13,7 +13,9 @@ import (
func TestMaybeAcceptBlockErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", &dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
}

View File

@ -22,7 +22,9 @@ func TestAncestorErrors(t *testing.T) {
func TestFlushToDBErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", &dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
}

View File

@ -1,7 +1,6 @@
package blockdag
import (
"sort"
"strings"
"github.com/daglabs/btcd/dagconfig/daghash"
@ -106,6 +105,12 @@ func (bs blockSet) contains(block *blockNode) bool {
return ok
}
// containsHash returns true iff this set contains a block hash.
// Unlike contains, which takes a *blockNode, this checks membership
// by hash value alone via a single map lookup.
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
_, ok := bs[*hash]
return ok
}
// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
@ -129,9 +134,7 @@ func (bs blockSet) hashes() []daghash.Hash {
for hash := range bs {
hashes = append(hashes, hash)
}
sort.Slice(hashes, func(i, j int) bool {
return daghash.Less(&hashes[i], &hashes[j])
})
daghash.Sort(hashes)
return hashes
}

View File

@ -1000,6 +1000,11 @@ func (dag *BlockDAG) GetUTXOEntry(outPoint wire.OutPoint) (*UTXOEntry, bool) {
return dag.virtual.utxoSet.get(outPoint)
}
// IsInSelectedPathChain returns whether or not a block hash is found in the selected path
// chain of the virtual block. It delegates to the virtual block's
// selectedPathChainSet, so the answer reflects the DAG's current tips.
func (dag *BlockDAG) IsInSelectedPathChain(blockHash *daghash.Hash) bool {
return dag.virtual.selectedPathChainSet.containsHash(blockHash)
}
// Height returns the height of the highest tip in the DAG
func (dag *BlockDAG) Height() int32 {
return dag.virtual.tips().maxHeight()
@ -1405,7 +1410,7 @@ type IndexManager interface {
// channel parameter specifies a channel the caller can close to signal
// that the process should be interrupted. It can be nil if that
// behavior is not desired.
Init(*BlockDAG, <-chan struct{}) error
Init(database.DB, *BlockDAG, <-chan struct{}) error
// ConnectBlock is invoked when a new block has been connected to the
// DAG.
@ -1537,7 +1542,7 @@ func New(config *Config) (*BlockDAG, error) {
// Initialize and catch up all of the currently active optional indexes
// as needed.
if config.IndexManager != nil {
err := config.IndexManager.Init(&dag, config.Interrupt)
err := config.IndexManager.Init(dag.db, &dag, config.Interrupt)
if err != nil {
return nil, err
}

View File

@ -41,8 +41,9 @@ func TestBlockCount(t *testing.T) {
}
// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := DAGSetup("haveblock",
&dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("haveblock", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("Failed to setup chain instance: %v", err)
}
@ -89,8 +90,9 @@ func TestHaveBlock(t *testing.T) {
}
// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := DAGSetup("haveblock",
&dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("haveblock", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("Failed to setup chain instance: %v", err)
}
@ -811,7 +813,9 @@ func testErrorThroughPatching(t *testing.T, expectedErrorMessage string, targetF
}
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("testErrorThroughPatching", &dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("testErrorThroughPatching", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}

View File

@ -591,8 +591,8 @@ func (idx *AddrIndex) NeedsInputs() bool {
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init() error {
// Nothing to do.
func (idx *AddrIndex) Init(db database.DB) error {
idx.db = db
return nil
}
@ -914,9 +914,8 @@ func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(db database.DB, dagParams *dagconfig.Params) *AddrIndex {
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
return &AddrIndex{
db: db,
dagParams: dagParams,
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.Hash]*util.Tx),
addrsByTx: make(map[daghash.Hash]map[[addrKeySize]byte]struct{}),

View File

@ -88,8 +88,9 @@ var _ Indexer = (*CfIndex)(nil)
// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init() error {
return nil // Nothing to do.
func (idx *CfIndex) Init(db database.DB) error {
idx.db = db
return nil
}
// Key returns the database key to use for the index as a byte slice. This is
@ -345,8 +346,8 @@ func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewCfIndex(db database.DB, dagParams *dagconfig.Params) *CfIndex {
return &CfIndex{db: db, dagParams: dagParams}
func NewCfIndex(dagParams *dagconfig.Params) *CfIndex {
return &CfIndex{dagParams: dagParams}
}
// DropCfIndex drops the CF index from the provided database if exists.

View File

@ -48,7 +48,7 @@ type Indexer interface {
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case the index was just created.
Init() error
Init(db database.DB) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.

View File

@ -127,7 +127,7 @@ func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
// catch up due to the I/O contention.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) Init(blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
// Nothing to do when no indexes are enabled.
if len(m.enabledIndexes) == 0 {
return nil
@ -137,6 +137,8 @@ func (m *Manager) Init(blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) e
return errInterruptRequested
}
m.db = db
// Finish any drops that were previously interrupted.
if err := m.maybeFinishDrops(interrupt); err != nil {
return err
@ -159,7 +161,7 @@ func (m *Manager) Init(blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) e
// Initialize each of the enabled indexes.
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(); err != nil {
if err := indexer.Init(db); err != nil {
return err
}
}
@ -226,9 +228,8 @@ func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockda
//
// The manager returned satisfies the blockchain.IndexManager interface and thus
// cleanly plugs into the normal blockchain processing path.
func NewManager(db database.DB, enabledIndexes []Indexer) *Manager {
func NewManager(enabledIndexes []Indexer) *Manager {
return &Manager{
db: db,
enabledIndexes: enabledIndexes,
}
}

View File

@ -325,7 +325,9 @@ var _ Indexer = (*TxIndex)(nil)
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init() error {
func (idx *TxIndex) Init(db database.DB) error {
idx.db = db
// Find the latest known block id field for the internal block id
// index and initialize it. This is done because it's a lot more
// efficient to do a single search at initialize time than it is to
@ -441,7 +443,7 @@ func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ *blockda
return nil
}
// TxFirstBlockRegion returns the block region for the provided transaction hash
// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for the both the entry and the error.
@ -457,6 +459,86 @@ func (idx *TxIndex) TxFirstBlockRegion(hash *daghash.Hash) (*database.BlockRegio
return region, err
}
// TxBlocks returns the hashes of the blocks where the transaction exists.
// It looks the transaction hash up in the including-blocks index within a
// read-only database transaction. An error is returned if the transaction
// has no entry in the index (see dbFetchTxBlocks).
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]daghash.Hash, error) {
blockHashes := make([]daghash.Hash, 0)
err := idx.db.View(func(dbTx database.Tx) error {
var err error
// blockHashes is assigned from the enclosing scope so the result
// survives past the View closure.
blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
if err != nil {
return err
}
return nil
})
return blockHashes, err
}
// dbFetchTxBlocks fetches the hashes of all blocks that include the given
// transaction from the including-blocks index. It returns a
// database.ErrCorruption error when no bucket exists for the transaction,
// since every indexed transaction must be included in at least one block.
func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]daghash.Hash, error) {
blockHashes := make([]daghash.Hash, 0)
bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
if bucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No including blocks "+
"were found for %s", txHash),
}
}
// Each key in the bucket is a serialized block ID; resolve each ID to
// its block hash.
err := bucket.ForEach(func(blockIDBytes, _ []byte) error {
blockID := byteOrder.Uint32(blockIDBytes)
blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
if err != nil {
return err
}
blockHashes = append(blockHashes, *blockHash)
return nil
})
if err != nil {
return nil, err
}
return blockHashes, nil
}
// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view).
// The lookup runs inside a read-only database transaction; a nil hash with
// a nil error indicates the transaction was not accepted by any block on
// the selected path chain (see dbFetchTxAcceptingBlock).
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txHash *daghash.Hash) (*daghash.Hash, error) {
var acceptingBlock *daghash.Hash
err := idx.db.View(func(dbTx database.Tx) error {
var err error
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txHash, dag)
return err
})
return acceptingBlock, err
}
// dbFetchTxAcceptingBlock scans the accepting-blocks index for the given
// transaction and returns the hash of the first accepting block that lies
// on the DAG's selected path chain. It returns (nil, nil) when none of the
// accepting blocks is on the selected path chain, and a
// database.ErrCorruption error when the transaction has no accepting-blocks
// bucket or the bucket is empty.
func dbFetchTxAcceptingBlock(dbTx database.Tx, txHash *daghash.Hash, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txHash[:])
if bucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No accepting blocks "+
"were found for %s", txHash),
}
}
cursor := bucket.Cursor()
// An existing bucket with no entries is also treated as corruption.
if !cursor.First() {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No accepting blocks "+
"were found for %s", txHash),
}
}
// Iterate the accepting block IDs and return the first one whose block
// is on the selected path chain of the virtual block.
for ; cursor.Key() != nil; cursor.Next() {
blockID := byteOrder.Uint32(cursor.Key())
blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
if err != nil {
return nil, err
}
if dag.IsInSelectedPathChain(blockHash) {
return blockHash, nil
}
}
return nil, nil
}
// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockchain to the respective
// block, location within the block, and size of the transaction.
@ -464,8 +546,8 @@ func (idx *TxIndex) TxFirstBlockRegion(hash *daghash.Hash) (*database.BlockRegio
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewTxIndex(db database.DB) *TxIndex {
return &TxIndex{db: db}
func NewTxIndex() *TxIndex {
return &TxIndex{}
}
// dropBlockIDIndex drops the internal block id index.

View File

@ -2,174 +2,517 @@ package indexers
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"math"
"reflect"
"testing"
"time"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
func tempDb() (database.DB, func(), error) {
dbPath, err := ioutil.TempDir("", "ffldb")
if err != nil {
return nil, nil, err
}
db, err := database.Create("ffldb", dbPath, wire.MainNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
teardown := func() {
db.Close()
os.RemoveAll(dbPath)
}
return db, teardown, nil
}
func TestTxIndexConnectBlock(t *testing.T) {
db, teardown, err := tempDb()
blocks := make(map[daghash.Hash]*util.Block)
processBlock := func(t *testing.T, dag *blockdag.BlockDAG, msgBlock *wire.MsgBlock, blockName string) {
block := util.NewBlock(msgBlock)
blocks[*block.Hash()] = block
isOrphan, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
}
if isOrphan {
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
}
}
txIndex := NewTxIndex()
indexManager := NewManager([]Indexer{txIndex})
params := dagconfig.SimNetParams
params.CoinbaseMaturity = 1
params.K = 1
config := blockdag.Config{
IndexManager: indexManager,
DAGParams: &params,
}
dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
processBlock(t, dag, &block1, "1")
processBlock(t, dag, &block2, "2")
processBlock(t, dag, &block3, "3")
block3TxHash := block3Tx.TxHash()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxHash)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: %v", err)
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.Header.BlockHash()
if !block3TxNewAcceptedBlock.IsEqual(&block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block3TxNewAcceptedBlock)
}
err = db.Update(func(dbTx database.Tx) error {
txIndex := NewTxIndex(db)
err := txIndex.Create(dbTx)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't create txIndex: %v", err)
}
msgBlock1 := wire.NewMsgBlock(wire.NewBlockHeader(1,
[]daghash.Hash{{1}}, &daghash.Hash{}, 1, 1))
dummyPrevOutHash, err := daghash.NewHashFromStr("01")
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: NewShaHashFromStr: unexpected error: %v", err)
}
dummyPrevOut1 := wire.OutPoint{Hash: *dummyPrevOutHash, Index: 0}
dummySigScript := bytes.Repeat([]byte{0x00}, 65)
dummyTxOut := &wire.TxOut{
Value: 5000000000,
PkScript: bytes.Repeat([]byte{0x00}, 65),
}
processBlock(t, dag, &block3A, "3A")
processBlock(t, dag, &block4, "4")
processBlock(t, dag, &block5, "5")
tx1 := wire.NewMsgTx(wire.TxVersion)
tx1.AddTxIn(wire.NewTxIn(&dummyPrevOut1, dummySigScript))
tx1.AddTxOut(dummyTxOut)
msgBlock1.AddTransaction(tx1)
block1 := util.NewBlock(msgBlock1)
err = txIndex.ConnectBlock(dbTx, block1, &blockdag.BlockDAG{}, []*blockdag.TxWithBlockHash{
{
Tx: util.NewTx(tx1),
InBlock: block1.Hash(),
},
})
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't connect block 1 to txindex")
}
tx1Hash := tx1.TxHash()
block1IDBytes := make([]byte, 4)
byteOrder.PutUint32(block1IDBytes, uint32(1))
tx1IncludingBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(tx1Hash[:])
if tx1IncludingBucket == nil {
t.Fatalf("TestTxIndexConnectBlock: No including blocks bucket was found for tx1")
}
block1Tx1includingBlocksIndexEntry := tx1IncludingBucket.Get(block1IDBytes)
if len(block1Tx1includingBlocksIndexEntry) == 0 {
t.Fatalf("TestTxIndexConnectBlock: there was no entry for block1 in tx1's including blocks bucket")
}
tx1Offset := byteOrder.Uint32(block1Tx1includingBlocksIndexEntry[:4])
tx1Len := byteOrder.Uint32(block1Tx1includingBlocksIndexEntry[4:])
block1Bytes, err := block1.Bytes()
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block 1 to bytes")
}
tx1InBlock1 := block1Bytes[tx1Offset : tx1Offset+tx1Len]
wTx1 := bytes.NewBuffer(make([]byte, 0, tx1.SerializeSize()))
tx1.BtcEncode(wTx1, 0)
tx1Bytes := wTx1.Bytes()
if !reflect.DeepEqual(tx1Bytes, tx1InBlock1) {
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match tx1")
}
tx1AcceptingBlocksBucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(tx1Hash[:])
if tx1AcceptingBlocksBucket == nil {
t.Fatalf("TestTxIndexConnectBlock: No accepting blocks bucket was found for tx1")
}
block1Tx1AcceptingEntry := tx1AcceptingBlocksBucket.Get(block1IDBytes)
tx1IncludingBlockID := byteOrder.Uint32(block1Tx1AcceptingEntry)
if tx1IncludingBlockID != 1 {
t.Fatalf("TestTxIndexConnectBlock: tx1 should've been included in block 1, but got %v", tx1IncludingBlockID)
}
msgBlock2 := wire.NewMsgBlock(wire.NewBlockHeader(1,
[]daghash.Hash{{2}}, &daghash.Hash{}, 1, 1))
dummyPrevOut2 := wire.OutPoint{Hash: *dummyPrevOutHash, Index: 1}
tx2 := wire.NewMsgTx(wire.TxVersion)
tx2.AddTxIn(wire.NewTxIn(&dummyPrevOut2, dummySigScript))
tx2.AddTxOut(dummyTxOut)
msgBlock2.AddTransaction(tx2)
block2 := util.NewBlock(msgBlock2)
err = txIndex.ConnectBlock(dbTx, block2, &blockdag.BlockDAG{}, []*blockdag.TxWithBlockHash{
{
Tx: util.NewTx(tx1),
InBlock: block1.Hash(),
},
{
Tx: util.NewTx(tx2),
InBlock: block2.Hash(),
},
})
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't connect block 2 to txindex")
}
tx2Hash := tx2.TxHash()
block2IDBytes := make([]byte, 4)
byteOrder.PutUint32(block2IDBytes, uint32(2))
tx2IncludingBlocksBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(tx2Hash[:])
if tx2IncludingBlocksBucket == nil {
t.Fatalf("TestTxIndexConnectBlock: No including blocks bucket was found for tx2")
}
block2Tx2includingBlocksIndexEntry := tx2IncludingBlocksBucket.Get(block2IDBytes)
if len(block2Tx2includingBlocksIndexEntry) == 0 {
t.Fatalf("TestTxIndexConnectBlock: there was no entry for block2 in tx2's including blocks bucket")
}
tx2AcceptingBlocksBucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(tx2Hash[:])
if tx2AcceptingBlocksBucket == nil {
t.Fatalf("TestTxIndexConnectBlock: No accepting blocks bucket was found for tx2")
}
block2Tx2AcceptingEntry := tx2AcceptingBlocksBucket.Get(block2IDBytes)
tx2IncludingBlockID := byteOrder.Uint32(block2Tx2AcceptingEntry)
if tx2IncludingBlockID != 2 {
t.Fatalf("TestTxIndexConnectBlock: tx2 should've been included in block 2, but got %v", tx1IncludingBlockID)
}
block2Tx1AcceptingEntry := tx1AcceptingBlocksBucket.Get(block2IDBytes)
tx1Block2IncludingBlockID := byteOrder.Uint32(block2Tx1AcceptingEntry)
if tx1Block2IncludingBlockID != 1 {
t.Fatalf("TestTxIndexConnectBlock: tx2 should've been included in block 1, but got %v", tx1Block2IncludingBlockID)
}
return nil
})
block3TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxHash)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: %v", err)
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3AHash := block3A.Header.BlockHash()
if !block3TxAcceptedBlock.IsEqual(&block3AHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3AHash, block3TxAcceptedBlock)
}
region, err := txIndex.TxFirstBlockRegion(&block3TxHash)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
}
regionBlock, ok := blocks[*region.Hash]
if !ok {
t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
}
regionBlockBytes, err := regionBlock.Bytes()
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
}
block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]
block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
block3Tx.BtcEncode(block3TxBuf, 0)
blockTxBytes := block3TxBuf.Bytes()
if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
}
}
var block1 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumParentBlocks: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x4a, 0xc1, 0x82, 0x2e, 0x43, 0x05, 0xea, 0x0c,
0x4f, 0xcc, 0x77, 0x87, 0xae, 0x26, 0x48, 0x87,
0x50, 0x13, 0xee, 0x2f, 0x55, 0xa7, 0x18, 0xa7,
0x1e, 0xf2, 0xd8, 0x7c, 0xc1, 0x13, 0xac, 0x22,
},
},
MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
0xec, 0x37, 0x81, 0x75, 0x51, 0x79, 0x41, 0x34,
0x3a, 0xae, 0x05, 0x48, 0x67, 0xfa, 0xdf, 0x84,
0xef, 0x06, 0x5b, 0x93, 0x07, 0xa8, 0xc2, 0xb7,
0x2a, 0x94, 0x07, 0x3b, 0x5f, 0xee, 0xb8, 0x6a,
}),
Timestamp: time.Unix(0x5bd58c4a, 0),
Bits: 0x207fffff,
Nonce: 0xdffffffffffffffa,
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x51, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
},
}
var block2 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumParentBlocks: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x42, 0xb9, 0x2c, 0xee, 0x3e, 0x3e, 0x35, 0x02,
0xf5, 0x8d, 0xd2, 0xc8, 0xff, 0x61, 0xe3, 0x44,
0x59, 0xb2, 0x5d, 0x72, 0x10, 0x29, 0x62, 0x58,
0x3f, 0xc9, 0x41, 0xe2, 0xcd, 0xa9, 0x05, 0x11,
},
},
MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
0x3e, 0x89, 0x5f, 0xb4, 0xa8, 0x2f, 0x64, 0xb9,
0xe7, 0x1d, 0x5d, 0xce, 0x41, 0x4a, 0xb0, 0x36,
0x4e, 0xd0, 0x4b, 0xfc, 0x0c, 0xe1, 0x82, 0xfc,
0x51, 0x0d, 0x03, 0x7b, 0x8c, 0xdd, 0x3e, 0x49,
}),
Timestamp: time.Unix(0x5bd58c4b, 0),
Bits: 0x207fffff,
Nonce: 0x9ffffffffffffffb,
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x52, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{
0xec, 0x37, 0x81, 0x75, 0x51, 0x79, 0x41, 0x34,
0x3a, 0xae, 0x05, 0x48, 0x67, 0xfa, 0xdf, 0x84,
0xef, 0x06, 0x5b, 0x93, 0x07, 0xa8, 0xc2, 0xb7,
0x2a, 0x94, 0x07, 0x3b, 0x5f, 0xee, 0xb8, 0x6a,
},
Index: 0,
},
SignatureScript: []byte{
0x47, 0x30, 0x44, 0x02, 0x20, 0x5d, 0xca, 0x41,
0xb0, 0x73, 0x9e, 0xba, 0x0c, 0xba, 0x59, 0xdd,
0xb5, 0x6a, 0x6e, 0xd2, 0xd2, 0x36, 0x61, 0xa5,
0xa0, 0x5c, 0xb5, 0x2b, 0xee, 0x5f, 0x30, 0x62,
0x72, 0xb3, 0x26, 0xa2, 0xdb, 0x02, 0x20, 0x0d,
0xc5, 0x22, 0xd8, 0x88, 0x5a, 0xf7, 0xef, 0x60,
0xa6, 0xd9, 0x5c, 0x7a, 0x44, 0x96, 0xfc, 0x14,
0x66, 0x74, 0xda, 0x2b, 0x6c, 0x99, 0x2c, 0x56,
0x34, 0x3d, 0x64, 0xdf, 0xc2, 0x36, 0xe8, 0x01,
0x21, 0x02, 0xa6, 0x73, 0x63, 0x8c, 0xb9, 0x58,
0x7c, 0xb6, 0x8e, 0xa0, 0x8d, 0xbe, 0xf6, 0x85,
0xc6, 0xf2, 0xd2, 0xa7, 0x51, 0xa8, 0xb3, 0xc6,
0xf2, 0xa7, 0xe9, 0xa4, 0x99, 0x9e, 0x6e, 0x4b,
0xfa, 0xf5,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
},
}
var block3Tx *wire.MsgTx = &wire.MsgTx{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{
0x69, 0x11, 0xbd, 0x7e, 0x46, 0x5e, 0xe8, 0xf7,
0xbe, 0x80, 0xb0, 0x21, 0x6a, 0xc8, 0xb4, 0xea,
0xef, 0xfa, 0x6a, 0x34, 0x75, 0x6e, 0xb5, 0x96,
0xd9, 0x3b, 0xe2, 0x6a, 0xd6, 0x49, 0xac, 0x6e,
},
Index: 0,
},
SignatureScript: []byte{
0x48, 0x30, 0x45, 0x02, 0x21, 0x00, 0xea, 0xa8,
0xa5, 0x8b, 0x2d, 0xeb, 0x15, 0xc1, 0x18, 0x79,
0xa4, 0xad, 0xc3, 0xde, 0x57, 0x09, 0xac, 0xdb,
0x16, 0x16, 0x9f, 0x07, 0xe8, 0x7d, 0xbe, 0xf1,
0x4b, 0xaa, 0xd3, 0x76, 0xb4, 0x87, 0x02, 0x20,
0x03, 0xb3, 0xee, 0xc8, 0x9f, 0x87, 0x18, 0xee,
0xf3, 0xc3, 0x29, 0x29, 0x57, 0xb9, 0x93, 0x95,
0x4a, 0xe9, 0x49, 0x74, 0x90, 0xa1, 0x5b, 0xae,
0x49, 0x16, 0xa9, 0x3e, 0xb8, 0xf0, 0xf9, 0x6b,
0x01, 0x21, 0x02, 0xa6, 0x73, 0x63, 0x8c, 0xb9,
0x58, 0x7c, 0xb6, 0x8e, 0xa0, 0x8d, 0xbe, 0xf6,
0x85, 0xc6, 0xf2, 0xd2, 0xa7, 0x51, 0xa8, 0xb3,
0xc6, 0xf2, 0xa7, 0xe9, 0xa4, 0x99, 0x9e, 0x6e,
0x4b, 0xfa, 0xf5,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
}
var block3 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumParentBlocks: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x8b, 0xdf, 0xd1, 0x48, 0xef, 0xf5, 0x2b, 0x5e,
0xfe, 0x26, 0xba, 0x37, 0xcb, 0x23, 0x0d, 0x41,
0x24, 0x80, 0xfe, 0x9a, 0x38, 0x90, 0xb9, 0xd3,
0x07, 0x30, 0xcc, 0xa0, 0x4f, 0x4e, 0xf1, 0x02,
},
},
MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
0x24, 0x0f, 0x21, 0x89, 0x94, 0xd1, 0x77, 0x32,
0xff, 0x5d, 0xb4, 0xe9, 0x11, 0xd2, 0x74, 0xc9,
0x0f, 0x0c, 0xb7, 0xe5, 0x16, 0xf6, 0xca, 0x63,
0xac, 0xaa, 0x6c, 0x23, 0x42, 0xe9, 0xd5, 0x58,
}),
Timestamp: time.Unix(0x5bd58c4c, 0),
Bits: 0x207fffff,
Nonce: 0x7ffffffffffffffc,
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x53, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
block3Tx,
},
}
var block3A = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumParentBlocks: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x8b, 0xdf, 0xd1, 0x48, 0xef, 0xf5, 0x2b, 0x5e,
0xfe, 0x26, 0xba, 0x37, 0xcb, 0x23, 0x0d, 0x41,
0x24, 0x80, 0xfe, 0x9a, 0x38, 0x90, 0xb9, 0xd3,
0x07, 0x30, 0xcc, 0xa0, 0x4f, 0x4e, 0xf1, 0x02,
},
},
MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
0x4b, 0xd6, 0xbf, 0x21, 0xa0, 0x62, 0x77, 0xb5,
0xc0, 0xd3, 0x3b, 0x31, 0x9d, 0x30, 0x9b, 0x89,
0x93, 0x75, 0x50, 0xdb, 0x3b, 0x87, 0x23, 0x67,
0x2f, 0xeb, 0xf9, 0xf2, 0x1b, 0x63, 0x5f, 0x1c,
}),
Timestamp: time.Unix(0x5bd58c4c, 0),
Bits: 0x207fffff,
Nonce: 0xdffffffffffffff9,
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x53, 0x51, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
block3Tx,
},
}
var block4 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumParentBlocks: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0xde, 0xe3, 0x62, 0x5f, 0x0c, 0x98, 0x26, 0x5f,
0x9b, 0x3e, 0xb1, 0xd9, 0x32, 0x0a, 0x84, 0xb3,
0xe1, 0xbe, 0xe2, 0xb7, 0x8e, 0x4a, 0xfb, 0x97,
0x7a, 0x53, 0x32, 0xff, 0x32, 0x17, 0xfc, 0x57,
},
},
MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
0xe1, 0x13, 0x4a, 0xd8, 0xd5, 0x43, 0x33, 0x95,
0x55, 0x19, 0x00, 0xaf, 0x13, 0x3f, 0xd6, 0x3a,
0x63, 0x98, 0x50, 0x61, 0xfc, 0x02, 0x2c, 0x44,
0x1b, 0x0e, 0x74, 0x7d, 0x5c, 0x19, 0x58, 0xb4,
}),
Timestamp: time.Unix(0x5bd58c4d, 0),
Bits: 0x207fffff,
Nonce: 0xdffffffffffffffa,
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x54, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
},
}
var block5 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0xfd, 0x96, 0x3c, 0xfb, 0xed, 0x5a, 0xeb, 0xdb, 0x3d, 0x8e, 0xe9, 0x53, 0xf1, 0xe6, 0xad, 0x12, 0x21, 0x02, 0x55, 0x62, 0xbc, 0x2e, 0x52, 0xee, 0xb9, 0xd0, 0x60, 0xda, 0xd6, 0x4a, 0x20, 0x5a},
[32]byte{ // Make go vet happy.
0xec, 0x42, 0x2c, 0x0c, 0x8c, 0x94, 0x50, 0x17, 0x85, 0xbb, 0x8c, 0xaf, 0x72, 0xd9, 0x39, 0x28, 0x26, 0xaa, 0x42, 0x8d, 0xd5, 0x09, 0xa2, 0xb6, 0xa6, 0x8c, 0x4e, 0x85, 0x72, 0x44, 0xd5, 0x70},
},
MerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
0x77, 0xc7, 0x09, 0x46, 0x0f, 0x81, 0x37, 0xca,
0xf5, 0xec, 0xa5, 0xae, 0x4c, 0xad, 0x65, 0xc5,
0xdd, 0x73, 0x4f, 0xb5, 0xcf, 0x04, 0x20, 0x38,
0x29, 0x10, 0x5b, 0x66, 0xfe, 0x15, 0x8a, 0xfb,
}),
Timestamp: time.Unix(0x5bd58c4e, 0),
Bits: 0x207fffff,
Nonce: 4,
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: daghash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x55, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x76, 0xa9, 0x14, 0x3d, 0xee, 0x47, 0x71, 0x6e,
0x3c, 0xfa, 0x57, 0xdf, 0x45, 0x11, 0x34, 0x73,
0xa6, 0x31, 0x2e, 0xbe, 0xae, 0xf3, 0x11, 0x88,
0xac,
},
},
},
LockTime: 0,
},
},
}

View File

@ -18,8 +18,9 @@ func TestNotifications(t *testing.T) {
}
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("notifications",
&dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("notifications", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}

View File

@ -5,7 +5,6 @@ import (
"os"
"path/filepath"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb" // blank import ffldb so that its init() function runs before tests
"github.com/daglabs/btcd/txscript"
@ -49,53 +48,45 @@ func fileExists(name string) bool {
// DAGSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*BlockDAG, func(), error) {
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
}
// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
var teardown func()
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
if config.DB == nil {
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
var err error
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
config.DB.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
}
// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
// Copy the chain params to ensure any modifications the tests do to
// the chain parameters do not affect the global instance.
paramsCopy := *params
config.TimeSource = NewMedianTime()
config.SigCache = txscript.NewSigCache(1000)
// Create the DAG instance.
dag, err := New(&Config{
DB: db,
DAGParams: &paramsCopy,
Checkpoints: nil,
TimeSource: NewMedianTime(),
SigCache: txscript.NewSigCache(1000),
})
dag, err := New(&config)
if err != nil {
teardown()
err := fmt.Errorf("failed to create dag instance: %v", err)

View File

@ -44,7 +44,9 @@ func TestDAGSetupErrors(t *testing.T) {
func testDAGSetupErrorThroughPatching(t *testing.T, expectedErrorMessage string, targetFunction interface{}, replacementFunction interface{}) {
guard := monkey.Patch(targetFunction, replacementFunction)
defer guard.Unpatch()
_, tearDown, err := DAGSetup("TestDAGSetup", &dagconfig.MainNetParams)
_, tearDown, err := DAGSetup("TestDAGSetup", Config{
DAGParams: &dagconfig.MainNetParams,
})
if tearDown != nil {
defer tearDown()
}

View File

@ -842,7 +842,9 @@ func createCoinbaseTx(blockHeight int32, numOutputs uint32) (*wire.MsgTx, error)
func TestApplyUTXOChanges(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestApplyUTXOChanges", &dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("TestApplyUTXOChanges", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}

View File

@ -67,8 +67,9 @@ func TestSequenceLocksActive(t *testing.T) {
// ensure it fails.
func TestCheckConnectBlockTemplate(t *testing.T) {
// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate",
&dagconfig.SimNetParams)
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Errorf("Failed to setup chain instance: %v", err)
return

View File

@ -14,8 +14,8 @@ type virtualBlock struct {
phantomK uint32
utxoSet *FullUTXOSet
blockNode
// selectedPathSet is a block set that includes all the blocks that belong to the chain of selected parents from the virtual block.
selectedPathSet blockSet
// selectedPathChainSet is a block set that includes all the blocks that belong to the chain of selected parents from the virtual block.
selectedPathChainSet blockSet
}
// newVirtualBlock creates and returns a new VirtualBlock.
@ -24,7 +24,7 @@ func newVirtualBlock(tips blockSet, phantomK uint32) *virtualBlock {
var virtual virtualBlock
virtual.phantomK = phantomK
virtual.utxoSet = NewFullUTXOSet()
virtual.selectedPathSet = newSet()
virtual.selectedPathChainSet = newSet()
virtual.setTips(tips)
return &virtual
@ -33,10 +33,10 @@ func newVirtualBlock(tips blockSet, phantomK uint32) *virtualBlock {
// clone creates and returns a clone of the virtual block.
func (v *virtualBlock) clone() *virtualBlock {
return &virtualBlock{
phantomK: v.phantomK,
utxoSet: v.utxoSet.clone().(*FullUTXOSet),
blockNode: v.blockNode,
selectedPathSet: v.selectedPathSet,
phantomK: v.phantomK,
utxoSet: v.utxoSet.clone().(*FullUTXOSet),
blockNode: v.blockNode,
selectedPathChainSet: v.selectedPathChainSet,
}
}
@ -62,10 +62,10 @@ func (v *virtualBlock) setTips(tips blockSet) {
func (v *virtualBlock) updateSelectedPathSet(oldSelectedParent *blockNode) {
var intersectionNode *blockNode
for node := v.blockNode.selectedParent; intersectionNode == nil && node != nil; node = node.selectedParent {
if v.selectedPathSet.contains(node) {
if v.selectedPathChainSet.contains(node) {
intersectionNode = node
} else {
v.selectedPathSet.add(node)
v.selectedPathChainSet.add(node)
}
}
@ -75,7 +75,7 @@ func (v *virtualBlock) updateSelectedPathSet(oldSelectedParent *blockNode) {
if intersectionNode != nil {
for node := oldSelectedParent; !node.hash.IsEqual(&intersectionNode.hash); node = node.selectedParent {
v.selectedPathSet.remove(node)
v.selectedPathChainSet.remove(node)
}
}
}

View File

@ -138,7 +138,7 @@ func TestSelectedPath(t *testing.T) {
virtual.AddTip(tip)
}
// For now we don't have any DAG, just chain, the selected path should include all the blocks on the chain.
if !reflect.DeepEqual(virtual.selectedPathSet, firstPath) {
if !reflect.DeepEqual(virtual.selectedPathChainSet, firstPath) {
t.Fatalf("TestSelectedPath: selectedPathSet doesn't include the expected values. got %v, want %v", virtual.selectedParent, firstPath)
}
@ -150,7 +150,7 @@ func TestSelectedPath(t *testing.T) {
virtual.AddTip(tip)
}
// Because we added a chain that is much longer than the previous chain, the selected path should be re-organized.
if !reflect.DeepEqual(virtual.selectedPathSet, secondPath) {
if !reflect.DeepEqual(virtual.selectedPathChainSet, secondPath) {
t.Fatalf("TestSelectedPath: selectedPathSet didn't handle the re-org as expected. got %v, want %v", virtual.selectedParent, firstPath)
}
@ -160,7 +160,7 @@ func TestSelectedPath(t *testing.T) {
virtual.AddTip(tip)
}
// Because we added a very short chain, the selected path should not be affected.
if !reflect.DeepEqual(virtual.selectedPathSet, secondPath) {
if !reflect.DeepEqual(virtual.selectedPathChainSet, secondPath) {
t.Fatalf("TestSelectedPath: selectedPathSet did an unexpected re-org. got %v, want %v", virtual.selectedParent, firstPath)
}

View File

@ -310,17 +310,17 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
} else {
log.Info("Transaction index is enabled")
}
indexes = append(indexes, indexers.NewTxIndex(db))
indexes = append(indexes, indexers.NewTxIndex())
}
if cfg.AddrIndex {
log.Info("Address index is enabled")
indexes = append(indexes, indexers.NewAddrIndex(db, activeNetParams))
indexes = append(indexes, indexers.NewAddrIndex(activeNetParams))
}
// Create an index manager if any of the optional indexes are enabled.
var indexManager blockdag.IndexManager
if len(indexes) > 0 {
indexManager = indexers.NewManager(db, indexes)
indexManager = indexers.NewManager(indexes)
}
dag, err := blockdag.New(&blockdag.Config{

View File

@ -9,6 +9,7 @@ import (
"encoding/hex"
"fmt"
"math/big"
"sort"
"strings"
)
@ -188,3 +189,9 @@ func Less(a *Hash, b *Hash) bool {
func JoinHashesStrings(hashes []Hash, separator string) string {
return strings.Join(Strings(hashes), separator)
}
func Sort(hashes []Hash) {
sort.Slice(hashes, func(i, j int) bool {
return Less(&hashes[i], &hashes[j])
})
}

View File

@ -271,8 +271,9 @@ func newPoolHarness(dagParams *dagconfig.Params, numOutputs uint32, dbName strin
}
// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup(dbName,
&dagconfig.MainNetParams)
dag, teardownFunc, err := blockdag.DAGSetup(dbName, blockdag.Config{
DAGParams: &dagconfig.MainNetParams,
})
if err != nil {
return nil, nil, fmt.Errorf("Failed to setup DAG instance: %v", err)
}

View File

@ -2353,24 +2353,24 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
indxLog.Info("Transaction index is enabled")
}
s.TxIndex = indexers.NewTxIndex(db)
s.TxIndex = indexers.NewTxIndex()
indexes = append(indexes, s.TxIndex)
}
if config.MainConfig().AddrIndex {
indxLog.Info("Address index is enabled")
s.AddrIndex = indexers.NewAddrIndex(db, dagParams)
s.AddrIndex = indexers.NewAddrIndex(dagParams)
indexes = append(indexes, s.AddrIndex)
}
if config.MainConfig().EnableCFilters {
indxLog.Info("cf index is enabled")
s.CfIndex = indexers.NewCfIndex(db, dagParams)
s.CfIndex = indexers.NewCfIndex(dagParams)
indexes = append(indexes, s.CfIndex)
}
// Create an index manager if any of the optional indexes are enabled.
var indexManager blockdag.IndexManager
if len(indexes) > 0 {
indexManager = indexers.NewManager(db, indexes)
indexManager = indexers.NewManager(indexes)
}
// Merge given checkpoints with the default ones unless they are disabled.