[NOD-55] Change daghash hash to pointer in most places (#239)

* [NOD-55] Change daghash.Hash to pointer in most places

* [NOD-55] Fixed format error

* [NOD-55] Fixed merge error

* [NOD-55] Cancel copying hash in blockSet.hashes()
This commit is contained in:
Ori Newman 2019-04-02 13:49:47 +03:00 committed by Svarog
parent 58b427424f
commit 254eab96cd
87 changed files with 565 additions and 526 deletions

View File

@ -92,7 +92,7 @@ func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error)
nodes := newSet()
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(&parentHash)
node := blockDAG.index.LookupNode(parentHash)
if node == nil {
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
return nil, ruleError(ErrParentBlockUnknown, str)

View File

@ -29,7 +29,7 @@ type upHeap struct{ baseHeap }
func (h upHeap) Less(i, j int) bool {
if h.baseHeap[i].height == h.baseHeap[j].height {
return daghash.HashToBig(&h.baseHeap[i].hash).Cmp(daghash.HashToBig(&h.baseHeap[j].hash)) < 0
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
}
return h.baseHeap[i].height < h.baseHeap[j].height
@ -40,7 +40,7 @@ type downHeap struct{ baseHeap }
func (h downHeap) Less(i, j int) bool {
if h.baseHeap[i].height == h.baseHeap[j].height {
return daghash.HashToBig(&h.baseHeap[i].hash).Cmp(daghash.HashToBig(&h.baseHeap[j].hash)) > 0
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
}
return h.baseHeap[i].height > h.baseHeap[j].height

View File

@ -16,7 +16,7 @@ func TestBlockHeap(t *testing.T) {
block100000 := newBlockNode(&block100000Header, setFromSlice(block0), dagconfig.MainNetParams.K)
block0smallHash := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
block0smallHash.hash = daghash.Hash{}
block0smallHash.hash = &daghash.Hash{}
tests := []struct {
name string

View File

@ -78,7 +78,7 @@ func (bi *blockIndex) AddNode(node *blockNode) {
//
// This function is NOT safe for concurrent access.
func (bi *blockIndex) addNode(node *blockNode) {
bi.index[node.hash] = node
bi.index[*node.hash] = node
}
// NodeStatus provides concurrent-safe access to the status field of a node.

View File

@ -87,7 +87,7 @@ type blockNode struct {
diffChild *blockNode
// hash is the double sha 256 of the block.
hash daghash.Hash
hash *daghash.Hash
// workSum is the total amount of work in the DAG up to and including
// this node.
@ -107,8 +107,8 @@ type blockNode struct {
bits uint32
nonce uint64
timestamp int64
hashMerkleRoot daghash.Hash
idMerkleRoot daghash.Hash
hashMerkleRoot *daghash.Hash
idMerkleRoot *daghash.Hash
// status is a bitfield representing the validation state of the block. The
// status field, unlike the other fields, may be written to and so should
@ -139,6 +139,8 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
node.timestamp = blockHeader.Timestamp.Unix()
node.hashMerkleRoot = blockHeader.HashMerkleRoot
node.idMerkleRoot = blockHeader.IDMerkleRoot
} else {
node.hash = &daghash.ZeroHash
}
if len(parents) > 0 {
@ -246,7 +248,7 @@ func (node *blockNode) PastMedianTime() time.Time {
return time.Unix(medianTimestamp, 0)
}
func (node *blockNode) ParentHashes() []daghash.Hash {
func (node *blockNode) ParentHashes() []*daghash.Hash {
return node.parents.hashes()
}

View File

@ -18,7 +18,7 @@ func newSet() blockSet {
func setFromSlice(blocks ...*blockNode) blockSet {
set := newSet()
for _, block := range blocks {
set[block.hash] = block
set.add(block)
}
return set
}
@ -39,7 +39,7 @@ func (bs blockSet) highest() *blockNode {
for _, node := range bs {
if highest == nil ||
highest.height < node.height ||
(highest.height == node.height && daghash.Less(&node.hash, &highest.hash)) {
(highest.height == node.height && daghash.Less(node.hash, highest.hash)) {
highest = node
}
@ -49,13 +49,13 @@ func (bs blockSet) highest() *blockNode {
// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
bs[block.hash] = block
bs[*block.hash] = block
}
// remove removes a block from this BlockSet, if exists
// Does nothing if this set does not contain the block
func (bs blockSet) remove(block *blockNode) {
delete(bs, block.hash)
delete(bs, *block.hash)
}
// clone clones the block set
@ -104,7 +104,7 @@ func (bs blockSet) union(other blockSet) blockSet {
// contains returns true iff this set contains block
func (bs blockSet) contains(block *blockNode) bool {
_, ok := bs[block.hash]
_, ok := bs[*block.hash]
return ok
}
@ -117,13 +117,13 @@ func (bs blockSet) containsHash(hash *daghash.Hash) bool {
// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
func (bs blockSet) hashesEqual(hashes []daghash.Hash) bool {
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
if len(hashes) != len(bs) {
return false
}
for _, hash := range hashes {
if _, wasFound := bs[hash]; !wasFound {
if _, wasFound := bs[*hash]; !wasFound {
return false
}
}
@ -132,10 +132,10 @@ func (bs blockSet) hashesEqual(hashes []daghash.Hash) bool {
}
// hashes returns the hashes of the blocks in this set.
func (bs blockSet) hashes() []daghash.Hash {
hashes := make([]daghash.Hash, 0, len(bs))
for hash := range bs {
hashes = append(hashes, hash)
func (bs blockSet) hashes() []*daghash.Hash {
hashes := make([]*daghash.Hash, 0, len(bs))
for _, node := range bs {
hashes = append(hashes, node.hash)
}
daghash.Sort(hashes)
return hashes
@ -166,7 +166,7 @@ func (bs blockSet) bluest() *blockNode {
for _, node := range bs {
if bluestNode == nil ||
node.blueScore > maxScore ||
(node.blueScore == maxScore && daghash.Less(&node.hash, &bluestNode.hash)) {
(node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {
bluestNode = node
maxScore = node.blueScore
}

View File

@ -10,35 +10,36 @@ import (
func TestHashes(t *testing.T) {
bs := setFromSlice(
&blockNode{
hash: daghash.Hash{3},
hash: &daghash.Hash{3},
},
&blockNode{
hash: daghash.Hash{1},
hash: &daghash.Hash{1},
},
&blockNode{
hash: daghash.Hash{0},
hash: &daghash.Hash{0},
},
&blockNode{
hash: daghash.Hash{2},
hash: &daghash.Hash{2},
},
)
expected := []daghash.Hash{
expected := []*daghash.Hash{
{0},
{1},
{2},
{3},
}
if !daghash.AreEqual(bs.hashes(), expected) {
t.Errorf("TestHashes: hashes are not ordered as expected")
hashes := bs.hashes()
if !daghash.AreEqual(hashes, expected) {
t.Errorf("TestHashes: hashes order is %s but expected %s", hashes, expected)
}
}
func TestBlockSetHighest(t *testing.T) {
node1 := &blockNode{hash: daghash.Hash{10}, height: 1}
node2a := &blockNode{hash: daghash.Hash{20}, height: 2}
node2b := &blockNode{hash: daghash.Hash{21}, height: 2}
node3 := &blockNode{hash: daghash.Hash{30}, height: 3}
node1 := &blockNode{hash: &daghash.Hash{10}, height: 1}
node2a := &blockNode{hash: &daghash.Hash{20}, height: 2}
node2b := &blockNode{hash: &daghash.Hash{21}, height: 2}
node3 := &blockNode{hash: &daghash.Hash{30}, height: 3}
tests := []struct {
name string
@ -77,9 +78,9 @@ func TestBlockSetHighest(t *testing.T) {
}
func TestBlockSetSubtract(t *testing.T) {
node1 := &blockNode{hash: daghash.Hash{10}}
node2 := &blockNode{hash: daghash.Hash{20}}
node3 := &blockNode{hash: daghash.Hash{30}}
node1 := &blockNode{hash: &daghash.Hash{10}}
node2 := &blockNode{hash: &daghash.Hash{20}}
node3 := &blockNode{hash: &daghash.Hash{30}}
tests := []struct {
name string
@ -129,9 +130,9 @@ func TestBlockSetSubtract(t *testing.T) {
}
func TestBlockSetAddSet(t *testing.T) {
node1 := &blockNode{hash: daghash.Hash{10}}
node2 := &blockNode{hash: daghash.Hash{20}}
node3 := &blockNode{hash: daghash.Hash{30}}
node1 := &blockNode{hash: &daghash.Hash{10}}
node2 := &blockNode{hash: &daghash.Hash{20}}
node3 := &blockNode{hash: &daghash.Hash{30}}
tests := []struct {
name string
@ -181,9 +182,9 @@ func TestBlockSetAddSet(t *testing.T) {
}
func TestBlockSetAddSlice(t *testing.T) {
node1 := &blockNode{hash: daghash.Hash{10}}
node2 := &blockNode{hash: daghash.Hash{20}}
node3 := &blockNode{hash: daghash.Hash{30}}
node1 := &blockNode{hash: &daghash.Hash{10}}
node2 := &blockNode{hash: &daghash.Hash{20}}
node3 := &blockNode{hash: &daghash.Hash{30}}
tests := []struct {
name string
@ -233,9 +234,9 @@ func TestBlockSetAddSlice(t *testing.T) {
}
func TestBlockSetUnion(t *testing.T) {
node1 := &blockNode{hash: daghash.Hash{10}}
node2 := &blockNode{hash: daghash.Hash{20}}
node3 := &blockNode{hash: daghash.Hash{30}}
node1 := &blockNode{hash: &daghash.Hash{10}}
node2 := &blockNode{hash: &daghash.Hash{20}}
node3 := &blockNode{hash: &daghash.Hash{30}}
tests := []struct {
name string
@ -285,43 +286,43 @@ func TestBlockSetUnion(t *testing.T) {
}
func TestBlockSetHashesEqual(t *testing.T) {
node1 := &blockNode{hash: daghash.Hash{10}}
node2 := &blockNode{hash: daghash.Hash{20}}
node1 := &blockNode{hash: &daghash.Hash{10}}
node2 := &blockNode{hash: &daghash.Hash{20}}
tests := []struct {
name string
set blockSet
hashes []daghash.Hash
hashes []*daghash.Hash
expectedResult bool
}{
{
name: "empty set, no hashes",
set: setFromSlice(),
hashes: []daghash.Hash{},
hashes: []*daghash.Hash{},
expectedResult: true,
},
{
name: "empty set, one hash",
set: setFromSlice(),
hashes: []daghash.Hash{node1.hash},
hashes: []*daghash.Hash{node1.hash},
expectedResult: false,
},
{
name: "set and hashes of different length",
set: setFromSlice(node1, node2),
hashes: []daghash.Hash{node1.hash},
hashes: []*daghash.Hash{node1.hash},
expectedResult: false,
},
{
name: "set equal to hashes",
set: setFromSlice(node1, node2),
hashes: []daghash.Hash{node1.hash, node2.hash},
hashes: []*daghash.Hash{node1.hash, node2.hash},
expectedResult: true,
},
{
name: "set equal to hashes, different order",
set: setFromSlice(node1, node2),
hashes: []daghash.Hash{node2.hash, node1.hash},
hashes: []*daghash.Hash{node2.hash, node1.hash},
expectedResult: true,
},
}

View File

@ -188,10 +188,12 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
// Make up a header and create a block node from it.
header := &wire.BlockHeader{
Version: blockVersion,
ParentHashes: parents.hashes(),
Bits: bits,
Timestamp: timestamp,
Version: blockVersion,
ParentHashes: parents.hashes(),
Bits: bits,
Timestamp: timestamp,
HashMerkleRoot: &daghash.ZeroHash,
IDMerkleRoot: &daghash.ZeroHash,
}
return newBlockNode(header, parents, phantomK)
}
@ -208,7 +210,7 @@ func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSe
hashCounter := byte(1)
buildNode := func(parents blockSet) *blockNode {
block := newBlockNode(nil, parents, phantomK)
block.hash = daghash.Hash{hashCounter}
block.hash = &daghash.Hash{hashCounter}
hashCounter++
return block

View File

@ -170,9 +170,9 @@ func (dag *BlockDAG) HaveBlock(hash *daghash.Hash) (bool, error) {
// be in, like part of the DAG or the orphan pool.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HaveBlocks(hashes []daghash.Hash) (bool, error) {
func (dag *BlockDAG) HaveBlocks(hashes []*daghash.Hash) (bool, error) {
for _, hash := range hashes {
haveBlock, err := dag.HaveBlock(&hash)
haveBlock, err := dag.HaveBlock(hash)
if err != nil {
return false, err
}
@ -225,7 +225,7 @@ func (dag *BlockDAG) GetOrphanMissingAncestorHashes(hash *daghash.Hash) ([]*dagh
orphan, orphanExists := dag.orphans[*current]
if orphanExists {
for _, parentHash := range orphan.block.MsgBlock().Header.ParentHashes {
queue = append(queue, &parentHash)
queue = append(queue, parentHash)
}
} else {
existsInDag, err := dag.BlockExists(current)
@ -651,7 +651,7 @@ func (dag *BlockDAG) LastFinalityPointHash() *daghash.Hash {
if dag.lastFinalityPoint == nil {
return nil
}
return &dag.lastFinalityPoint.hash
return dag.lastFinalityPoint.hash
}
// checkFinalityRules checks the new block does not violate the finality rules
@ -799,7 +799,7 @@ func (node *blockNode) addTxsToAcceptanceData(txsAcceptanceData MultiBlockTxsAcc
IsAccepted: true,
})
}
txsAcceptanceData[node.hash] = blockTxsAcceptanceData
txsAcceptanceData[*node.hash] = blockTxsAcceptanceData
}
// verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO
@ -890,7 +890,7 @@ func (node *blockNode) applyBlueBlocks(selectedParentUTXO UTXOSet, blueBlocks []
for _, blueBlock := range blueBlocks {
transactions := blueBlock.Transactions()
blockTxsAcceptanceData := make(BlockTxsAcceptanceData, len(transactions))
isSelectedParent := blueBlock.Hash().IsEqual(&node.selectedParent.hash)
isSelectedParent := blueBlock.Hash().IsEqual(node.selectedParent.hash)
for i, tx := range blueBlock.Transactions() {
var isAccepted bool
if isSelectedParent {
@ -1075,7 +1075,7 @@ func (dag *BlockDAG) SelectedTipHash() *daghash.Hash {
return nil
}
return &selectedTip.hash
return selectedTip.hash
}
// UTXOSet returns the DAG's UTXO set
@ -1113,13 +1113,13 @@ func (dag *BlockDAG) BlockCount() uint64 {
}
// TipHashes returns the hashes of the DAG's tips
func (dag *BlockDAG) TipHashes() []daghash.Hash {
func (dag *BlockDAG) TipHashes() []*daghash.Hash {
return dag.virtual.tips().hashes()
}
// HighestTipHash returns the hash of the highest tip.
// This function is a placeholder for places that aren't DAG-compatible, and it's needed to be removed in the future
func (dag *BlockDAG) HighestTipHash() daghash.Hash {
func (dag *BlockDAG) HighestTipHash() *daghash.Hash {
return dag.virtual.tips().highest().hash
}
@ -1162,7 +1162,7 @@ func (dag *BlockDAG) BlockLocatorFromHash(hash *daghash.Hash) BlockLocator {
defer dag.dagLock.RUnlock()
node := dag.index.LookupNode(hash)
if node != nil {
for !dag.IsInSelectedPathChain(&node.hash) {
for !dag.IsInSelectedPathChain(node.hash) {
node = node.selectedParent
}
}
@ -1208,7 +1208,7 @@ func (dag *BlockDAG) blockLocator(node *blockNode) BlockLocator {
step := int32(1)
for node != nil {
locator = append(locator, &node.hash)
locator = append(locator, node.hash)
// Nothing more to add once the genesis block has been added.
if node.height == 0 {
@ -1250,7 +1250,7 @@ func (dag *BlockDAG) BlockHeightByHash(hash *daghash.Hash) (int32, error) {
// DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]daghash.Hash, error) {
func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, error) {
node := dag.index.LookupNode(hash)
if node == nil {
str := fmt.Sprintf("block %s is not in the DAG", hash)
@ -1268,7 +1268,7 @@ func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]daghash.Hash, erro
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HeightToHashRange(startHeight int32,
endHash *daghash.Hash, maxResults int) ([]daghash.Hash, error) {
endHash *daghash.Hash, maxResults int) ([]*daghash.Hash, error) {
endNode := dag.index.LookupNode(endHash)
if endNode == nil {
@ -1295,7 +1295,7 @@ func (dag *BlockDAG) HeightToHashRange(startHeight int32,
// Walk backwards from endHeight to startHeight, collecting block hashes.
node := endNode
hashes := make([]daghash.Hash, resultsLength)
hashes := make([]*daghash.Hash, resultsLength)
for i := resultsLength - 1; i >= 0; i-- {
hashes[i] = node.hash
node = node.selectedParent
@ -1308,7 +1308,7 @@ func (dag *BlockDAG) HeightToHashRange(startHeight int32,
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IntervalBlockHashes(endHash *daghash.Hash, interval int,
) ([]daghash.Hash, error) {
) ([]*daghash.Hash, error) {
endNode := dag.index.LookupNode(endHash)
if endNode == nil {
@ -1320,7 +1320,7 @@ func (dag *BlockDAG) IntervalBlockHashes(endHash *daghash.Hash, interval int,
endHeight := endNode.height
resultsLength := int(endHeight) / interval
hashes := make([]daghash.Hash, resultsLength)
hashes := make([]*daghash.Hash, resultsLength)
dag.virtual.mtx.Lock()
defer dag.virtual.mtx.Unlock()
@ -1396,9 +1396,9 @@ func (dag *BlockDAG) locateInventory(locator BlockLocator, hashStop *daghash.Has
// See the comment on the exported function for more details on special cases.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) locateBlocks(locator BlockLocator, hashStop *daghash.Hash, maxHashes uint32) []daghash.Hash {
func (dag *BlockDAG) locateBlocks(locator BlockLocator, hashStop *daghash.Hash, maxHashes uint32) []*daghash.Hash {
nodes := dag.locateBlockNodes(locator, hashStop, maxHashes)
hashes := make([]daghash.Hash, len(nodes))
hashes := make([]*daghash.Hash, len(nodes))
for i, node := range nodes {
hashes[i] = node.hash
}
@ -1451,7 +1451,7 @@ func (dag *BlockDAG) locateBlockNodes(locator BlockLocator, hashStop *daghash.Ha
// after the genesis block will be returned
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LocateBlocks(locator BlockLocator, hashStop *daghash.Hash, maxHashes uint32) []daghash.Hash {
func (dag *BlockDAG) LocateBlocks(locator BlockLocator, hashStop *daghash.Hash, maxHashes uint32) []*daghash.Hash {
dag.dagLock.RLock()
hashes := dag.locateBlocks(locator, hashStop, maxHashes)
dag.dagLock.RUnlock()

View File

@ -525,8 +525,8 @@ func TestCalcPastMedianTime(t *testing.T) {
// nodeHashes is a convenience function that returns the hashes for all of the
// passed indexes of the provided nodes. It is used to construct expected hash
// slices in the tests.
func nodeHashes(nodes []*blockNode, indexes ...int) []daghash.Hash {
hashes := make([]daghash.Hash, 0, len(indexes))
func nodeHashes(nodes []*blockNode, indexes ...int) []*daghash.Hash {
hashes := make([]*daghash.Hash, 0, len(indexes))
for _, idx := range indexes {
hashes = append(hashes, nodes[idx].hash)
}
@ -546,7 +546,11 @@ func chainedNodes(parents blockSet, numNodes int) []*blockNode {
for i := 0; i < numNodes; i++ {
// This is invalid, but all that is needed is enough to get the
// synthetic tests to work.
header := wire.BlockHeader{Nonce: testNoncePrng.Uint64()}
header := wire.BlockHeader{
Nonce: testNoncePrng.Uint64(),
IDMerkleRoot: &daghash.ZeroHash,
HashMerkleRoot: &daghash.ZeroHash,
}
header.ParentHashes = tips.hashes()
nodes[i] = newBlockNode(&header, tips, dagconfig.SimNetParams.K)
tips = setFromSlice(nodes[i])
@ -585,10 +589,10 @@ func TestHeightToHashRange(t *testing.T) {
tests := []struct {
name string
startHeight int32 // locator for requested inventory
endHash daghash.Hash // stop hash for locator
maxResults int // max to locate, 0 = wire const
hashes []daghash.Hash // expected located hashes
startHeight int32 // locator for requested inventory
endHash *daghash.Hash // stop hash for locator
maxResults int // max to locate, 0 = wire const
hashes []*daghash.Hash // expected located hashes
expectError bool
}{
{
@ -636,7 +640,7 @@ func TestHeightToHashRange(t *testing.T) {
},
}
for _, test := range tests {
hashes, err := blockDAG.HeightToHashRange(test.startHeight, &test.endHash,
hashes, err := blockDAG.HeightToHashRange(test.startHeight, test.endHash,
test.maxResults)
if err != nil {
if !test.expectError {
@ -660,26 +664,26 @@ func TestIntervalBlockHashes(t *testing.T) {
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
// \-> 16a -> 17a -> 18a (unvalidated)
tip := testTip
chain := newTestDAG(&dagconfig.SimNetParams)
branch0Nodes := chainedNodes(setFromSlice(chain.genesis), 18)
dag := newTestDAG(&dagconfig.SimNetParams)
branch0Nodes := chainedNodes(setFromSlice(dag.genesis), 18)
branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 3)
for _, node := range branch0Nodes {
chain.index.SetStatusFlags(node, statusValid)
chain.index.AddNode(node)
dag.index.SetStatusFlags(node, statusValid)
dag.index.AddNode(node)
}
for _, node := range branch1Nodes {
if node.height < 18 {
chain.index.SetStatusFlags(node, statusValid)
dag.index.SetStatusFlags(node, statusValid)
}
chain.index.AddNode(node)
dag.index.AddNode(node)
}
chain.virtual.SetTips(setFromSlice(tip(branch0Nodes)))
dag.virtual.SetTips(setFromSlice(tip(branch0Nodes)))
tests := []struct {
name string
endHash daghash.Hash
endHash *daghash.Hash
interval int
hashes []daghash.Hash
hashes []*daghash.Hash
expectError bool
}{
{
@ -699,7 +703,7 @@ func TestIntervalBlockHashes(t *testing.T) {
name: "no results",
endHash: branch0Nodes[17].hash,
interval: 20,
hashes: []daghash.Hash{},
hashes: []*daghash.Hash{},
},
{
name: "unvalidated block",
@ -709,7 +713,7 @@ func TestIntervalBlockHashes(t *testing.T) {
},
}
for _, test := range tests {
hashes, err := chain.IntervalBlockHashes(&test.endHash, test.interval)
hashes, err := dag.IntervalBlockHashes(test.endHash, test.interval)
if err != nil {
if !test.expectError {
t.Errorf("%s: unexpected error: %v", test.name, err)
@ -886,7 +890,7 @@ func TestValidateFeeTransaction(t *testing.T) {
var flags BehaviorFlags
flags |= BFFastAdd | BFNoPoWCheck
buildBlock := func(blockName string, parentHashes []daghash.Hash, transactions []*wire.MsgTx, expectedErrorCode ErrorCode) *wire.MsgBlock {
buildBlock := func(blockName string, parentHashes []*daghash.Hash, transactions []*wire.MsgTx, expectedErrorCode ErrorCode) *wire.MsgBlock {
utilTxs := make([]*util.Tx, len(transactions))
for i, tx := range transactions {
utilTxs[i] = util.NewTx(tx)
@ -896,8 +900,8 @@ func TestValidateFeeTransaction(t *testing.T) {
Header: wire.BlockHeader{
Bits: dag.genesis.Header().Bits,
ParentHashes: parentHashes,
HashMerkleRoot: *BuildHashMerkleTreeStore(utilTxs).Root(),
IDMerkleRoot: *BuildIDMerkleTreeStore(utilTxs).Root(),
HashMerkleRoot: BuildHashMerkleTreeStore(utilTxs).Root(),
IDMerkleRoot: BuildIDMerkleTreeStore(utilTxs).Root(),
},
Transactions: transactions,
}
@ -929,7 +933,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(dag.genesis.hash),
TxID: daghash.TxID(*dag.genesis.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -941,7 +945,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(dag.genesis.hash),
TxID: daghash.TxID(*dag.genesis.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -950,7 +954,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
buildBlock("blockWithExtraFeeTx", []daghash.Hash{dag.genesis.hash}, blockWithExtraFeeTxTransactions, ErrMultipleFeeTransactions)
buildBlock("blockWithExtraFeeTx", []*daghash.Hash{dag.genesis.hash}, blockWithExtraFeeTxTransactions, ErrMultipleFeeTransactions)
block1Txs := []*wire.MsgTx{
cb1,
@ -959,7 +963,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(dag.genesis.hash),
TxID: daghash.TxID(*dag.genesis.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -968,7 +972,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block1 := buildBlock("block1", []daghash.Hash{dag.genesis.hash}, block1Txs, 0)
block1 := buildBlock("block1", []*daghash.Hash{dag.genesis.hash}, block1Txs, 0)
cb1A := createCoinbase(nil)
block1ATxs := []*wire.MsgTx{
@ -978,7 +982,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(dag.genesis.hash),
TxID: daghash.TxID(*dag.genesis.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -987,7 +991,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block1A := buildBlock("block1A", []daghash.Hash{dag.genesis.hash}, block1ATxs, 0)
block1A := buildBlock("block1A", []*daghash.Hash{dag.genesis.hash}, block1ATxs, 0)
block1AChildCbPkScript, err := payToPubKeyHashScript((&[20]byte{0x1A, 0xC0})[:])
if err != nil {
@ -1001,7 +1005,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block1A.BlockHash()),
TxID: daghash.TxID(*block1A.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1028,7 +1032,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block1AChild := buildBlock("block1AChild", []daghash.Hash{block1A.BlockHash()}, block1AChildTxs, 0)
block1AChild := buildBlock("block1AChild", []*daghash.Hash{block1A.BlockHash()}, block1AChildTxs, 0)
cb2 := createCoinbase(nil)
block2Txs := []*wire.MsgTx{
@ -1038,7 +1042,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block1.BlockHash()),
TxID: daghash.TxID(*block1.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1047,7 +1051,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block2 := buildBlock("block2", []daghash.Hash{block1.BlockHash()}, block2Txs, 0)
block2 := buildBlock("block2", []*daghash.Hash{block1.BlockHash()}, block2Txs, 0)
cb3 := createCoinbase(nil)
block3Txs := []*wire.MsgTx{
@ -1057,7 +1061,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block2.BlockHash()),
TxID: daghash.TxID(*block2.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1066,7 +1070,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block3 := buildBlock("block3", []daghash.Hash{block2.BlockHash()}, block3Txs, 0)
block3 := buildBlock("block3", []*daghash.Hash{block2.BlockHash()}, block3Txs, 0)
block4CbPkScript, err := payToPubKeyHashScript((&[20]byte{0x40})[:])
if err != nil {
@ -1081,7 +1085,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block3.BlockHash()),
TxID: daghash.TxID(*block3.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1108,7 +1112,7 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block4 := buildBlock("block4", []daghash.Hash{block3.BlockHash()}, block4Txs, 0)
block4 := buildBlock("block4", []*daghash.Hash{block3.BlockHash()}, block4Txs, 0)
block4ACbPkScript, err := payToPubKeyHashScript((&[20]byte{0x4A})[:])
if err != nil {
@ -1122,7 +1126,7 @@ func TestValidateFeeTransaction(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block3.BlockHash()),
TxID: daghash.TxID(*block3.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1149,17 +1153,17 @@ func TestValidateFeeTransaction(t *testing.T) {
SubnetworkID: *subnetworkid.SubnetworkIDNative,
},
}
block4A := buildBlock("block4A", []daghash.Hash{block3.BlockHash()}, block4ATxs, 0)
block4A := buildBlock("block4A", []*daghash.Hash{block3.BlockHash()}, block4ATxs, 0)
cb5 := createCoinbase(nil)
feeInOuts := map[daghash.Hash]*struct {
txIn *wire.TxIn
txOut *wire.TxOut
}{
block4.BlockHash(): {
*block4.BlockHash(): {
txIn: &wire.TxIn{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block4.BlockHash()),
TxID: daghash.TxID(*block4.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1169,10 +1173,10 @@ func TestValidateFeeTransaction(t *testing.T) {
Value: cb3.TxOut[0].Value - 1,
},
},
block4A.BlockHash(): {
*block4A.BlockHash(): {
txIn: &wire.TxIn{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block4A.BlockHash()),
TxID: daghash.TxID(*block4A.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
@ -1195,7 +1199,7 @@ func TestValidateFeeTransaction(t *testing.T) {
block5Txs := []*wire.MsgTx{cb5, sortedBlock5FeeTx}
block5ParentHashes := []daghash.Hash{block4.BlockHash(), block4A.BlockHash()}
block5ParentHashes := []*daghash.Hash{block4.BlockHash(), block4A.BlockHash()}
buildBlock("block5", block5ParentHashes, block5Txs, 0)
sortedBlock5FeeTx.TxIn[0], sortedBlock5FeeTx.TxIn[1] = sortedBlock5FeeTx.TxIn[1], sortedBlock5FeeTx.TxIn[0]
@ -1205,7 +1209,7 @@ func TestValidateFeeTransaction(t *testing.T) {
block5FeeTxWith1Achild.AddTxIn(&wire.TxIn{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(block1AChild.BlockHash()),
TxID: daghash.TxID(*block1AChild.BlockHash()),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,

View File

@ -399,8 +399,8 @@ func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) {
}
type dagState struct {
TipHashes []daghash.Hash
LastFinalityPoint daghash.Hash
TipHashes []*daghash.Hash
LastFinalityPoint *daghash.Hash
}
// serializeDAGState returns the serialization of the DAG state.
@ -584,7 +584,7 @@ func (dag *BlockDAG) initDAGState() error {
}
} else {
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(&hash)
parent := dag.index.LookupNode(hash)
if parent == nil {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find parent %s for block %s", hash, header.BlockHash()))
@ -669,7 +669,7 @@ func (dag *BlockDAG) initDAGState() error {
// Apply the stored tips to the virtual block.
tips := newSet()
for _, tipHash := range state.TipHashes {
tip := dag.index.LookupNode(&tipHash)
tip := dag.index.LookupNode(tipHash)
if tip == nil {
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
"DAG tip %s in block index", state.TipHashes))
@ -679,7 +679,7 @@ func (dag *BlockDAG) initDAGState() error {
dag.virtual.SetTips(tips)
// Set the last finality point
dag.lastFinalityPoint = dag.index.LookupNode(&state.LastFinalityPoint)
dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
return nil
})
@ -709,7 +709,7 @@ func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error
// with the height set.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
// Load the raw block bytes from the database.
blockBytes, err := dbTx.FetchBlock(&node.hash)
blockBytes, err := dbTx.FetchBlock(node.hash)
if err != nil {
return nil, err
}
@ -742,7 +742,7 @@ func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
// Write block header data to block index bucket.
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
key := blockIndexKey(&node.hash, uint32(node.height))
key := blockIndexKey(node.hash, uint32(node.height))
return blockIndexBucket.Put(key, value)
}

View File

@ -178,16 +178,16 @@ func TestDAGStateSerialization(t *testing.T) {
{
name: "genesis",
state: &dagState{
TipHashes: []daghash.Hash{*newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
LastFinalityPoint: *newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
},
{
name: "block 1",
state: &dagState{
TipHashes: []daghash.Hash{*newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
LastFinalityPoint: *newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
},

View File

@ -45,7 +45,7 @@ func TestFinality(t *testing.T) {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
buildNodeToDag := func(parentHashes []daghash.Hash) (*util.Block, error) {
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false, 1)
if err != nil {
return nil, err
@ -68,7 +68,7 @@ func TestFinality(t *testing.T) {
// First we build a chain of blockdag.FinalityInterval blocks for future use
for i := 0; i < blockdag.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]daghash.Hash{*currentNode.Hash()})
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
}
@ -80,7 +80,7 @@ func TestFinality(t *testing.T) {
// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < blockdag.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]daghash.Hash{*currentNode.Hash()})
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
}
@ -89,13 +89,13 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode
for i := 0; i < blockdag.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]daghash.Hash{*currentNode.Hash()})
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
}
}
if *dag.LastFinalityPointHash() != *expectedFinalityPoint.Hash() {
if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
t.Errorf("TestFinality: dag.lastFinalityPoint expected to be %v but got %v", expectedFinalityPoint, dag.LastFinalityPointHash())
}
@ -106,13 +106,13 @@ func TestFinality(t *testing.T) {
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
}
if *dag.LastFinalityPointHash() != *expectedFinalityPoint.Hash() {
if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
t.Errorf("TestFinality: dag.lastFinalityPoint was unexpectly changed")
}
// Here we check that a block with lower blue score than the last finality
// point will get rejected
_, err = buildNodeToDag([]daghash.Hash{*genesis.Hash()})
_, err = buildNodeToDag([]*daghash.Hash{genesis.Hash()})
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
@ -127,7 +127,7 @@ func TestFinality(t *testing.T) {
// Here we check that a block that doesn't have the last finality point in
// its selected parent chain will get rejected
_, err = buildNodeToDag([]daghash.Hash{*altChainTip.Hash()})
_, err = buildNodeToDag([]*daghash.Hash{altChainTip.Hash()})
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
@ -182,7 +182,7 @@ func TestChainedTransactions(t *testing.T) {
}
defer teardownFunc()
block1, err := mining.PrepareBlockForTest(dag, &params, []daghash.Hash{*params.GenesisHash}, nil, false, 1)
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false, 1)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -217,7 +217,7 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true, 1)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true, 1)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -248,7 +248,7 @@ func TestChainedTransactions(t *testing.T) {
}
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
block3, err := mining.PrepareBlockForTest(dag, &params, []daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false, 1)
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false, 1)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

View File

@ -82,12 +82,12 @@ func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactF
dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, &blueBlock.hash)
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[blueBlock.hash] = feeData
bluesFeeData[*blueBlock.hash] = feeData
}
return nil
@ -166,11 +166,11 @@ func (node *blockNode) buildFeeTransaction(dag *BlockDAG, txsAcceptanceData Mult
func feeInputAndOutputForBlueBlock(blueBlock *blockNode, txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {
blockTxsAcceptanceData, ok := txsAcceptanceData[blueBlock.hash]
blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[blueBlock.hash]
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
}
@ -184,7 +184,7 @@ func feeInputAndOutputForBlueBlock(blueBlock *blockNode, txsAcceptanceData Multi
txIn := &wire.TxIn{
SignatureScript: []byte{},
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(blueBlock.hash),
TxID: daghash.TxID(*blueBlock.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,

View File

@ -189,7 +189,7 @@ type testGenerator struct {
// Used for tracking spendable coinbase outputs.
spendableOuts []spendableOut
prevCollectedHash daghash.Hash
prevCollectedHash *daghash.Hash
// Common key for any tests which require signed transactions.
privKey *btcec.PrivateKey
@ -203,7 +203,7 @@ func makeTestGenerator(params *dagconfig.Params) (testGenerator, error) {
genesisHash := genesis.BlockHash()
return testGenerator{
params: params,
blocks: map[daghash.Hash]*wire.MsgBlock{genesisHash: genesis},
blocks: map[daghash.Hash]*wire.MsgBlock{*genesisHash: genesis},
blocksByName: map[string]*wire.MsgBlock{"genesis": genesis},
blockHeights: map[string]int32{"genesis": 0},
tip: genesis,
@ -299,9 +299,9 @@ func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx {
// calcHashMerkleRoot creates a merkle tree from the slice of transactions and
// returns the root of the tree.
func calcHashMerkleRoot(txns []*wire.MsgTx) daghash.Hash {
func calcHashMerkleRoot(txns []*wire.MsgTx) *daghash.Hash {
if len(txns) == 0 {
return daghash.Hash{}
return &daghash.Hash{}
}
utilTxns := make([]*util.Tx, 0, len(txns))
@ -309,7 +309,7 @@ func calcHashMerkleRoot(txns []*wire.MsgTx) daghash.Hash {
utilTxns = append(utilTxns, util.NewTx(tx))
}
merkleTree := blockdag.BuildHashMerkleTreeStore(utilTxns)
return *merkleTree.Root()
return merkleTree.Root()
}
// solveBlock attempts to find a nonce which makes the passed block header hash
@ -342,7 +342,7 @@ func solveBlock(header *wire.BlockHeader) bool {
default:
hdr.Nonce = i
hash := hdr.BlockHash()
if daghash.HashToBig(&hash).Cmp(
if daghash.HashToBig(hash).Cmp(
targetDifficulty) <= 0 {
results <- sbResult{true, i}
@ -510,7 +510,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
block := wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
ParentHashes: []*daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
HashMerkleRoot: calcHashMerkleRoot(txns),
Bits: g.params.PowLimitBits,
Timestamp: ts,
@ -539,7 +539,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
// Update generator state and return the block.
blockHash := block.BlockHash()
g.blocks[blockHash] = &block
g.blocks[*blockHash] = &block
g.blocksByName[blockName] = &block
g.blockHeights[blockName] = nextHeight
g.tip = &block
@ -552,18 +552,18 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
// map references to a block via its old hash and insert new ones for the new
// block hash. This is useful if the test code has to manually change a block
// after 'nextBlock' has returned.
func (g *testGenerator) updateBlockState(oldBlockName string, oldBlockHash daghash.Hash, newBlockName string, newBlock *wire.MsgBlock) {
func (g *testGenerator) updateBlockState(oldBlockName string, oldBlockHash *daghash.Hash, newBlockName string, newBlock *wire.MsgBlock) {
// Look up the height from the existing entries.
blockHeight := g.blockHeights[oldBlockName]
// Remove existing entries.
delete(g.blocks, oldBlockHash)
delete(g.blocks, *oldBlockHash)
delete(g.blocksByName, oldBlockName)
delete(g.blockHeights, oldBlockName)
// Add new entries.
newBlockHash := newBlock.BlockHash()
g.blocks[newBlockHash] = newBlock
g.blocks[*newBlockHash] = newBlock
g.blocksByName[newBlockName] = newBlock
g.blockHeights[newBlockName] = blockHeight
}
@ -735,9 +735,9 @@ func (g *testGenerator) assertTipBlockNumTxns(expected int) {
// assertTipBlockHash panics if the current tip block associated with the
// generator does not match the specified hash.
func (g *testGenerator) assertTipBlockHash(expected daghash.Hash) {
func (g *testGenerator) assertTipBlockHash(expected *daghash.Hash) {
hash := g.tip.BlockHash()
if hash != expected {
if !hash.IsEqual(expected) {
panic(fmt.Sprintf("block hash of block %q (height %d) is %s "+
"instead of expected %s", g.tipName, g.tipHeight, hash,
expected))
@ -746,9 +746,9 @@ func (g *testGenerator) assertTipBlockHash(expected daghash.Hash) {
// assertTipBlockMerkleRoot panics if the merkle root in header of the current
// tip block associated with the generator does not match the specified hash.
func (g *testGenerator) assertTipBlockMerkleRoot(expected daghash.Hash) {
func (g *testGenerator) assertTipBlockMerkleRoot(expected *daghash.Hash) {
hash := g.tip.Header.HashMerkleRoot
if hash != expected {
if !hash.IsEqual(expected) {
panic(fmt.Sprintf("merkle root of block %q (height %d) is %s "+
"instead of expected %s", g.tipName, g.tipHeight, hash,
expected))
@ -1443,7 +1443,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// a uint256 is higher than the limit.
b46.Header.Nonce++
blockHash := b46.BlockHash()
hashNum := daghash.HashToBig(&blockHash)
hashNum := daghash.HashToBig(blockHash)
if hashNum.Cmp(g.params.PowLimit) >= 0 {
break
}
@ -1470,7 +1470,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// \-> b48(14)
g.setTip("b43")
g.nextBlock("b48", outs[14], func(b *wire.MsgBlock) {
b.Header.HashMerkleRoot = daghash.Hash{}
b.Header.HashMerkleRoot = &daghash.Hash{}
})
rejected(blockdag.ErrBadMerkleRoot)

View File

@ -68,8 +68,8 @@ var (
regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{},
HashMerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 1,

View File

@ -189,7 +189,7 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
}
}
fh, err := builder.MakeHeaderForFilter(f, *prevHeader)
fh, err := builder.MakeHeaderForFilter(f, prevHeader)
if err != nil {
return err
}

View File

@ -451,8 +451,8 @@ func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegio
}
// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]daghash.Hash, error) {
blockHashes := make([]daghash.Hash, 0)
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
err := idx.db.View(func(dbTx database.Tx) error {
var err error
blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
@ -464,8 +464,8 @@ func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]daghash.Hash, error) {
return blockHashes, err
}
func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]daghash.Hash, error) {
blockHashes := make([]daghash.Hash, 0)
func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
if bucket == nil {
return nil, database.Error{
@ -480,7 +480,7 @@ func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]daghash.Hash, er
if err != nil {
return err
}
blockHashes = append(blockHashes, *blockHash)
blockHashes = append(blockHashes, blockHash)
return nil
})
if err != nil {

View File

@ -52,13 +52,13 @@ func TestTxIndexConnectBlock(t *testing.T) {
defer teardown()
}
prepareAndProcessBlock := func(parentHashes []daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false, 1)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
}
utilBlock := util.NewBlock(block)
blocks[block.BlockHash()] = utilBlock
blocks[*block.BlockHash()] = utilBlock
isOrphan, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
@ -69,11 +69,11 @@ func TestTxIndexConnectBlock(t *testing.T) {
return block
}
block1 := prepareAndProcessBlock([]daghash.Hash{*params.GenesisHash}, nil, "1")
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
block2Tx := createTransaction(block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2 := prepareAndProcessBlock([]daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block3Tx := createTransaction(block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3 := prepareAndProcessBlock([]daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
@ -81,21 +81,21 @@ func TestTxIndexConnectBlock(t *testing.T) {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.BlockHash()
if !block3TxNewAcceptedBlock.IsEqual(&block3Hash) {
if !block3TxNewAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block3TxNewAcceptedBlock)
}
block3A := prepareAndProcessBlock([]daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block3TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3AHash := block3A.BlockHash()
if !block3TxAcceptedBlock.IsEqual(&block3AHash) {
if !block3TxAcceptedBlock.IsEqual(block3AHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3AHash, block3TxAcceptedBlock)
}

View File

@ -16,7 +16,7 @@ func TestMerkle(t *testing.T) {
hashMerkleTree := BuildHashMerkleTreeStore(block.Transactions())
calculatedHashMerkleRoot := hashMerkleTree.Root()
wantHashMerkleRoot := &Block100000.Header.HashMerkleRoot
wantHashMerkleRoot := Block100000.Header.HashMerkleRoot
if !wantHashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
t.Errorf("BuildHashMerkleTreeStore: hash merkle root mismatch - "+
"got %v, want %v", calculatedHashMerkleRoot, wantHashMerkleRoot)
@ -24,7 +24,7 @@ func TestMerkle(t *testing.T) {
idMerkleTree := BuildIDMerkleTreeStore(block.Transactions())
calculatedIDMerkleRoot := idMerkleTree.Root()
wantIDMerkleRoot := &Block100000.Header.IDMerkleRoot
wantIDMerkleRoot := Block100000.Header.IDMerkleRoot
if !wantIDMerkleRoot.IsEqual(calculatedIDMerkleRoot) {
t.Errorf("BuildIDMerkleTreeStore: ID merkle root mismatch - "+
"got %v, want %v", calculatedIDMerkleRoot, wantIDMerkleRoot)

View File

@ -27,11 +27,11 @@ func phantom(block *blockNode, k uint32) (blues []*blockNode, selectedParent *bl
blues := traverseCandidates(block, candidates, parent)
score := uint64(len(blues)) + parent.blueScore
if score > bestScore || (score == bestScore && (bestHash == nil || daghash.Less(&parent.hash, bestHash))) {
if score > bestScore || (score == bestScore && (bestHash == nil || daghash.Less(parent.hash, bestHash))) {
bestScore = score
bestBlues = blues
bestParent = parent
bestHash = &parent.hash
bestHash = parent.hash
}
}

View File

@ -825,7 +825,7 @@ func TestPhantom(t *testing.T) {
parents.add(parent)
}
node := newTestNode(parents, blockVersion, 0, blockTime, test.k)
node.hash = daghash.Hash{} //It helps to predict hash order
node.hash = &daghash.Hash{} //It helps to predict hash order
for i, char := range blockData.id {
node.hash[i] = byte(char)
}

View File

@ -212,7 +212,7 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool,
// Handle orphan blocks.
allParentsExist := true
for _, parentHash := range blockHeader.ParentHashes {
parentExists, err := dag.BlockExists(&parentHash)
parentExists, err := dag.BlockExists(parentHash)
if err != nil {
return false, err
}

View File

@ -42,7 +42,7 @@ func TestCheckBlockScripts(t *testing.T) {
}
node := &blockNode{
hash: *blocks[0].Hash(),
hash: blocks[0].Hash(),
}
scriptFlags := txscript.ScriptNoFlags

View File

@ -190,10 +190,10 @@ func SetVirtualForTest(dag *BlockDAG, virtual *virtualBlock) *virtualBlock {
}
// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []daghash.Hash) (*virtualBlock, error) {
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (*virtualBlock, error) {
parents := newSet()
for _, hash := range parentHashes {
parent := dag.index.LookupNode(&hash)
parent := dag.index.LookupNode(hash)
if parent == nil {
return nil, fmt.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash)
}

View File

@ -146,7 +146,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
for prevNode != nil {
// Nothing more to do if the state of the block is already
// cached.
if _, ok := cache.Lookup(&prevNode.hash); ok {
if _, ok := cache.Lookup(prevNode.hash); ok {
break
}
@ -157,7 +157,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// The state is simply defined if the start time hasn't been
// been reached yet.
if uint64(medianTime.Unix()) < checker.BeginTime() {
cache.Update(&prevNode.hash, ThresholdDefined)
cache.Update(prevNode.hash, ThresholdDefined)
break
}
@ -175,7 +175,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
state := ThresholdDefined
if prevNode != nil {
var ok bool
state, ok = cache.Lookup(&prevNode.hash)
state, ok = cache.Lookup(prevNode.hash)
if !ok {
return ThresholdFailed, AssertError(fmt.Sprintf(
"thresholdState: cache lookup failed for %s",
@ -253,7 +253,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// Update the cache to avoid recalculating the state in the
// future.
cache.Update(&prevNode.hash, state)
cache.Update(prevNode.hash, state)
}
return state, nil

View File

@ -312,7 +312,7 @@ func (dag *BlockDAG) checkProofOfWork(header *wire.BlockHeader, flags BehaviorFl
if flags&BFNoPoWCheck != BFNoPoWCheck {
// The block hash must be less than the claimed target.
hash := header.BlockHash()
hashNum := daghash.HashToBig(&hash)
hashNum := daghash.HashToBig(hash)
if hashNum.Cmp(target) > 0 {
str := fmt.Sprintf("block hash of %064x is higher than "+
"expected max of %064x", hashNum, target)
@ -417,7 +417,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
}
if len(header.ParentHashes) == 0 {
if header.BlockHash() != *dag.dagParams.GenesisHash {
if !header.BlockHash().IsEqual(dag.dagParams.GenesisHash) {
return ruleError(ErrNoParents, "block has no parents")
}
} else {
@ -452,12 +452,12 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
//checkBlockParentsOrder ensures that the block's parents are ordered by hash
func checkBlockParentsOrder(header *wire.BlockHeader) error {
sortedHashes := make([]daghash.Hash, 0, header.NumParentBlocks())
sortedHashes := make([]*daghash.Hash, 0, header.NumParentBlocks())
for _, hash := range header.ParentHashes {
sortedHashes = append(sortedHashes, hash)
}
sort.Slice(sortedHashes, func(i, j int) bool {
return daghash.Less(&sortedHashes[i], &sortedHashes[j])
return daghash.Less(sortedHashes[i], sortedHashes[j])
})
if !daghash.AreEqual(header.ParentHashes, sortedHashes) {
return ruleError(ErrWrongParentsOrder, "block parents are not ordered by hash")
@ -695,7 +695,7 @@ func (dag *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, bluestPar
func (dag *BlockDAG) validateCheckpoints(header *wire.BlockHeader, blockHeight int32) error {
// Ensure dag matches up to predetermined checkpoints.
blockHash := header.BlockHash()
if !dag.verifyCheckpoint(blockHeight, &blockHash) {
if !dag.verifyCheckpoint(blockHeight, blockHash) {
str := fmt.Sprintf("block at height %d does not match "+
"checkpoint hash", blockHeight)
return ruleError(ErrBadCheckpoint, str)

View File

@ -204,32 +204,32 @@ func TestCheckBlockSanity(t *testing.T) {
var invalidParentsOrderBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 0x10000000,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
ParentHashes: []*daghash.Hash{
{ // Make go vet happy.
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52,
0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00,
},
[32]byte{ // Make go vet happy.
{ // Make go vet happy.
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b,
0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00,
},
},
HashMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
HashMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x2f, 0x4c, 0xc3, 0x0b, 0x0a, 0x84, 0xbb, 0x95,
0x56, 0x9d, 0x77, 0xa2, 0xee, 0x3e, 0xb1, 0xac,
0x48, 0x3e, 0x8b, 0xe1, 0xcf, 0xdc, 0x20, 0xba,
0xae, 0xec, 0x0a, 0x2f, 0xe4, 0x85, 0x31, 0x30,
}),
IDMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
},
IDMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25,
0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2,
0x80, 0xf7, 0x00, 0xe3, 0x16, 0x3d, 0x04, 0x95,
0x5b, 0x7e, 0xaf, 0x84, 0x7e, 0x1b, 0x6b, 0x06,
}),
},
Timestamp: time.Unix(0x5c40613a, 0),
Bits: 0x207fffff,
Nonce: 0x4000000000000001,
@ -516,7 +516,7 @@ func TestCheckSerializedHeight(t *testing.T) {
msgTx := coinbaseTx.Copy()
msgTx.TxIn[0].SignatureScript = test.sigScript
msgBlock := wire.NewMsgBlock(wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0))
msgBlock := wire.NewMsgBlock(wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0))
msgBlock.AddTransaction(msgTx)
block := util.NewBlock(msgBlock)
block.SetHeight(test.wantHeight)
@ -623,7 +623,7 @@ func TestValidateParents(t *testing.T) {
b := generateNode(a)
c := generateNode(genesisNode)
fakeBlockHeader := &wire.BlockHeader{}
fakeBlockHeader := &wire.BlockHeader{IDMerkleRoot: &daghash.ZeroHash, HashMerkleRoot: &daghash.ZeroHash}
// Check direct parents relation
err := validateParents(fakeBlockHeader, setFromSlice(a, b))
@ -735,32 +735,32 @@ func TestCheckTransactionSanity(t *testing.T) {
var Block100000 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 0x10000000,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
ParentHashes: []*daghash.Hash{
{ // Make go vet happy.
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b,
0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00,
},
[32]byte{ // Make go vet happy.
{ // Make go vet happy.
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52,
0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00,
},
},
HashMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
HashMerkleRoot: &daghash.Hash{ // Make go vet happy.
0xb6, 0xfb, 0xb8, 0xc1, 0xbe, 0x96, 0x5b, 0x22,
0xf8, 0x46, 0x05, 0x8b, 0x81, 0xc9, 0xb0, 0x94,
0x6c, 0x55, 0x06, 0xd9, 0x7f, 0xe0, 0x85, 0x7a,
0xaa, 0xce, 0x6a, 0x52, 0x4b, 0x69, 0x21, 0x3a,
}),
IDMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
},
IDMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x03, 0x10, 0x71, 0x0d, 0x36, 0xc5, 0x91, 0x03,
0x5f, 0x8f, 0x67, 0x08, 0x78, 0xe4, 0x31, 0xaf,
0x0d, 0xb2, 0x91, 0xe7, 0x80, 0x12, 0x5d, 0x76,
0x2b, 0x69, 0xe4, 0xc4, 0xc0, 0x67, 0xe7, 0xd1,
}),
},
Timestamp: time.Unix(0x5c404bc3, 0),
Bits: 0x207fffff,
Nonce: 0xdffffffffffffff9,
@ -1026,26 +1026,32 @@ var Block100000 = wire.MsgBlock{
var BlockWithWrongTxOrder = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
ParentHashes: []*daghash.Hash{
{ // Make go vet happy.
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b,
0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00,
},
[32]byte{ // Make go vet happy.
{ // Make go vet happy.
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52,
0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00,
},
},
HashMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
HashMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90,
0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e,
0xa0, 0x69, 0x2d, 0x16, 0xb5, 0xd7, 0xe4, 0xf3,
0xcd, 0xc7, 0xc9, 0xaf, 0xfb, 0xd2, 0x1b, 0x85,
}),
},
IDMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90,
0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e,
0xa0, 0x69, 0x2d, 0x16, 0xb5, 0xd7, 0xe4, 0xf3,
0xcd, 0xc7, 0xc9, 0xaf, 0xfb, 0xd2, 0x1b, 0x85,
},
Timestamp: time.Unix(0x5c22330f, 0),
Bits: 0x207fffff,
Nonce: 0xdffffffffffffffc,

View File

@ -74,7 +74,7 @@ func (v *virtualBlock) updateSelectedPathSet(oldSelectedParent *blockNode) {
}
if intersectionNode != nil {
for node := oldSelectedParent; !node.hash.IsEqual(&intersectionNode.hash); node = node.selectedParent {
for node := oldSelectedParent; !node.hash.IsEqual(intersectionNode.hash); node = node.selectedParent {
v.selectedPathChainSet.remove(node)
}
}

View File

@ -107,7 +107,7 @@ func findCandidates(dag *blockdag.BlockDAG, highestTipHash *daghash.Hash) ([]*da
}
parentHashes := block.MsgBlock().Header.ParentHashes
selectedBlockHash := &parentHashes[0]
selectedBlockHash := parentHashes[0]
block, err = dag.BlockByHash(selectedBlockHash)
if err != nil {
return nil, err
@ -166,7 +166,7 @@ func main() {
// Find checkpoint candidates.
highestTipHash := dag.HighestTipHash()
candidates, err := findCandidates(dag, &highestTipHash)
candidates, err := findCandidates(dag, highestTipHash)
if err != nil {
fmt.Fprintln(os.Stderr, "Unable to identify candidates:", err)
return

View File

@ -41,7 +41,7 @@ func solveGenesisBlock(block *wire.MsgBlock, powBits uint32, netName string) {
// The block is solved when the new block hash is less
// than the target difficulty. Yay!
if daghash.HashToBig(&hash).Cmp(targetDifficulty) <= 0 {
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
fmt.Printf("\n\nGenesis block of %s is solved:\n", netName)
fmt.Printf("timestamp: 0x%x\n", header.Timestamp.Unix())
fmt.Printf("bits (difficulty): 0x%x\n", header.Bits)

View File

@ -46,7 +46,7 @@ func (txID TxID) String() string {
}
// Strings returns a slice of strings representing the hashes in the given slice of hashes
func Strings(hashes []Hash) []string {
func Strings(hashes []*Hash) []string {
strings := make([]string, len(hashes))
for i, hash := range hashes {
strings[i] = hash.String()
@ -113,13 +113,13 @@ func (txID *TxID) IsEqual(target *TxID) bool {
// AreEqual returns true if both slices contain the same hashes.
// Either slice must not contain duplicates.
func AreEqual(first []Hash, second []Hash) bool {
func AreEqual(first []*Hash, second []*Hash) bool {
if len(first) != len(second) {
return false
}
for i := range first {
if first[i] != second[i] {
if !first[i].IsEqual(second[i]) {
return false
}
}
@ -230,14 +230,14 @@ func Less(a *Hash, b *Hash) bool {
}
//JoinHashesStrings joins all the stringified hashes separated by a separator
func JoinHashesStrings(hashes []Hash, separator string) string {
func JoinHashesStrings(hashes []*Hash, separator string) string {
return strings.Join(Strings(hashes), separator)
}
// Sort sorts a slice of hashes
func Sort(hashes []Hash) {
func Sort(hashes []*Hash) {
sort.Slice(hashes, func(i, j int) bool {
return Less(&hashes[i], &hashes[j])
return Less(hashes[i], hashes[j])
})
}

View File

@ -112,25 +112,25 @@ func TestHashString(t *testing.T) {
}
func TestHashesStrings(t *testing.T) {
first := Hash([HashSize]byte{ // Make go vet happy.
first := &Hash{ // Make go vet happy.
0x06, 0xe5, 0x33, 0xfd, 0x1a, 0xda, 0x86, 0x39,
0x1f, 0x3f, 0x6c, 0x34, 0x32, 0x04, 0xb0, 0xd2,
0x78, 0xd4, 0xaa, 0xec, 0x1c, 0x0b, 0x20, 0xaa,
0x27, 0xba, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
})
}
firstStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
second := Hash([HashSize]byte{})
second := &Hash{}
secondStr := "0000000000000000000000000000000000000000000000000000000000000000"
tests := []struct {
name string
hashes []Hash
hashes []*Hash
expectedStrings []string
}{
{"empty", []Hash{}, []string{}},
{"two hashes", []Hash{first, second}, []string{firstStr, secondStr}},
{"two hashes inversed", []Hash{second, first}, []string{secondStr, firstStr}},
{"empty", []*Hash{}, []string{}},
{"two hashes", []*Hash{first, second}, []string{firstStr, secondStr}},
{"two hashes inversed", []*Hash{second, first}, []string{secondStr, firstStr}},
}
for _, test := range tests {
@ -234,14 +234,14 @@ func TestAreEqual(t *testing.T) {
hash1, _ := NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
hash2, _ := NewHashFromStr("2222222222222222222222222222222222222222222222222222222222222222")
hash3, _ := NewHashFromStr("3333333333333333333333333333333333333333333333333333333333333333")
hashes0To2 := []Hash{*hash0, *hash1, *hash2}
hashes1To3 := []Hash{*hash1, *hash2, *hash3}
hashes0To3 := []Hash{*hash0, *hash1, *hash2, *hash3}
hashes0To2 := []*Hash{hash0, hash1, hash2}
hashes1To3 := []*Hash{hash1, hash2, hash3}
hashes0To3 := []*Hash{hash0, hash1, hash2, hash3}
tests := []struct {
name string
first []Hash
second []Hash
first []*Hash
second []*Hash
expected bool
}{
{
@ -372,18 +372,18 @@ func TestJoinHashesStrings(t *testing.T) {
tests := []struct {
name string
hashes []Hash
hashes []*Hash
separator string
expected string
}{
{"no separator", []Hash{*hash0, *hash1}, "",
{"no separator", []*Hash{hash0, hash1}, "",
"00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111111111111"},
{", separator", []Hash{*hash0, *hash1}, ",",
{", separator", []*Hash{hash0, hash1}, ",",
"0000000000000000000000000000000000000000000000000000000000000000,1111111111111111111111111111111111111111111111111111111111111111"},
{"blabla separator", []Hash{*hash0, *hash1}, "blabla",
{"blabla separator", []*Hash{hash0, hash1}, "blabla",
"0000000000000000000000000000000000000000000000000000000000000000blabla1111111111111111111111111111111111111111111111111111111111111111"},
{"1 hash", []Hash{*hash0}, ",", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0 hashes", []Hash{}, ",", ""},
{"1 hash", []*Hash{hash0}, ",", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0 hashes", []*Hash{}, ",", ""},
}
for _, test := range tests {
@ -404,15 +404,15 @@ func TestSort(t *testing.T) {
tests := []struct {
name string
hashes []Hash
expected []Hash
hashes []*Hash
expected []*Hash
}{
{"empty", []Hash{}, []Hash{}},
{"single item", []Hash{*hash0}, []Hash{*hash0}},
{"already sorted", []Hash{*hash0, *hash1, *hash2, *hash3}, []Hash{*hash0, *hash1, *hash2, *hash3}},
{"inverted", []Hash{*hash3, *hash2, *hash1, *hash0}, []Hash{*hash0, *hash1, *hash2, *hash3}},
{"shuffled", []Hash{*hash2, *hash3, *hash0, *hash1}, []Hash{*hash0, *hash1, *hash2, *hash3}},
{"with duplicates", []Hash{*hash2, *hash3, *hash0, *hash1, *hash1}, []Hash{*hash0, *hash1, *hash1, *hash2, *hash3}},
{"empty", []*Hash{}, []*Hash{}},
{"single item", []*Hash{hash0}, []*Hash{hash0}},
{"already sorted", []*Hash{hash0, hash1, hash2, hash3}, []*Hash{hash0, hash1, hash2, hash3}},
{"inverted", []*Hash{hash3, hash2, hash1, hash0}, []*Hash{hash0, hash1, hash2, hash3}},
{"shuffled", []*Hash{hash2, hash3, hash0, hash1}, []*Hash{hash0, hash1, hash2, hash3}},
{"with duplicates", []*Hash{hash2, hash3, hash0, hash1, hash1}, []*Hash{hash0, hash1, hash1, hash2, hash3}},
}
for _, test := range tests {

View File

@ -61,9 +61,9 @@ var genesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happ
var genesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{},
HashMerkleRoot: genesisMerkleRoot,
IDMerkleRoot: genesisMerkleRoot,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &genesisMerkleRoot,
IDMerkleRoot: &genesisMerkleRoot,
Timestamp: time.Unix(0x5c3cafec, 0),
Bits: 0x207fffff,
Nonce: 0,
@ -132,9 +132,9 @@ var devNetGenesisMerkleRoot = genesisMerkleRoot
var devNetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{},
HashMerkleRoot: devNetGenesisMerkleRoot,
IDMerkleRoot: devNetGenesisMerkleRoot,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &devNetGenesisMerkleRoot,
IDMerkleRoot: &devNetGenesisMerkleRoot,
Timestamp: time.Unix(0x5c922d07, 0),
Bits: 0x1e7fffff,
Nonce: 0x2633,

View File

@ -30,7 +30,7 @@ func TestGenesisBlock(t *testing.T) {
// Check hash of the block against expected hash.
hash := MainNetParams.GenesisBlock.BlockHash()
if !MainNetParams.GenesisHash.IsEqual(&hash) {
if !MainNetParams.GenesisHash.IsEqual(hash) {
t.Fatalf("TestGenesisBlock: Genesis block hash does not "+
"appear valid - got %v, want %v", spew.Sdump(hash),
spew.Sdump(MainNetParams.GenesisHash))
@ -57,7 +57,7 @@ func TestRegTestGenesisBlock(t *testing.T) {
// Check hash of the block against expected hash.
hash := RegressionNetParams.GenesisBlock.BlockHash()
if !RegressionNetParams.GenesisHash.IsEqual(&hash) {
if !RegressionNetParams.GenesisHash.IsEqual(hash) {
t.Fatalf("TestRegTestGenesisBlock: Genesis block hash does "+
"not appear valid - got %v, want %v", spew.Sdump(hash),
spew.Sdump(RegressionNetParams.GenesisHash))
@ -84,7 +84,7 @@ func TestTestNet3GenesisBlock(t *testing.T) {
// Check hash of the block against expected hash.
hash := TestNet3Params.GenesisBlock.BlockHash()
if !TestNet3Params.GenesisHash.IsEqual(&hash) {
if !TestNet3Params.GenesisHash.IsEqual(hash) {
t.Fatalf("TestTestNet3GenesisBlock: Genesis block hash does "+
"not appear valid - got %v, want %v", spew.Sdump(hash),
spew.Sdump(TestNet3Params.GenesisHash))
@ -111,7 +111,7 @@ func TestSimNetGenesisBlock(t *testing.T) {
// Check hash of the block against expected hash.
hash := SimNetParams.GenesisBlock.BlockHash()
if !SimNetParams.GenesisHash.IsEqual(&hash) {
if !SimNetParams.GenesisHash.IsEqual(hash) {
t.Fatalf("TestSimNetGenesisBlock: Genesis block hash does "+
"not appear valid - got %v, want %v", spew.Sdump(hash),
spew.Sdump(SimNetParams.GenesisHash))

View File

@ -130,7 +130,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
for _, parentHash := range parentHashes {
var exists bool
err := bi.db.View(func(dbTx database.Tx) error {
exists, err = dbTx.HasBlock(&parentHash)
exists, err = dbTx.HasBlock(parentHash)
return err
})
if err != nil {

View File

@ -71,11 +71,11 @@ func (cmd *headersCmd) Execute(args []string) error {
// Bulk load headers.
err = db.View(func(dbTx database.Tx) error {
blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
hashes := make([]daghash.Hash, 0, 500000)
hashes := make([]*daghash.Hash, 0, 500000)
blockIdxBucket.ForEach(func(k, v []byte) error {
var hash daghash.Hash
copy(hash[:], k)
hashes = append(hashes, hash)
hashes = append(hashes, &hash)
return nil
})

View File

@ -16,7 +16,7 @@ import (
func TestDeleteFile(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
tests := []struct {
fileNum uint32
@ -69,7 +69,7 @@ func TestDeleteFile(t *testing.T) {
// and makes sure no panic occurs, as well as ensures the writeCursor was updated correctly.
func TestHandleRollbackErrors(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
testBlockSize := uint32(testBlock.MsgBlock().SerializeSize())
tests := []struct {

View File

@ -1213,7 +1213,7 @@ func (tx *transaction) HasBlock(hash *daghash.Hash) (bool, error) {
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) HasBlocks(hashes []daghash.Hash) ([]bool, error) {
func (tx *transaction) HasBlocks(hashes []*daghash.Hash) ([]bool, error) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
@ -1221,7 +1221,7 @@ func (tx *transaction) HasBlocks(hashes []daghash.Hash) ([]bool, error) {
results := make([]bool, len(hashes))
for i := range hashes {
results[i] = tx.hasBlock(&hashes[i])
results[i] = tx.hasBlock(hashes[i])
}
return results, nil
@ -1289,10 +1289,10 @@ func (tx *transaction) FetchBlockHeader(hash *daghash.Hash) ([]byte, error) {
// fetchBlockHeadersSizes fetches the NumParentBlocks fields out of the block headers
// and uses it to compute the total sizes of the block headers
func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, error) {
func (tx *transaction) fetchBlockHeadersSizes(hashes []*daghash.Hash) ([]byte, error) {
regions := make([]database.BlockRegion, len(hashes))
for i := range hashes {
regions[i].Hash = &hashes[i]
regions[i].Hash = hashes[i]
regions[i].Offset = numParentBlocksOffset
regions[i].Len = 1
}
@ -1325,7 +1325,7 @@ func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, er
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlockHeaders(hashes []daghash.Hash) ([][]byte, error) {
func (tx *transaction) FetchBlockHeaders(hashes []*daghash.Hash) ([][]byte, error) {
headerSizes, err := tx.fetchBlockHeadersSizes(hashes)
if err != nil {
return nil, err
@ -1333,7 +1333,7 @@ func (tx *transaction) FetchBlockHeaders(hashes []daghash.Hash) ([][]byte, error
regions := make([]database.BlockRegion, len(hashes))
for i := range hashes {
regions[i].Hash = &hashes[i]
regions[i].Hash = hashes[i]
regions[i].Offset = 0
regions[i].Len = uint32(headerSizes[i])
}
@ -1405,7 +1405,7 @@ func (tx *transaction) FetchBlock(hash *daghash.Hash) ([]byte, error) {
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlocks(hashes []daghash.Hash) ([][]byte, error) {
func (tx *transaction) FetchBlocks(hashes []*daghash.Hash) ([][]byte, error) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
@ -1420,7 +1420,7 @@ func (tx *transaction) FetchBlocks(hashes []daghash.Hash) ([][]byte, error) {
blocks := make([][]byte, len(hashes))
for i := range hashes {
var err error
blocks[i], err = tx.FetchBlock(&hashes[i])
blocks[i], err = tx.FetchBlock(hashes[i])
if err != nil {
return nil, err
}

View File

@ -553,7 +553,7 @@ func TestForEachBucket(t *testing.T) {
// TestStoreBlockErrors tests all error-cases in *tx.StoreBlock().
// The non-error-cases are tested in the more general tests.
func TestStoreBlockErrors(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
testBlock := util.NewBlock(wire.NewMsgBlock(wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
tests := []struct {
name string
@ -716,7 +716,7 @@ func TestWritePendingAndCommitErrors(t *testing.T) {
rollbackCalled = false
err = pdb.Update(func(dbTx database.Tx) error {
return dbTx.StoreBlock(util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0))))
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0))))
})
if err == nil {
t.Errorf("No error returned when blockIdx.Put() should have returned an error")

View File

@ -1117,11 +1117,11 @@ func testFetchBlockIOMissing(tc *testContext, dbTx database.Tx) bool {
// Test the individual block APIs one block at a time to ensure they
// return the expected error. Also, build the data needed to test the
// bulk APIs below while looping.
allBlockHashes := make([]daghash.Hash, len(tc.blocks))
allBlockHashes := make([]*daghash.Hash, len(tc.blocks))
allBlockRegions := make([]database.BlockRegion, len(tc.blocks))
for i, block := range tc.blocks {
blockHash := block.Hash()
allBlockHashes[i] = *blockHash
allBlockHashes[i] = blockHash
txLocs, err := block.TxLoc()
if err != nil {
@ -1222,14 +1222,14 @@ func testFetchBlockIO(tc *testContext, dbTx database.Tx) bool {
// Test the individual block APIs one block at a time. Also, build the
// data needed to test the bulk APIs below while looping.
allBlockHashes := make([]daghash.Hash, len(tc.blocks))
allBlockHashes := make([]*daghash.Hash, len(tc.blocks))
allBlockBytes := make([][]byte, len(tc.blocks))
allBlockTxLocs := make([][]wire.TxLoc, len(tc.blocks))
allBlockRegions := make([]database.BlockRegion, len(tc.blocks))
allBlockHeaderSizes := make([]int, len(tc.blocks))
for i, block := range tc.blocks {
blockHash := block.Hash()
allBlockHashes[i] = *blockHash
allBlockHashes[i] = blockHash
blockBytes, err := block.Bytes()
if err != nil {
@ -1463,9 +1463,9 @@ func testFetchBlockIO(tc *testContext, dbTx database.Tx) bool {
// Ensure fetching blocks for which one doesn't exist returns the
// expected error.
testName := "FetchBlocks invalid hash"
badBlockHashes := make([]daghash.Hash, len(allBlockHashes)+1)
badBlockHashes := make([]*daghash.Hash, len(allBlockHashes)+1)
copy(badBlockHashes, allBlockHashes)
badBlockHashes[len(badBlockHashes)-1] = daghash.Hash{}
badBlockHashes[len(badBlockHashes)-1] = &daghash.Hash{}
wantErrCode := database.ErrBlockNotFound
_, err = dbTx.FetchBlocks(badBlockHashes)
if !checkDbError(tc.t, testName, err, wantErrCode) {
@ -1837,11 +1837,11 @@ func testClosedTxInterface(tc *testContext, dbTx database.Tx) bool {
// Test the individual block APIs one block at a time to ensure they
// return the expected error. Also, build the data needed to test the
// bulk APIs below while looping.
allBlockHashes := make([]daghash.Hash, len(tc.blocks))
allBlockHashes := make([]*daghash.Hash, len(tc.blocks))
allBlockRegions := make([]database.BlockRegion, len(tc.blocks))
for i, block := range tc.blocks {
blockHash := block.Hash()
allBlockHashes[i] = *blockHash
allBlockHashes[i] = blockHash
txLocs, err := block.TxLoc()
if err != nil {

View File

@ -247,7 +247,7 @@ type Tx interface {
// - ErrTxClosed if the transaction has already been closed
//
// Other errors are possible depending on the implementation.
HasBlocks(hashes []daghash.Hash) ([]bool, error)
HasBlocks(hashes []*daghash.Hash) ([]bool, error)
// FetchBlockHeader returns the raw serialized bytes for the block
// header identified by the given hash. The raw bytes are in the format
@ -297,7 +297,7 @@ type Tx interface {
// has ended results in undefined behavior. This constraint prevents
// additional data copies and allows support for memory-mapped database
// implementations.
FetchBlockHeaders(hashes []daghash.Hash) ([][]byte, error)
FetchBlockHeaders(hashes []*daghash.Hash) ([][]byte, error)
// FetchBlock returns the raw serialized bytes for the block identified
// by the given hash. The raw bytes are in the format returned by
@ -332,7 +332,7 @@ type Tx interface {
// has ended results in undefined behavior. This constraint prevents
// additional data copies and allows support for memory-mapped database
// implementations.
FetchBlocks(hashes []daghash.Hash) ([][]byte, error)
FetchBlocks(hashes []*daghash.Hash) ([][]byte, error)
// FetchBlockRegion returns the raw serialized bytes for the given
// block region.

View File

@ -45,7 +45,7 @@ func generateBlock(parent *wire.MsgBlock) *wire.MsgBlock {
return &wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{parent.BlockHash()},
ParentHashes: []*daghash.Hash{parent.BlockHash()},
HashMerkleRoot: genesisMerkleRoot,
Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC
Bits: 0x2e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]

View File

@ -44,7 +44,7 @@ func solveBlock(header *wire.BlockHeader, targetDifficulty *big.Int) bool {
default:
hdr.Nonce = i
hash := hdr.BlockHash()
if daghash.HashToBig(&hash).Cmp(targetDifficulty) <= 0 {
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
select {
case results <- sbResult{true, i}:
return
@ -187,9 +187,9 @@ func CreateBlock(parentBlock *util.Block, inclusionTxs []*util.Tx,
var block wire.MsgBlock
block.Header = wire.BlockHeader{
Version: blockVersion,
ParentHashes: []daghash.Hash{*parentHash},
HashMerkleRoot: *hashMerkleTree.Root(),
IDMerkleRoot: *idMerkleTree.Root(),
ParentHashes: []*daghash.Hash{parentHash},
HashMerkleRoot: hashMerkleTree.Root(),
IDMerkleRoot: idMerkleTree.Root(),
Timestamp: ts,
Bits: net.PowLimitBits,
}

View File

@ -135,7 +135,7 @@ func deserializeObservedTransaction(r io.Reader) (*observedTransaction, error) {
// is used if Rollback is called to reverse the effect of registering
// a block.
type registeredBlock struct {
hash daghash.Hash
hash *daghash.Hash
transactions []*observedTransaction
}
@ -249,7 +249,7 @@ func (ef *FeeEstimator) RegisterBlock(block *util.Block) error {
// Keep track of which txs were dropped in case of an orphan block.
dropped := &registeredBlock{
hash: *block.Hash(),
hash: block.Hash(),
transactions: make([]*observedTransaction, 0, 100),
}
@ -583,9 +583,13 @@ const estimateFeeSaveVersion = 1
func deserializeRegisteredBlock(r io.Reader, txs map[uint32]*observedTransaction) (*registeredBlock, error) {
var lenTransactions uint32
rb := &registeredBlock{}
binary.Read(r, binary.BigEndian, &rb.hash)
binary.Read(r, binary.BigEndian, &lenTransactions)
rb := &registeredBlock{hash: &daghash.Hash{}}
if err := binary.Read(r, binary.BigEndian, rb.hash); err != nil {
return nil, err
}
if err := binary.Read(r, binary.BigEndian, &lenTransactions); err != nil {
return nil, err
}
rb.transactions = make([]*observedTransaction, lenTransactions)

View File

@ -69,6 +69,7 @@ func (eft *estimateFeeTester) newBlock(txs []*wire.MsgTx) {
eft.height++
block := util.NewBlock(&wire.MsgBlock{
Header: wire.BlockHeader{IDMerkleRoot: &daghash.ZeroHash, HashMerkleRoot: &daghash.ZeroHash},
Transactions: txs,
})
block.SetHeight(eft.height)
@ -393,7 +394,7 @@ func (eft *estimateFeeTester) checkSaveAndRestore(
}
}
// TestSave tests saving and restoring to a []byte.
// TestDatabase tests saving and restoring to a []byte.
func TestDatabase(t *testing.T) {
txPerRound := uint32(7)

View File

@ -1767,25 +1767,25 @@ func TestHandleNewBlock(t *testing.T) {
var dummyBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
ParentHashes: []*daghash.Hash{
{ // Make go vet happy.
0x82, 0xdc, 0xbd, 0xe6, 0x88, 0x37, 0x74, 0x5b,
0x78, 0x6b, 0x03, 0x1d, 0xa3, 0x48, 0x3c, 0x45,
0x3f, 0xc3, 0x2e, 0xd4, 0x53, 0x5b, 0x6f, 0x26,
0x26, 0xb0, 0x48, 0x4f, 0x09, 0x00, 0x00, 0x00,
}, // MainNet genesis
[32]byte{ // Make go vet happy.
{ // Make go vet happy.
0xc1, 0x5b, 0x71, 0xfe, 0x20, 0x70, 0x0f, 0xd0,
0x08, 0x49, 0x88, 0x1b, 0x32, 0xb5, 0xbd, 0x13,
0x17, 0xbe, 0x75, 0xe7, 0x29, 0x46, 0xdd, 0x03,
0x01, 0x92, 0x90, 0xf1, 0xca, 0x8a, 0x88, 0x11,
}}, // SimNet genesis
HashMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
HashMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0,
0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22,
0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85,
0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3,
}), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
}, // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
Timestamp: time.Unix(1529483563, 0), // 2018-06-20 08:32:43 +0000 UTC
Bits: 0x1e00ffff, // 503382015
Nonce: 0x000ae53f, // 714047

View File

@ -273,7 +273,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32,
// The block is solved when the new block hash is less
// than the target difficulty. Yay!
if daghash.HashToBig(&hash).Cmp(targetDifficulty) <= 0 {
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
m.updateHashes <- hashesCompleted
return true
}

View File

@ -660,8 +660,8 @@ func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress util.Address) (*BlockTe
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
ParentHashes: g.dag.TipHashes(),
HashMerkleRoot: *hashMerkleTree.Root(),
IDMerkleRoot: *idMerkleTree.Root(),
HashMerkleRoot: hashMerkleTree.Root(),
IDMerkleRoot: idMerkleTree.Root(),
Timestamp: ts,
Bits: reqDifficulty,
}
@ -743,9 +743,9 @@ func (g *BlkTmplGenerator) UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight
// Recalculate the merkle roots with the updated extra nonce.
block := util.NewBlock(msgBlock)
hashMerkleTree := blockdag.BuildHashMerkleTreeStore(block.Transactions())
msgBlock.Header.HashMerkleRoot = *hashMerkleTree.Root()
msgBlock.Header.HashMerkleRoot = hashMerkleTree.Root()
idMerkleTree := blockdag.BuildIDMerkleTreeStore(block.Transactions())
msgBlock.Header.IDMerkleRoot = *idMerkleTree.Root()
msgBlock.Header.IDMerkleRoot = idMerkleTree.Root()
return nil
}
@ -756,7 +756,7 @@ func (g *BlkTmplGenerator) DAGHeight() int32 {
}
// TipHashes returns the hashes of the DAG's tips
func (g *BlkTmplGenerator) TipHashes() []daghash.Hash {
func (g *BlkTmplGenerator) TipHashes() []*daghash.Hash {
return g.dag.TipHashes()
}

View File

@ -22,13 +22,13 @@ var random = rand.New(rand.NewSource(time.Now().UnixNano()))
func parseBlock(template *btcjson.GetBlockTemplateResult) (*util.Block, error) {
// parse parent hashes
parentHashes := make([]daghash.Hash, len(template.ParentHashes))
parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
for i, parentHash := range template.ParentHashes {
hash, err := daghash.NewHashFromStr(parentHash)
if err != nil {
return nil, fmt.Errorf("Error decoding hash %s: %s", parentHash, err)
}
parentHashes[i] = *hash
parentHashes[i] = hash
}
// parse Bits
@ -59,7 +59,7 @@ func solveBlock(msgBlock *wire.MsgBlock) {
for i := uint64(0); i < maxNonce; i++ {
msgBlock.Header.Nonce = i
hash := msgBlock.BlockHash()
if daghash.HashToBig(&hash).Cmp(targetDifficulty) <= 0 {
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
break
}
}
@ -90,8 +90,8 @@ func mineLoop(clients []*rpcclient.Client) error {
msgBlock := block.MsgBlock()
msgBlock.Header.HashMerkleRoot = *blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
msgBlock.Header.IDMerkleRoot = *blockdag.BuildIDMerkleTreeStore(block.Transactions()).Root()
msgBlock.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
msgBlock.Header.IDMerkleRoot = blockdag.BuildIDMerkleTreeStore(block.Transactions()).Root()
solveBlock(msgBlock)

View File

@ -38,7 +38,7 @@ func (txs *fakeTxSource) HaveTransaction(txID *daghash.TxID) bool {
}
// PrepareBlockForTest generates a block with the proper merkle roots, coinbase/fee transactions etc. This function is used for test purposes only
func PrepareBlockForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, parentHashes []daghash.Hash, transactions []*wire.MsgTx, forceTransactions bool, coinbaseOutputs uint64) (*wire.MsgBlock, error) {
func PrepareBlockForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, parentHashes []*daghash.Hash, transactions []*wire.MsgTx, forceTransactions bool, coinbaseOutputs uint64) (*wire.MsgBlock, error) {
newVirtual, err := blockdag.GetVirtualFromParentsForTest(dag, parentHashes)
if err != nil {
return nil, err
@ -114,8 +114,8 @@ func PrepareBlockForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, paren
for i, tx := range template.Block.Transactions {
utilTxs[i] = util.NewTx(tx)
}
template.Block.Header.HashMerkleRoot = *blockdag.BuildHashMerkleTreeStore(utilTxs).Root()
template.Block.Header.IDMerkleRoot = *blockdag.BuildIDMerkleTreeStore(utilTxs).Root()
template.Block.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(utilTxs).Root()
template.Block.Header.IDMerkleRoot = blockdag.BuildIDMerkleTreeStore(utilTxs).Root()
}
return template.Block, nil
}

View File

@ -396,7 +396,7 @@ func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) {
sm.syncPeer = nil
if sm.headersFirstMode {
highestTipHash := sm.dag.HighestTipHash()
sm.resetHeaderState(&highestTipHash, sm.dag.Height()) //TODO: (Ori) This is probably wrong. Done only for compilation
sm.resetHeaderState(highestTipHash, sm.dag.Height()) //TODO: (Ori) This is probably wrong. Done only for compilation
}
sm.startSync()
}
@ -672,13 +672,13 @@ func (sm *SyncManager) addBlocksToRequestQueue(state *peerSyncState, hashes []*d
func (state *peerSyncState) addInvToRequestQueue(iv *wire.InvVect, isRelayedInv bool) {
if isRelayedInv {
if _, exists := state.relayedInvsRequestQueueSet[iv.Hash]; !exists {
state.relayedInvsRequestQueueSet[iv.Hash] = struct{}{}
if _, exists := state.relayedInvsRequestQueueSet[*iv.Hash]; !exists {
state.relayedInvsRequestQueueSet[*iv.Hash] = struct{}{}
state.relayedInvsRequestQueue = append(state.relayedInvsRequestQueue, iv)
}
} else {
if _, exists := state.requestQueueSet[iv.Hash]; !exists {
state.requestQueueSet[iv.Hash] = struct{}{}
if _, exists := state.requestQueueSet[*iv.Hash]; !exists {
state.requestQueueSet[*iv.Hash] = struct{}{}
state.requestQueue = append(state.requestQueue, iv)
}
}
@ -768,7 +768,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
var finalHash *daghash.Hash
for _, blockHeader := range msg.Headers {
blockHash := blockHeader.BlockHash()
finalHash = &blockHash
finalHash = blockHash
// Ensure there is a previous header to compare against.
prevNodeEl := sm.headerList.Back()
@ -781,9 +781,9 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
// Ensure the header properly connects to the previous one and
// add it to the list of headers.
node := headerNode{hash: &blockHash}
node := headerNode{hash: blockHash}
prevNode := prevNodeEl.Value.(*headerNode)
if prevNode.hash.IsEqual(&blockHeader.ParentHashes[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
if prevNode.hash.IsEqual(blockHeader.ParentHashes[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
node.height = prevNode.height + 1
e := sm.headerList.PushBack(&node)
if sm.startHeader == nil {
@ -856,12 +856,12 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
fallthrough
case wire.InvTypeBlock:
// Ask DAG if the block is known to it in any form (in DAG or as an orphan).
return sm.dag.HaveBlock(&invVect.Hash)
return sm.dag.HaveBlock(invVect.Hash)
case wire.InvTypeTx:
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if sm.txMemPool.HaveTransaction((*daghash.TxID)(&invVect.Hash)) {
if sm.txMemPool.HaveTransaction((*daghash.TxID)(invVect.Hash)) {
return true, nil
}
@ -873,7 +873,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
// checked because the vast majority of transactions consist of
// two outputs where one is some form of "pay-to-somebody-else"
// and the other is a change output.
prevOut := wire.OutPoint{TxID: daghash.TxID(invVect.Hash)}
prevOut := wire.OutPoint{TxID: daghash.TxID(*invVect.Hash)}
for i := uint32(0); i < 2; i++ {
prevOut.Index = i
entry, ok := sm.dag.GetUTXOEntry(prevOut)
@ -949,7 +949,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
if iv.Type == wire.InvTypeTx {
// Skip the transaction if it has already been
// rejected.
if _, exists := sm.rejectedTxns[daghash.TxID(iv.Hash)]; exists {
if _, exists := sm.rejectedTxns[daghash.TxID(*iv.Hash)]; exists {
continue
}
}
@ -970,8 +970,8 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
// resending the orphan block as an available block
// to signal there are more missing blocks that need to
// be requested.
if sm.dag.IsKnownOrphan(&iv.Hash) {
missingAncestors, err := sm.dag.GetOrphanMissingAncestorHashes(&iv.Hash)
if sm.dag.IsKnownOrphan(iv.Hash) {
missingAncestors, err := sm.dag.GetOrphanMissingAncestorHashes(iv.Hash)
if err != nil {
log.Errorf("Failed to find missing ancestors for block %s: %s",
iv.Hash, err)
@ -990,7 +990,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
// in the selected path chain, one up to the
// final one the remote peer knows about (zero
// stop hash).
locator := sm.dag.BlockLocatorFromHash(&iv.Hash)
locator := sm.dag.BlockLocatorFromHash(iv.Hash)
peer.PushGetBlocksMsg(locator, &daghash.ZeroHash)
}
}
@ -1029,10 +1029,10 @@ func (sm *SyncManager) addInvsToGetDataMessageFromQueue(gdmsg *wire.MsgGetData,
addBlockInv := func(iv *wire.InvVect) {
// Request the block if there is not already a pending
// request.
if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
sm.requestedBlocks[iv.Hash] = struct{}{}
if _, exists := sm.requestedBlocks[*iv.Hash]; !exists {
sm.requestedBlocks[*iv.Hash] = struct{}{}
sm.limitHashMap(sm.requestedBlocks, maxRequestedBlocks)
state.requestedBlocks[iv.Hash] = struct{}{}
state.requestedBlocks[*iv.Hash] = struct{}{}
gdmsg.AddInvVect(iv)
}
@ -1040,20 +1040,20 @@ func (sm *SyncManager) addInvsToGetDataMessageFromQueue(gdmsg *wire.MsgGetData,
for _, iv := range invsToAdd {
switch iv.Type {
case wire.InvTypeSyncBlock:
delete(state.requestQueueSet, iv.Hash)
delete(state.requestQueueSet, *iv.Hash)
addBlockInv(iv)
case wire.InvTypeBlock:
delete(state.relayedInvsRequestQueueSet, iv.Hash)
delete(state.relayedInvsRequestQueueSet, *iv.Hash)
addBlockInv(iv)
case wire.InvTypeTx:
delete(state.relayedInvsRequestQueueSet, iv.Hash)
delete(state.relayedInvsRequestQueueSet, *iv.Hash)
// Request the transaction if there is not already a
// pending request.
if _, exists := sm.requestedTxns[daghash.TxID(iv.Hash)]; !exists {
sm.requestedTxns[daghash.TxID(iv.Hash)] = struct{}{}
if _, exists := sm.requestedTxns[daghash.TxID(*iv.Hash)]; !exists {
sm.requestedTxns[daghash.TxID(*iv.Hash)] = struct{}{}
sm.limitTxIDMap(sm.requestedTxns, maxRequestedTxns)
state.requestedTxns[daghash.TxID(iv.Hash)] = struct{}{}
state.requestedTxns[daghash.TxID(*iv.Hash)] = struct{}{}
gdmsg.AddInvVect(iv)
}
@ -1404,7 +1404,7 @@ func New(config *Config) (*SyncManager, error) {
// Initialize the next checkpoint based on the current height.
sm.nextCheckpoint = sm.findNextHeaderCheckpoint(sm.dag.Height()) //TODO: (Ori) This is probably wrong. Done only for compilation
if sm.nextCheckpoint != nil {
sm.resetHeaderState(&highestTipHash, sm.dag.Height()) //TODO: (Ori) This is probably wrong. Done only for compilation)
sm.resetHeaderState(highestTipHash, sm.dag.Height()) //TODO: (Ori) This is probably wrong. Done only for compilation)
}
} else {
log.Info("Checkpoints are disabled")

View File

@ -170,10 +170,10 @@ func messageSummary(msg wire.Message) string {
return invSummary(msg.InvList)
case *wire.MsgGetBlocks:
return locatorSummary(msg.BlockLocatorHashes, &msg.HashStop)
return locatorSummary(msg.BlockLocatorHashes, msg.HashStop)
case *wire.MsgGetHeaders:
return locatorSummary(msg.BlockLocatorHashes, &msg.HashStop)
return locatorSummary(msg.BlockLocatorHashes, msg.HashStop)
case *wire.MsgHeaders:
return fmt.Sprintf("num %d", len(msg.Headers))

View File

@ -928,7 +928,7 @@ func (p *Peer) PushGetHeadersMsg(locator blockdag.BlockLocator, stopHash *daghas
// Construct the getheaders request and queue it to be sent.
msg := wire.NewMsgGetHeaders()
msg.HashStop = *stopHash
msg.HashStop = stopHash
for _, hash := range locator {
err := msg.AddBlockLocatorHash(hash)
if err != nil {
@ -961,7 +961,7 @@ func (p *Peer) PushRejectMsg(command string, code wire.RejectCode, reason string
"but does not", command)
hash = &daghash.ZeroHash
}
msg.Hash = *hash
msg.Hash = hash
}
// Send the message without waiting if the caller has not requested it.

View File

@ -498,7 +498,7 @@ func TestPeerListeners(t *testing.T) {
{
"OnBlock",
wire.NewMsgBlock(wire.NewBlockHeader(1,
[]daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 1, 1)),
[]*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 1, 1)),
},
{
"OnInv",
@ -564,7 +564,7 @@ func TestPeerListeners(t *testing.T) {
{
"OnMerkleBlock",
wire.NewMsgMerkleBlock(wire.NewBlockHeader(1,
[]daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 1, 1)),
[]*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 1, 1)),
},
// only one version message is allowed
// only one verack message is allowed

View File

@ -767,7 +767,7 @@ func (r FutureRescanBlocksResult) Receive() ([]btcjson.RescannedBlock, error) {
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrrpcclient.
func (c *Client) RescanBlocksAsync(blockHashes []daghash.Hash) FutureRescanBlocksResult {
func (c *Client) RescanBlocksAsync(blockHashes []*daghash.Hash) FutureRescanBlocksResult {
strBlockHashes := make([]string, len(blockHashes))
for i := range blockHashes {
strBlockHashes[i] = blockHashes[i].String()
@ -783,7 +783,7 @@ func (c *Client) RescanBlocksAsync(blockHashes []daghash.Hash) FutureRescanBlock
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrrpcclient.
func (c *Client) RescanBlocks(blockHashes []daghash.Hash) ([]btcjson.RescannedBlock, error) {
func (c *Client) RescanBlocks(blockHashes []*daghash.Hash) ([]btcjson.RescannedBlock, error) {
return c.RescanBlocksAsync(blockHashes).Receive()
}
@ -900,7 +900,7 @@ func (r FutureGetCFilterHeaderResult) Receive() (*wire.MsgCFHeaders, error) {
}
// Assign the hash to a headers message and return it.
msgCFHeaders := wire.MsgCFHeaders{PrevFilterHeader: *headerHash}
msgCFHeaders := wire.MsgCFHeaders{PrevFilterHeader: headerHash}
return &msgCFHeaders, nil
}

View File

@ -282,7 +282,7 @@ func (r FutureGetHeadersResult) Receive() ([]wire.BlockHeader, error) {
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrrpcclient.
func (c *Client) GetHeadersAsync(blockLocators []daghash.Hash, hashStop *daghash.Hash) FutureGetHeadersResult {
func (c *Client) GetHeadersAsync(blockLocators []*daghash.Hash, hashStop *daghash.Hash) FutureGetHeadersResult {
locators := make([]string, len(blockLocators))
for i := range blockLocators {
locators[i] = blockLocators[i].String()
@ -301,7 +301,7 @@ func (c *Client) GetHeadersAsync(blockLocators []daghash.Hash, hashStop *daghash
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrrpcclient.
func (c *Client) GetHeaders(blockLocators []daghash.Hash, hashStop *daghash.Hash) ([]wire.BlockHeader, error) {
func (c *Client) GetHeaders(blockLocators []*daghash.Hash, hashStop *daghash.Hash) ([]wire.BlockHeader, error) {
return c.GetHeadersAsync(blockLocators, hashStop).Receive()
}

View File

@ -240,8 +240,8 @@ func (ps *peerState) forAllPeers(callback func(sp *Peer) bool) bool {
// cfHeaderKV is a tuple of a filter header and its associated block hash. The
// struct is used to cache cfcheckpt responses.
type cfHeaderKV struct {
blockHash daghash.Hash
filterHeader daghash.Hash
blockHash *daghash.Hash
filterHeader *daghash.Hash
}
// Server provides a bitcoin server for handling communications to and from
@ -646,13 +646,13 @@ func (sp *Peer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) {
var err error
switch iv.Type {
case wire.InvTypeTx:
err = sp.server.pushTxMsg(sp, (*daghash.TxID)(&iv.Hash), c, waitChan)
err = sp.server.pushTxMsg(sp, (*daghash.TxID)(iv.Hash), c, waitChan)
case wire.InvTypeSyncBlock:
fallthrough
case wire.InvTypeBlock:
err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan)
err = sp.server.pushBlockMsg(sp, iv.Hash, c, waitChan)
case wire.InvTypeFilteredBlock:
err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan)
err = sp.server.pushMerkleBlockMsg(sp, iv.Hash, c, waitChan)
default:
peerLog.Warnf("Unknown type in inventory request %d",
iv.Type)
@ -701,13 +701,13 @@ func (sp *Peer) OnGetBlocks(_ *peer.Peer, msg *wire.MsgGetBlocks) {
//
// This mirrors the behavior in the reference implementation.
dag := sp.server.DAG
hashList := dag.LocateBlocks(msg.BlockLocatorHashes, &msg.HashStop,
hashList := dag.LocateBlocks(msg.BlockLocatorHashes, msg.HashStop,
wire.MaxBlocksPerMsg)
// Generate inventory message.
invMsg := wire.NewMsgInv()
for i := range hashList {
iv := wire.NewInvVect(wire.InvTypeSyncBlock, &hashList[i])
iv := wire.NewInvVect(wire.InvTypeSyncBlock, hashList[i])
invMsg.AddInvVect(iv)
}
@ -720,7 +720,7 @@ func (sp *Peer) OnGetBlocks(_ *peer.Peer, msg *wire.MsgGetBlocks) {
// would prevent the entire slice from being eligible
// for GC as soon as it's sent.
continueHash := invMsg.InvList[invListLen-1].Hash
sp.continueHash = &continueHash
sp.continueHash = continueHash
}
sp.QueueMessage(invMsg, nil)
}
@ -745,7 +745,7 @@ func (sp *Peer) OnGetHeaders(_ *peer.Peer, msg *wire.MsgGetHeaders) {
//
// This mirrors the behavior in the reference implementation.
dag := sp.server.DAG
headers := dag.LocateHeaders(msg.BlockLocatorHashes, &msg.HashStop)
headers := dag.LocateHeaders(msg.BlockLocatorHashes, msg.HashStop)
// Send found headers to the requesting peer.
blockHeaders := make([]*wire.BlockHeader, len(headers))
@ -763,20 +763,13 @@ func (sp *Peer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) {
}
hashes, err := sp.server.DAG.HeightToHashRange(int32(msg.StartHeight),
&msg.StopHash, wire.MaxGetCFiltersReqRange)
msg.StopHash, wire.MaxGetCFiltersReqRange)
if err != nil {
peerLog.Debugf("Invalid getcfilters request: %s", err)
return
}
// Create []*daghash.Hash from []daghash.Hash to pass to
// FiltersByBlockHashes.
hashPtrs := make([]*daghash.Hash, len(hashes))
for i := range hashes {
hashPtrs[i] = &hashes[i]
}
filters, err := sp.server.CfIndex.FiltersByBlockHashes(hashPtrs,
filters, err := sp.server.CfIndex.FiltersByBlockHashes(hashes,
msg.FilterType)
if err != nil {
peerLog.Errorf("Error retrieving cfilters: %s", err)
@ -788,7 +781,7 @@ func (sp *Peer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) {
peerLog.Warnf("Could not obtain cfilter for %s", hashes[i])
return
}
filterMsg := wire.NewMsgCFilter(msg.FilterType, &hashes[i], filterBytes)
filterMsg := wire.NewMsgCFilter(msg.FilterType, hashes[i], filterBytes)
sp.QueueMessage(filterMsg, nil)
}
}
@ -812,7 +805,7 @@ func (sp *Peer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
// Fetch the hashes from the block index.
hashList, err := sp.server.DAG.HeightToHashRange(startHeight,
&msg.StopHash, maxResults)
msg.StopHash, maxResults)
if err != nil {
peerLog.Debugf("Invalid getcfheaders request: %s", err)
}
@ -825,15 +818,8 @@ func (sp *Peer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
return
}
// Create []*daghash.Hash from []daghash.Hash to pass to
// FilterHeadersByBlockHashes.
hashPtrs := make([]*daghash.Hash, len(hashList))
for i := range hashList {
hashPtrs[i] = &hashList[i]
}
// Fetch the raw filter hash bytes from the database for all blocks.
filterHashes, err := sp.server.CfIndex.FilterHashesByBlockHashes(hashPtrs,
filterHashes, err := sp.server.CfIndex.FilterHashesByBlockHashes(hashList,
msg.FilterType)
if err != nil {
peerLog.Errorf("Error retrieving cfilter hashes: %s", err)
@ -845,7 +831,7 @@ func (sp *Peer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
// Populate the PrevFilterHeader field.
if msg.StartHeight > 0 {
parentHash := &hashList[0]
parentHash := hashList[0]
// Fetch the raw committed filter header bytes from the
// database.
@ -902,7 +888,7 @@ func (sp *Peer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) {
return
}
blockHashes, err := sp.server.DAG.IntervalBlockHashes(&msg.StopHash,
blockHashes, err := sp.server.DAG.IntervalBlockHashes(msg.StopHash,
wire.CFCheckptInterval)
if err != nil {
peerLog.Debugf("Invalid getcfilters request: %s", err)
@ -940,22 +926,22 @@ func (sp *Peer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) {
// Iterate backwards until the block hash is found in the cache.
var forkIdx int
for forkIdx = len(checkptCache); forkIdx > 0; forkIdx-- {
if checkptCache[forkIdx-1].blockHash == blockHashes[forkIdx-1] {
if checkptCache[forkIdx-1].blockHash.IsEqual(blockHashes[forkIdx-1]) {
break
}
}
// Populate results with cached checkpoints.
checkptMsg := wire.NewMsgCFCheckpt(msg.FilterType, &msg.StopHash,
checkptMsg := wire.NewMsgCFCheckpt(msg.FilterType, msg.StopHash,
len(blockHashes))
for i := 0; i < forkIdx; i++ {
checkptMsg.AddCFHeader(&checkptCache[i].filterHeader)
checkptMsg.AddCFHeader(checkptCache[i].filterHeader)
}
// Look up any filter headers that aren't cached.
blockHashPtrs := make([]*daghash.Hash, 0, len(blockHashes)-forkIdx)
for i := forkIdx; i < len(blockHashes); i++ {
blockHashPtrs = append(blockHashPtrs, &blockHashes[i])
blockHashPtrs = append(blockHashPtrs, blockHashes[i])
}
filterHeaders, err := sp.server.CfIndex.FilterHeadersByBlockHashes(blockHashPtrs,
@ -982,7 +968,7 @@ func (sp *Peer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) {
if updateCache {
checkptCache[forkIdx+i] = cfHeaderKV{
blockHash: blockHashes[forkIdx+i],
filterHeader: *filterHeader,
filterHeader: filterHeader,
}
}
}
@ -1345,7 +1331,7 @@ func (s *Server) pushBlockMsg(sp *Peer, hash *daghash.Hash, doneChan chan<- stru
if sendInv {
highestTipHash := sp.server.DAG.HighestTipHash()
invMsg := wire.NewMsgInvSizeHint(1)
iv := wire.NewInvVect(wire.InvTypeBlock, &highestTipHash)
iv := wire.NewInvVect(wire.InvTypeBlock, highestTipHash)
invMsg.AddInvVect(iv)
sp.QueueMessage(invMsg, doneChan)
sp.continueHash = nil

View File

@ -341,7 +341,7 @@ type gbtWorkState struct {
sync.Mutex
lastTxUpdate time.Time
lastGenerated time.Time
tipHashes []daghash.Hash
tipHashes []*daghash.Hash
minTimestamp time.Time
template *mining.BlockTemplate
notifyMap map[string]map[int64]chan struct{}
@ -1410,7 +1410,7 @@ func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{})
// encodeLongPollID encodes the passed details into an ID that can be used to
// uniquely identify a block template.
func encodeLongPollID(parentHashes []daghash.Hash, lastGenerated time.Time) string {
func encodeLongPollID(parentHashes []*daghash.Hash, lastGenerated time.Time) string {
return fmt.Sprintf("%s-%d", daghash.JoinHashesStrings(parentHashes, ""), lastGenerated.Unix())
}
@ -1419,7 +1419,7 @@ func encodeLongPollID(parentHashes []daghash.Hash, lastGenerated time.Time) stri
// that are using long polling for block templates. The ID consists of the
// parent blocks hashes for the associated template and the time the associated
// template was generated.
func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
func decodeLongPollID(longPollID string) ([]*daghash.Hash, int64, error) {
fields := strings.Split(longPollID, "-")
if len(fields) != 2 {
return nil, 0, errors.New("decodeLongPollID: invalid number of fields")
@ -1431,14 +1431,14 @@ func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
}
numberOfHashes := len(parentHashesStr) / daghash.HashSize
parentHashes := make([]daghash.Hash, 0, numberOfHashes)
parentHashes := make([]*daghash.Hash, 0, numberOfHashes)
for i := 0; i < len(parentHashesStr); i += daghash.HashSize {
hash, err := daghash.NewHashFromStr(parentHashesStr[i : i+daghash.HashSize])
if err != nil {
return nil, 0, fmt.Errorf("decodeLongPollID: NewHashFromStr: %s", err)
}
parentHashes = append(parentHashes, *hash)
parentHashes = append(parentHashes, hash)
}
lastGenerated, err := strconv.ParseInt(fields[1], 10, 64)
@ -1453,7 +1453,7 @@ func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
// notified when block templates are stale.
//
// This function MUST be called with the state locked.
func (state *gbtWorkState) notifyLongPollers(tipHashes []daghash.Hash, lastGenerated time.Time) {
func (state *gbtWorkState) notifyLongPollers(tipHashes []*daghash.Hash, lastGenerated time.Time) {
// Notify anything that is waiting for a block template update from a
// hash which is not the hash of the tip of the best chain since their
// work is now invalid.
@ -1501,7 +1501,7 @@ func (state *gbtWorkState) notifyLongPollers(tipHashes []daghash.Hash, lastGener
// NotifyBlockAdded uses the newly-added block to notify any long poll
// clients with a new block template when their existing block template is
// stale due to the newly added block.
func (state *gbtWorkState) NotifyBlockAdded(tipHashes []daghash.Hash) {
func (state *gbtWorkState) NotifyBlockAdded(tipHashes []*daghash.Hash) {
go func() {
state.Lock()
defer state.Unlock()
@ -1540,7 +1540,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) {
// without requiring a different channel for each client.
//
// This function MUST be called with the state locked.
func (state *gbtWorkState) templateUpdateChan(tipHashes []daghash.Hash, lastGenerated int64) chan struct{} {
func (state *gbtWorkState) templateUpdateChan(tipHashes []*daghash.Hash, lastGenerated int64) chan struct{} {
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
// Either get the current list of channels waiting for updates about
// changes to block template for the parent hashes or create a new one.
@ -1674,9 +1674,9 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool)
// Update the merkle root.
block := util.NewBlock(template.Block)
hashMerkleTree := blockdag.BuildHashMerkleTreeStore(block.Transactions())
template.Block.Header.HashMerkleRoot = *hashMerkleTree.Root()
template.Block.Header.HashMerkleRoot = hashMerkleTree.Root()
idMerkleTree := blockdag.BuildIDMerkleTreeStore(block.Transactions())
template.Block.Header.IDMerkleRoot = *idMerkleTree.Root()
template.Block.Header.IDMerkleRoot = idMerkleTree.Root()
}
// Set locals for convenience.
@ -2393,7 +2393,7 @@ func handleGetMiningInfo(s *Server, cmd interface{}, closeChan <-chan struct{})
}
highestTipHash := s.cfg.DAG.HighestTipHash()
selectedBlock, err := s.cfg.DAG.BlockByHash(&highestTipHash)
selectedBlock, err := s.cfg.DAG.BlockByHash(highestTipHash)
if err != nil {
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCInternal.Code,
@ -3452,7 +3452,7 @@ func verifyDAG(s *Server, level, depth int32) error {
currentHash := s.cfg.DAG.HighestTipHash()
for height := s.cfg.DAG.Height(); height > finishHeight; { //TODO: (Ori) This is probably wrong. Done only for compilation
// Level 0 just looks up the block.
block, err := s.cfg.DAG.BlockByHash(&currentHash)
block, err := s.cfg.DAG.BlockByHash(currentHash)
if err != nil {
log.Errorf("Verify is unable to fetch block at "+
"height %d: %s", height, err)
@ -3470,7 +3470,7 @@ func verifyDAG(s *Server, level, depth int32) error {
}
}
currentHash = *block.MsgBlock().Header.SelectedParentHash()
currentHash = block.MsgBlock().Header.SelectedParentHash()
}
log.Infof("DAG verify completed successfully")

View File

@ -2086,7 +2086,7 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
Message: "Failed to fetch block: " + err.Error(),
}
}
if lastBlockHash != nil && block.MsgBlock().Header.ParentHashes[0] != *lastBlockHash { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
if lastBlockHash != nil && !block.MsgBlock().Header.ParentHashes[0].IsEqual(lastBlockHash) { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCInvalidParameter,
Message: fmt.Sprintf("Block %s is not a child of %s",

View File

@ -88,8 +88,8 @@ func (b *Block) Hash() *daghash.Hash {
// Cache the block hash and return it.
hash := b.msgBlock.BlockHash()
b.blockHash = &hash
return &hash
b.blockHash = hash
return hash
}
// Tx returns a wrapped transaction (util.Tx) for the transaction at the

View File

@ -38,7 +38,7 @@ func TestBlock(t *testing.T) {
}
// Hash for block 100,000.
wantHashStr := "c076eb7a2c8cb2d8492a9a2a4221f7b031257e71acef37fd12a9967661f57693"
wantHashStr := "5b5168d93a5178acdf82da367f8fb85b0432874e99d5c6518adccc727fd3a012"
wantHash, err := daghash.NewHashFromStr(wantHashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
@ -306,25 +306,31 @@ func TestBlockErrors(t *testing.T) {
var Block100000 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
ParentHashes: []*daghash.Hash{
{ // Make go vet happy.
0x82, 0xdc, 0xbd, 0xe6, 0x88, 0x37, 0x74, 0x5b,
0x78, 0x6b, 0x03, 0x1d, 0xa3, 0x48, 0x3c, 0x45,
0x3f, 0xc3, 0x2e, 0xd4, 0x53, 0x5b, 0x6f, 0x26,
0x26, 0xb0, 0x48, 0x4f, 0x09, 0x00, 0x00, 0x00,
}, // MainNet genesis
[32]byte{ // Make go vet happy.
{ // Make go vet happy.
0xc1, 0x5b, 0x71, 0xfe, 0x20, 0x70, 0x0f, 0xd0,
0x08, 0x49, 0x88, 0x1b, 0x32, 0xb5, 0xbd, 0x13,
0x17, 0xbe, 0x75, 0xe7, 0x29, 0x46, 0xdd, 0x03,
0x01, 0x92, 0x90, 0xf1, 0xca, 0x8a, 0x88, 0x11,
}}, // SimNet genesis
HashMerkleRoot: daghash.Hash([32]byte{ // Make go vet happy.
HashMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0,
0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22,
0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85,
0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3,
}), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
}, // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
IDMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0,
0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22,
0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85,
0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3,
}, // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
Timestamp: time.Unix(1529483563, 0), // 2018-06-20 08:32:43 +0000 UTC
Bits: 0x1e00ffff, // 503382015
Nonce: 0x000ae53f, // 714047

View File

@ -311,7 +311,7 @@ func WithRandomKey() *GCSBuilder {
// data pushes within all the outputs created within a block.
func BuildBasicFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
blockHash := block.BlockHash()
b := WithKeyHash(&blockHash)
b := WithKeyHash(blockHash)
// If the filter had an issue with the specified key, then we force it
// to bubble up here by calling the Key() function.
@ -355,7 +355,7 @@ func BuildBasicFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
// the _hashes_ of each transaction are also inserted into the filter.
func BuildExtFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
blockHash := block.BlockHash()
b := WithKeyHash(&blockHash)
b := WithKeyHash(blockHash)
// If the filter had an issue with the specified key, then we force it
// to bubble up here by calling the Key() function.
@ -382,22 +382,22 @@ func BuildExtFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
}
// GetFilterHash returns the double-SHA256 of the filter.
func GetFilterHash(filter *gcs.Filter) (daghash.Hash, error) {
func GetFilterHash(filter *gcs.Filter) (*daghash.Hash, error) {
filterData, err := filter.NBytes()
if err != nil {
return daghash.Hash{}, err
return &daghash.Hash{}, err
}
return daghash.DoubleHashH(filterData), nil
return daghash.DoubleHashP(filterData), nil
}
// MakeHeaderForFilter makes a filter chain header for a filter, given the
// filter and the previous filter chain header.
func MakeHeaderForFilter(filter *gcs.Filter, parentHeader daghash.Hash) (daghash.Hash, error) {
func MakeHeaderForFilter(filter *gcs.Filter, parentHeader *daghash.Hash) (*daghash.Hash, error) {
filterTip := make([]byte, 2*daghash.HashSize)
filterHash, err := GetFilterHash(filter)
if err != nil {
return daghash.Hash{}, err
return &daghash.Hash{}, err
}
// In the buffer we created above we'll compute hash || parentHash as an
@ -407,5 +407,5 @@ func MakeHeaderForFilter(filter *gcs.Filter, parentHeader daghash.Hash) (daghash
// The final filter hash is the double-sha256 of the hash computed
// above.
return daghash.DoubleHashH(filterTip), nil
return daghash.DoubleHashP(filterTip), nil
}

View File

@ -17,7 +17,7 @@ import (
// RegisterSubnetworkForTest is used to register network on DAG with specified gas limit
func RegisterSubnetworkForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, gasLimit uint64) (*subnetworkid.SubnetworkID, error) {
buildNextBlock := func(parentHashes []daghash.Hash, txs []*wire.MsgTx) (*util.Block, error) {
buildNextBlock := func(parentHashes []*daghash.Hash, txs []*wire.MsgTx) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, params, parentHashes, txs, false, 1)
if err != nil {
return nil, err
@ -65,7 +65,7 @@ func RegisterSubnetworkForTest(dag *blockdag.BlockDAG, params *dagconfig.Params,
registryTx := wire.NewRegistryMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}, gasLimit)
// Add it to the DAG
registryBlock, err := buildNextBlock([]daghash.Hash{*fundsBlock.Hash()}, []*wire.MsgTx{registryTx})
registryBlock, err := buildNextBlock([]*daghash.Hash{fundsBlock.Hash()}, []*wire.MsgTx{registryTx})
if err != nil {
return nil, fmt.Errorf("could not build registry block: %s", err)
}

View File

@ -418,13 +418,13 @@ func BenchmarkDecodeHeaders(b *testing.B) {
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
parentHashes := make([]daghash.Hash, MaxNumParentBlocks)
parentHashes := make([]*daghash.Hash, MaxNumParentBlocks)
for j := byte(0); j < MaxNumParentBlocks; j++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
parentHashes[i] = *hash
parentHashes[i] = hash
}
m.AddBlockHeader(NewBlockHeader(1, parentHashes, hash, hash, 0, uint64(i)))
}
@ -572,7 +572,7 @@ func BenchmarkDecodeMerkleBlock(b *testing.B) {
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.Header = *NewBlockHeader(1, []daghash.Hash{*hash}, hash, hash, 0, uint64(10000))
m.Header = *NewBlockHeader(1, []*daghash.Hash{hash}, hash, hash, 0, uint64(10000))
for i := 0; i < 105; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {

View File

@ -35,13 +35,13 @@ type BlockHeader struct {
Version int32
// Hashes of the parent block headers in the blockDAG.
ParentHashes []daghash.Hash
ParentHashes []*daghash.Hash
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot daghash.Hash
HashMerkleRoot *daghash.Hash
// IDMerkleRoot is the merkle tree reference to hash of all transactions' IDs for the block.
IDMerkleRoot daghash.Hash
IDMerkleRoot *daghash.Hash
// Time the block was created.
Timestamp time.Time
@ -59,7 +59,7 @@ func (h *BlockHeader) NumParentBlocks() byte {
}
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() daghash.Hash {
func (h *BlockHeader) BlockHash() *daghash.Hash {
// Encode the header and double sha256 everything prior to the number of
// transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a
@ -67,7 +67,7 @@ func (h *BlockHeader) BlockHash() daghash.Hash {
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+h.NumParentBlocks()))
_ = writeBlockHeader(buf, 0, h)
return daghash.DoubleHashH(buf.Bytes())
return daghash.DoubleHashP(buf.Bytes())
}
// SelectedParentHash returns the hash of the selected block header.
@ -76,7 +76,7 @@ func (h *BlockHeader) SelectedParentHash() *daghash.Hash {
return nil
}
return &h.ParentHashes[0]
return h.ParentHashes[0]
}
// IsGenesis returns true iff this block is a genesis block
@ -129,7 +129,7 @@ func (h *BlockHeader) SerializeSize() int {
// NewBlockHeader returns a new BlockHeader using the provided version, previous
// block hash, hash merkle root, ID merkle root difficulty bits, and nonce used to generate the
// block with defaults or calclulated values for the remaining fields.
func NewBlockHeader(version int32, parentHashes []daghash.Hash, hashMerkleRoot *daghash.Hash,
func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot *daghash.Hash,
idMerkleRoot *daghash.Hash, bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
@ -137,8 +137,8 @@ func NewBlockHeader(version int32, parentHashes []daghash.Hash, hashMerkleRoot *
return &BlockHeader{
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: *hashMerkleRoot,
IDMerkleRoot: *idMerkleRoot,
HashMerkleRoot: hashMerkleRoot,
IDMerkleRoot: idMerkleRoot,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
@ -155,14 +155,18 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
return err
}
bh.ParentHashes = make([]daghash.Hash, numParentBlocks)
bh.ParentHashes = make([]*daghash.Hash, numParentBlocks)
for i := byte(0); i < numParentBlocks; i++ {
err := readElement(r, &bh.ParentHashes[i])
hash := &daghash.Hash{}
err := readElement(r, hash)
if err != nil {
return err
}
bh.ParentHashes[i] = hash
}
return readElements(r, &bh.HashMerkleRoot, &bh.IDMerkleRoot, (*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
bh.HashMerkleRoot = &daghash.Hash{}
bh.IDMerkleRoot = &daghash.Hash{}
return readElements(r, bh.HashMerkleRoot, bh.IDMerkleRoot, (*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
// writeBlockHeader writes a bitcoin block header to w. See Serialize for
@ -170,6 +174,14 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
// opposed to encoding for the wire.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := int64(bh.Timestamp.Unix())
return writeElements(w, bh.Version, bh.NumParentBlocks(), &bh.ParentHashes, &bh.HashMerkleRoot, &bh.IDMerkleRoot,
if err := writeElements(w, bh.Version, bh.NumParentBlocks()); err != nil {
return err
}
for _, hash := range bh.ParentHashes {
if err := writeElement(w, hash); err != nil {
return err
}
}
return writeElements(w, bh.HashMerkleRoot, bh.IDMerkleRoot,
sec, bh.Bits, bh.Nonce)
}

View File

@ -22,19 +22,19 @@ func TestBlockHeader(t *testing.T) {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
hashes := []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
hashes := []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
merkleHash := mainNetGenesisMerkleRoot
idMerkleRoot := &exampleIDMerkleRoot
idMerkleRoot := exampleIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, &merkleHash, idMerkleRoot, bits, nonce)
bh := NewBlockHeader(1, hashes, merkleHash, idMerkleRoot, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if !bh.HashMerkleRoot.IsEqual(&merkleHash) {
if !bh.HashMerkleRoot.IsEqual(merkleHash) {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
spew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))
}
@ -58,7 +58,7 @@ func TestBlockHeaderWire(t *testing.T) {
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
@ -169,7 +169,7 @@ func TestBlockHeaderSerialize(t *testing.T) {
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
@ -253,7 +253,7 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
@ -263,7 +263,7 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{},
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
@ -300,7 +300,7 @@ func TestIsGenesis(t *testing.T) {
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
@ -308,7 +308,7 @@ func TestIsGenesis(t *testing.T) {
}
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{},
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,

View File

@ -17,32 +17,32 @@ import (
// mainNetGenesisHash is the hash of the first block in the block chain for the
// main network (genesis block).
var mainNetGenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy.
var mainNetGenesisHash = &daghash.Hash{ // Make go vet happy.
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
})
}
// simNetGenesisHash is the hash of the first block in the block chain for the
// simulation test network.
var simNetGenesisHash = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy.
var simNetGenesisHash = &daghash.Hash{ // Make go vet happy.
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a,
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
})
}
// mainNetGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the main network.
var mainNetGenesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happy.
var mainNetGenesisMerkleRoot = &daghash.Hash{ // Make go vet happy.
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a,
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
})
}
var exampleIDMerkleRoot = daghash.Hash{
var exampleIDMerkleRoot = &daghash.Hash{
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,

View File

@ -54,27 +54,28 @@ func (invtype InvType) String() string {
// as specified by the Type field, that a peer wants, has, or does not have to
// another peer.
type InvVect struct {
Type InvType // Type of data
Hash daghash.Hash // Hash of the data
Type InvType // Type of data
Hash *daghash.Hash // Hash of the data
}
// NewInvVect returns a new InvVect using the provided type and hash.
func NewInvVect(typ InvType, hash *daghash.Hash) *InvVect {
return &InvVect{
Type: typ,
Hash: *hash,
Hash: hash,
}
}
// readInvVect reads an encoded InvVect from r depending on the protocol
// version.
func readInvVect(r io.Reader, pver uint32, iv *InvVect) error {
return readElements(r, &iv.Type, &iv.Hash)
iv.Hash = &daghash.Hash{}
return readElements(r, &iv.Type, iv.Hash)
}
// writeInvVect serializes an InvVect to w depending on the protocol version.
func writeInvVect(w io.Writer, pver uint32, iv *InvVect) error {
return writeElements(w, iv.Type, &iv.Hash)
return writeElements(w, iv.Type, iv.Hash)
}
func (iv *InvVect) String() string {

View File

@ -68,7 +68,7 @@ func TestInvVectWire(t *testing.T) {
// errInvVect is an inventory vector with an error.
errInvVect := InvVect{
Type: InvTypeError,
Hash: daghash.Hash{},
Hash: &daghash.Hash{},
}
// errInvVectEncoded is the wire encoded bytes of errInvVect.
@ -83,7 +83,7 @@ func TestInvVectWire(t *testing.T) {
// txInvVect is an inventory vector representing a transaction.
txInvVect := InvVect{
Type: InvTypeTx,
Hash: *baseHash,
Hash: baseHash,
}
// txInvVectEncoded is the wire encoded bytes of txInvVect.
@ -98,7 +98,7 @@ func TestInvVectWire(t *testing.T) {
// blockInvVect is an inventory vector representing a block.
blockInvVect := InvVect{
Type: InvTypeBlock,
Hash: *baseHash,
Hash: baseHash,
}
// blockInvVectEncoded is the wire encoded bytes of blockInvVect.

View File

@ -69,7 +69,7 @@ func TestMessage(t *testing.T) {
msgFilterAdd := NewMsgFilterAdd([]byte{0x01})
msgFilterClear := NewMsgFilterClear()
msgFilterLoad := NewMsgFilterLoad([]byte{0x01}, 10, 0, BloomUpdateNone)
bh := NewBlockHeader(1, []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)
bh := NewBlockHeader(1, []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)
msgMerkleBlock := NewMsgMerkleBlock(bh)
msgReject := NewMsgReject("block", RejectDuplicate, "duplicate block")
msgGetCFilters := NewMsgGetCFilters(GCSFilterExtended, 0, &daghash.Hash{})

View File

@ -227,7 +227,7 @@ func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {
}
// BlockHash computes the block identifier hash for this block.
func (msg *MsgBlock) BlockHash() daghash.Hash {
func (msg *MsgBlock) BlockHash() *daghash.Hash {
return msg.Header.BlockHash()
}

View File

@ -24,8 +24,8 @@ func TestBlock(t *testing.T) {
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := &blockOne.Header.HashMerkleRoot
idMerkleRoot := &blockOne.Header.IDMerkleRoot
hashMerkleRoot := blockOne.Header.HashMerkleRoot
idMerkleRoot := blockOne.Header.IDMerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
@ -495,8 +495,8 @@ func TestBlockSerializeSize(t *testing.T) {
var blockOne = MsgBlock{
Header: BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: daghash.Hash(mainNetGenesisMerkleRoot),
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
Bits: 0x1d00ffff, // 486604799

View File

@ -23,7 +23,7 @@ const (
// for details on requesting the headers.
type MsgCFCheckpt struct {
FilterType FilterType
StopHash daghash.Hash
StopHash *daghash.Hash
FilterHeaders []*daghash.Hash
}
@ -49,7 +49,8 @@ func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32) error {
}
// Read stop hash
err = readElement(r, &msg.StopHash)
msg.StopHash = &daghash.Hash{}
err = readElement(r, msg.StopHash)
if err != nil {
return err
}
@ -143,7 +144,7 @@ func NewMsgCFCheckpt(filterType FilterType, stopHash *daghash.Hash,
headersCount int) *MsgCFCheckpt {
return &MsgCFCheckpt{
FilterType: filterType,
StopHash: *stopHash,
StopHash: stopHash,
FilterHeaders: make([]*daghash.Hash, 0, headersCount),
}
}

View File

@ -28,8 +28,8 @@ const (
// MsgGetCFHeaders for details on requesting the headers.
type MsgCFHeaders struct {
FilterType FilterType
StopHash daghash.Hash
PrevFilterHeader daghash.Hash
StopHash *daghash.Hash
PrevFilterHeader *daghash.Hash
FilterHashes []*daghash.Hash
}
@ -55,13 +55,15 @@ func (msg *MsgCFHeaders) BtcDecode(r io.Reader, pver uint32) error {
}
// Read stop hash
err = readElement(r, &msg.StopHash)
msg.StopHash = &daghash.Hash{}
err = readElement(r, msg.StopHash)
if err != nil {
return err
}
// Read prev filter header
err = readElement(r, &msg.PrevFilterHeader)
msg.PrevFilterHeader = &daghash.Hash{}
err = readElement(r, msg.PrevFilterHeader)
if err != nil {
return err
}
@ -175,6 +177,8 @@ func (msg *MsgCFHeaders) MaxPayloadLength(pver uint32) uint32 {
// the Message interface. See MsgCFHeaders for details.
func NewMsgCFHeaders() *MsgCFHeaders {
return &MsgCFHeaders{
FilterHashes: make([]*daghash.Hash, 0, MaxCFHeadersPerMsg),
FilterHashes: make([]*daghash.Hash, 0, MaxCFHeadersPerMsg),
StopHash: &daghash.ZeroHash,
PrevFilterHeader: &daghash.ZeroHash,
}
}

View File

@ -33,7 +33,7 @@ const MaxBlockLocatorsPerMsg = 500
type MsgGetBlocks struct {
ProtocolVersion uint32
BlockLocatorHashes []*daghash.Hash
HashStop daghash.Hash
HashStop *daghash.Hash
}
// AddBlockLocatorHash adds a new block locator hash to the message.
@ -80,7 +80,8 @@ func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32) error {
msg.AddBlockLocatorHash(hash)
}
return readElement(r, &msg.HashStop)
msg.HashStop = &daghash.Hash{}
return readElement(r, msg.HashStop)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
@ -110,7 +111,7 @@ func (msg *MsgGetBlocks) BtcEncode(w io.Writer, pver uint32) error {
}
}
return writeElement(w, &msg.HashStop)
return writeElement(w, msg.HashStop)
}
// Command returns the protocol command string for the message. This is part
@ -134,6 +135,6 @@ func NewMsgGetBlocks(hashStop *daghash.Hash) *MsgGetBlocks {
return &MsgGetBlocks{
ProtocolVersion: ProtocolVersion,
BlockLocatorHashes: make([]*daghash.Hash, 0, MaxBlockLocatorsPerMsg),
HashStop: *hashStop,
HashStop: hashStop,
}
}

View File

@ -251,10 +251,10 @@ func TestGetBlocksWireErrors(t *testing.T) {
// block locator hashes.
maxGetBlocks := NewMsgGetBlocks(hashStop)
for i := 0; i < MaxBlockLocatorsPerMsg; i++ {
maxGetBlocks.AddBlockLocatorHash(&mainNetGenesisHash)
maxGetBlocks.AddBlockLocatorHash(mainNetGenesisHash)
}
maxGetBlocks.BlockLocatorHashes = append(maxGetBlocks.BlockLocatorHashes,
&mainNetGenesisHash)
mainNetGenesisHash)
maxGetBlocksEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Protocol version 1
0xfd, 0xf5, 0x01, // Varint for number of block loc hashes (501)

View File

@ -15,7 +15,7 @@ import (
// get headers in the chain of basic (0x00) or extended (0x01) headers.
type MsgGetCFCheckpt struct {
FilterType FilterType
StopHash daghash.Hash
StopHash *daghash.Hash
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
@ -26,7 +26,8 @@ func (msg *MsgGetCFCheckpt) BtcDecode(r io.Reader, pver uint32) error {
return err
}
return readElement(r, &msg.StopHash)
msg.StopHash = &daghash.Hash{}
return readElement(r, msg.StopHash)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
@ -37,7 +38,7 @@ func (msg *MsgGetCFCheckpt) BtcEncode(w io.Writer, pver uint32) error {
return err
}
return writeElement(w, &msg.StopHash)
return writeElement(w, msg.StopHash)
}
// Command returns the protocol command string for the message. This is part
@ -59,6 +60,6 @@ func (msg *MsgGetCFCheckpt) MaxPayloadLength(pver uint32) uint32 {
func NewMsgGetCFCheckpt(filterType FilterType, stopHash *daghash.Hash) *MsgGetCFCheckpt {
return &MsgGetCFCheckpt{
FilterType: filterType,
StopHash: *stopHash,
StopHash: stopHash,
}
}

View File

@ -16,7 +16,7 @@ import (
type MsgGetCFHeaders struct {
FilterType FilterType
StartHeight uint32
StopHash daghash.Hash
StopHash *daghash.Hash
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
@ -32,7 +32,8 @@ func (msg *MsgGetCFHeaders) BtcDecode(r io.Reader, pver uint32) error {
return err
}
return readElement(r, &msg.StopHash)
msg.StopHash = &daghash.Hash{}
return readElement(r, msg.StopHash)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
@ -48,7 +49,7 @@ func (msg *MsgGetCFHeaders) BtcEncode(w io.Writer, pver uint32) error {
return err
}
return writeElement(w, &msg.StopHash)
return writeElement(w, msg.StopHash)
}
// Command returns the protocol command string for the message. This is part
@ -72,6 +73,6 @@ func NewMsgGetCFHeaders(filterType FilterType, startHeight uint32,
return &MsgGetCFHeaders{
FilterType: filterType,
StartHeight: startHeight,
StopHash: *stopHash,
StopHash: stopHash,
}
}

View File

@ -20,7 +20,7 @@ const MaxGetCFiltersReqRange = 1000
type MsgGetCFilters struct {
FilterType FilterType
StartHeight uint32
StopHash daghash.Hash
StopHash *daghash.Hash
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
@ -36,7 +36,8 @@ func (msg *MsgGetCFilters) BtcDecode(r io.Reader, pver uint32) error {
return err
}
return readElement(r, &msg.StopHash)
msg.StopHash = &daghash.Hash{}
return readElement(r, msg.StopHash)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
@ -52,7 +53,7 @@ func (msg *MsgGetCFilters) BtcEncode(w io.Writer, pver uint32) error {
return err
}
return writeElement(w, &msg.StopHash)
return writeElement(w, msg.StopHash)
}
// Command returns the protocol command string for the message. This is part
@ -76,6 +77,6 @@ func NewMsgGetCFilters(filterType FilterType, startHeight uint32,
return &MsgGetCFilters{
FilterType: filterType,
StartHeight: startHeight,
StopHash: *stopHash,
StopHash: stopHash,
}
}

View File

@ -30,7 +30,7 @@ import (
type MsgGetHeaders struct {
ProtocolVersion uint32
BlockLocatorHashes []*daghash.Hash
HashStop daghash.Hash
HashStop *daghash.Hash
}
// AddBlockLocatorHash adds a new block locator hash to the message.
@ -77,7 +77,8 @@ func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32) error {
msg.AddBlockLocatorHash(hash)
}
return readElement(r, &msg.HashStop)
msg.HashStop = &daghash.Hash{}
return readElement(r, msg.HashStop)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
@ -108,7 +109,7 @@ func (msg *MsgGetHeaders) BtcEncode(w io.Writer, pver uint32) error {
}
}
return writeElement(w, &msg.HashStop)
return writeElement(w, msg.HashStop)
}
// Command returns the protocol command string for the message. This is part
@ -132,5 +133,6 @@ func NewMsgGetHeaders() *MsgGetHeaders {
return &MsgGetHeaders{
BlockLocatorHashes: make([]*daghash.Hash, 0,
MaxBlockLocatorsPerMsg),
HashStop: &daghash.ZeroHash,
}
}

View File

@ -111,7 +111,7 @@ func TestGetHeadersWire(t *testing.T) {
// MsgGetHeaders message with multiple block locators and a stop hash.
multiLocators := NewMsgGetHeaders()
multiLocators.ProtocolVersion = pver
multiLocators.HashStop = *hashStop
multiLocators.HashStop = hashStop
multiLocators.AddBlockLocatorHash(hashLocator2)
multiLocators.AddBlockLocatorHash(hashLocator)
multiLocatorsEncoded := []byte{
@ -218,7 +218,7 @@ func TestGetHeadersWireErrors(t *testing.T) {
// MsgGetHeaders message with multiple block locators and a stop hash.
baseGetHeaders := NewMsgGetHeaders()
baseGetHeaders.ProtocolVersion = pver
baseGetHeaders.HashStop = *hashStop
baseGetHeaders.HashStop = hashStop
baseGetHeaders.AddBlockLocatorHash(hashLocator2)
baseGetHeaders.AddBlockLocatorHash(hashLocator)
baseGetHeadersEncoded := []byte{
@ -242,10 +242,10 @@ func TestGetHeadersWireErrors(t *testing.T) {
// block locator hashes.
maxGetHeaders := NewMsgGetHeaders()
for i := 0; i < MaxBlockLocatorsPerMsg; i++ {
maxGetHeaders.AddBlockLocatorHash(&mainNetGenesisHash)
maxGetHeaders.AddBlockLocatorHash(mainNetGenesisHash)
}
maxGetHeaders.BlockLocatorHashes = append(maxGetHeaders.BlockLocatorHashes,
&mainNetGenesisHash)
mainNetGenesisHash)
maxGetHeadersEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Protocol version 1
0xfd, 0xf5, 0x01, // Varint for number of block loc hashes (501)

View File

@ -61,12 +61,12 @@ func TestHeaders(t *testing.T) {
// TestHeadersWire tests the MsgHeaders wire encode and decode for various
// numbers of headers and protocol versions.
func TestHeadersWire(t *testing.T) {
hashes := []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
hashes := []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
hashMerkleRoot := blockOne.Header.HashMerkleRoot
idMerkleRoot := blockOne.Header.IDMerkleRoot
bits := uint32(0x1d00ffff)
nonce := uint64(0x9962e301)
bh := NewBlockHeader(1, hashes, &hashMerkleRoot, &idMerkleRoot, bits, nonce)
bh := NewBlockHeader(1, hashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
bh.Version = blockOne.Header.Version
bh.Timestamp = blockOne.Header.Timestamp
@ -165,12 +165,12 @@ func TestHeadersWireErrors(t *testing.T) {
pver := ProtocolVersion
wireErr := &MessageError{}
hashes := []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
hashes := []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
hashMerkleRoot := blockOne.Header.HashMerkleRoot
idMerkleRoot := blockOne.Header.IDMerkleRoot
bits := uint32(0x1d00ffff)
nonce := uint64(0x9962e301)
bh := NewBlockHeader(1, hashes, &hashMerkleRoot, &idMerkleRoot, bits, nonce)
bh := NewBlockHeader(1, hashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
bh.Version = blockOne.Header.Version
bh.Timestamp = blockOne.Header.Timestamp
@ -216,7 +216,7 @@ func TestHeadersWireErrors(t *testing.T) {
// Intentionally invalid block header that has a transaction count used
// to force errors.
bhTrans := NewBlockHeader(1, hashes, &hashMerkleRoot, &idMerkleRoot, bits, nonce)
bhTrans := NewBlockHeader(1, hashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
bhTrans.Version = blockOne.Header.Version
bhTrans.Timestamp = blockOne.Header.Timestamp

View File

@ -66,15 +66,16 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error {
// Create a contiguous slice of hashes to deserialize into in order to
// reduce the number of allocations.
hashes := make([]daghash.Hash, count)
hashes := make([]*daghash.Hash, count)
msg.Hashes = make([]*daghash.Hash, 0, count)
for i := uint64(0); i < count; i++ {
hash := &hashes[i]
hash := &daghash.Hash{}
err := readElement(r, hash)
if err != nil {
return err
}
msg.AddTxHash(hash)
hashes[i] = hash
}
msg.Flags, err = ReadVarBytes(r, pver, maxFlagsPerMerkleBlock,

View File

@ -22,8 +22,8 @@ func TestMerkleBlock(t *testing.T) {
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := &blockOne.Header.HashMerkleRoot
idMerkleRoot := &blockOne.Header.IDMerkleRoot
hashMerkleRoot := blockOne.Header.HashMerkleRoot
idMerkleRoot := blockOne.Header.IDMerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
@ -115,8 +115,8 @@ func TestMerkleBlock(t *testing.T) {
func TestMerkleBlockCrossProtocol(t *testing.T) {
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := &blockOne.Header.HashMerkleRoot
idMerkleRoot := &blockOne.Header.IDMerkleRoot
hashMerkleRoot := blockOne.Header.HashMerkleRoot
idMerkleRoot := blockOne.Header.IDMerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
@ -319,13 +319,13 @@ func TestMerkleBlockOverflowErrors(t *testing.T) {
var merkleBlockOne = MsgMerkleBlock{
Header: BlockHeader{
Version: 1,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: daghash.Hash([daghash.HashSize]byte{ // Make go vet happy.
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: &daghash.Hash{ // Make go vet happy.
0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e,
}),
},
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
Bits: 0x1d00ffff, // 486604799

View File

@ -68,7 +68,7 @@ type MsgReject struct {
// Hash identifies a specific block or transaction that was rejected
// and therefore only applies the MsgBlock and MsgTx messages.
Hash daghash.Hash
Hash *daghash.Hash
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
@ -98,7 +98,8 @@ func (msg *MsgReject) BtcDecode(r io.Reader, pver uint32) error {
// CmdBlock and CmdTx messages have an additional hash field that
// identifies the specific block or transaction.
if msg.Cmd == CmdBlock || msg.Cmd == CmdTx {
err := readElement(r, &msg.Hash)
msg.Hash = &daghash.Hash{}
err := readElement(r, msg.Hash)
if err != nil {
return err
}
@ -132,7 +133,7 @@ func (msg *MsgReject) BtcEncode(w io.Writer, pver uint32) error {
// CmdBlock and CmdTx messages have an additional hash field that
// identifies the specific block or transaction.
if msg.Cmd == CmdBlock || msg.Cmd == CmdTx {
err := writeElement(w, &msg.Hash)
err := writeElement(w, msg.Hash)
if err != nil {
return err
}
@ -163,5 +164,6 @@ func NewMsgReject(command string, code RejectCode, reason string) *MsgReject {
Cmd: command,
Code: code,
Reason: reason,
Hash: &daghash.ZeroHash,
}
}

View File

@ -112,7 +112,7 @@ func TestRejectLatest(t *testing.T) {
t.Errorf("Should get same reject reason - got %v, want %v",
readMsg.Reason, msg.Reason)
}
if msg.Hash != readMsg.Hash {
if !msg.Hash.IsEqual(readMsg.Hash) {
t.Errorf("Should get same reject hash - got %v, want %v",
readMsg.Hash, msg.Hash)
}