[DEV-211] Change fields of serialized blocks and transactions to match spec (#104)

* [DEV-211] change block fields

* [DEV-211] change block fields

* [DEV-211] change comments to adhere to the new block field names
This commit is contained in:
Ori Newman 2018-11-05 13:11:42 +02:00 committed by stasatdaglabs
parent d70e2be641
commit 9519b9f2a1
43 changed files with 518 additions and 518 deletions

View File

@@ -23,7 +23,7 @@ import (
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error { func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
// The height of this block is one more than the referenced previous // The height of this block is one more than the referenced previous
// block. // block.
parents, err := lookupPreviousNodes(block, dag) parents, err := lookupParentNodes(block, dag)
if err != nil { if err != nil {
return err return err
} }
@@ -85,18 +85,18 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
return nil return nil
} }
func lookupPreviousNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) { func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
header := block.MsgBlock().Header header := block.MsgBlock().Header
prevHashes := header.PrevBlocks parentHashes := header.ParentHashes
nodes := newSet() nodes := newSet()
for _, prevHash := range prevHashes { for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(&prevHash) node := blockDAG.index.LookupNode(&parentHash)
if node == nil { if node == nil {
str := fmt.Sprintf("previous block %s is unknown", prevHashes) str := fmt.Sprintf("parent block %s is unknown", parentHashes)
return nil, ruleError(ErrPreviousBlockUnknown, str) return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() { } else if blockDAG.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("previous block %s is known to be invalid", prevHashes) str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
return nil, ruleError(ErrInvalidAncestorBlock, str) return nil, ruleError(ErrInvalidAncestorBlock, str)
} }

View File

@@ -33,15 +33,15 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
err = dag.maybeAcceptBlock(block, BFNone) err = dag.maybeAcceptBlock(block, BFNone)
if err == nil { if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+ t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected: %s, got: <nil>", ErrPreviousBlockUnknown) "Expected: %s, got: <nil>", ErrParentBlockUnknown)
} }
ruleErr, ok := err.(RuleError) ruleErr, ok := err.(RuleError)
if !ok { if !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+ t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected RuleError but got %s", err) "Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrPreviousBlockUnknown { } else if ruleErr.ErrorCode != ErrParentBlockUnknown {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+ t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Unexpected error code. Want: %s, got: %s", ErrPreviousBlockUnknown, ruleErr.ErrorCode) "Unexpected error code. Want: %s, got: %s", ErrParentBlockUnknown, ruleErr.ErrorCode)
} }
// Test rejecting the block if its parents are invalid // Test rejecting the block if its parents are invalid

View File

@@ -164,8 +164,8 @@ func (node *blockNode) Header() *wire.BlockHeader {
// No lock is needed because all accessed fields are immutable. // No lock is needed because all accessed fields are immutable.
return &wire.BlockHeader{ return &wire.BlockHeader{
Version: node.version, Version: node.version,
NumPrevBlocks: byte(len(node.parents)), NumParentBlocks: byte(len(node.parents)),
PrevBlocks: node.PrevHashes(), ParentHashes: node.ParentHashes(),
MerkleRoot: node.merkleRoot, MerkleRoot: node.merkleRoot,
Timestamp: time.Unix(node.timestamp, 0), Timestamp: time.Unix(node.timestamp, 0),
Bits: node.bits, Bits: node.bits,
@@ -228,7 +228,7 @@ func (node *blockNode) CalcPastMedianTime() time.Time {
return time.Unix(medianTimestamp, 0) return time.Unix(medianTimestamp, 0)
} }
func (node *blockNode) PrevHashes() []daghash.Hash { func (node *blockNode) ParentHashes() []daghash.Hash {
return node.parents.hashes() return node.parents.hashes()
} }

View File

@@ -187,7 +187,7 @@ func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp ti
// Make up a header and create a block node from it. // Make up a header and create a block node from it.
header := &wire.BlockHeader{ header := &wire.BlockHeader{
Version: blockVersion, Version: blockVersion,
PrevBlocks: parents.hashes(), ParentHashes: parents.hashes(),
Bits: bits, Bits: bits,
Timestamp: timestamp, Timestamp: timestamp,
} }

View File

@@ -205,14 +205,14 @@ func (dag *BlockDAG) GetOrphanRoot(hash *daghash.Hash) *daghash.Hash {
// Keep looping while the parent of each orphaned block is // Keep looping while the parent of each orphaned block is
// known and is an orphan itself. // known and is an orphan itself.
orphanRoot := hash orphanRoot := hash
prevHash := hash parentHash := hash
for { for {
orphan, exists := dag.orphans[*prevHash] orphan, exists := dag.orphans[*parentHash]
if !exists { if !exists {
break break
} }
orphanRoot = prevHash orphanRoot = parentHash
prevHash = orphan.block.MsgBlock().Header.SelectedPrevBlock() parentHash = orphan.block.MsgBlock().Header.SelectedParentHash()
} }
return orphanRoot return orphanRoot
@@ -233,8 +233,8 @@ func (dag *BlockDAG) removeOrphanBlock(orphan *orphanBlock) {
// for loop is intentionally used over a range here as range does not // for loop is intentionally used over a range here as range does not
// reevaluate the slice on each iteration nor does it adjust the index // reevaluate the slice on each iteration nor does it adjust the index
// for the modified slice. // for the modified slice.
prevHash := orphan.block.MsgBlock().Header.SelectedPrevBlock() parentHash := orphan.block.MsgBlock().Header.SelectedParentHash()
orphans := dag.prevOrphans[*prevHash] orphans := dag.prevOrphans[*parentHash]
for i := 0; i < len(orphans); i++ { for i := 0; i < len(orphans); i++ {
hash := orphans[i].block.Hash() hash := orphans[i].block.Hash()
if hash.IsEqual(orphanHash) { if hash.IsEqual(orphanHash) {
@@ -244,12 +244,12 @@ func (dag *BlockDAG) removeOrphanBlock(orphan *orphanBlock) {
i-- i--
} }
} }
dag.prevOrphans[*prevHash] = orphans dag.prevOrphans[*parentHash] = orphans
// Remove the map entry altogether if there are no longer any orphans // Remove the map entry altogether if there are no longer any orphans
// which depend on the parent hash. // which depend on the parent hash.
if len(dag.prevOrphans[*prevHash]) == 0 { if len(dag.prevOrphans[*parentHash]) == 0 {
delete(dag.prevOrphans, *prevHash) delete(dag.prevOrphans, *parentHash)
} }
} }
@@ -296,9 +296,9 @@ func (dag *BlockDAG) addOrphanBlock(block *util.Block) {
} }
dag.orphans[*block.Hash()] = oBlock dag.orphans[*block.Hash()] = oBlock
// Add to previous hash lookup index for faster dependency lookups. // Add to parent hash lookup index for faster dependency lookups.
prevHash := block.MsgBlock().Header.SelectedPrevBlock() parentHash := block.MsgBlock().Header.SelectedParentHash()
dag.prevOrphans[*prevHash] = append(dag.prevOrphans[*prevHash], oBlock) dag.prevOrphans[*parentHash] = append(dag.prevOrphans[*parentHash], oBlock)
} }
// SequenceLock represents the converted relative lock-time in seconds, and // SequenceLock represents the converted relative lock-time in seconds, and

View File

@@ -587,7 +587,7 @@ func chainedNodes(parents blockSet, numNodes int) []*blockNode {
// This is invalid, but all that is needed is enough to get the // This is invalid, but all that is needed is enough to get the
// synthetic tests to work. // synthetic tests to work.
header := wire.BlockHeader{Nonce: testNoncePrng.Uint64()} header := wire.BlockHeader{Nonce: testNoncePrng.Uint64()}
header.PrevBlocks = tips.hashes() header.ParentHashes = tips.hashes()
nodes[i] = newBlockNode(&header, tips, dagconfig.SimNetParams.K) nodes[i] = newBlockNode(&header, tips, dagconfig.SimNetParams.K)
tips = setFromSlice(nodes[i]) tips = setFromSlice(nodes[i])
} }

View File

@@ -892,7 +892,7 @@ func (dag *BlockDAG) initDAGState() error {
"found %s", blockHash)) "found %s", blockHash))
} }
} else { } else {
for _, hash := range header.PrevBlocks { for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(&hash) parent := dag.index.LookupNode(&hash)
if parent == nil { if parent == nil {
return AssertError(fmt.Sprintf("initDAGState: Could "+ return AssertError(fmt.Sprintf("initDAGState: Could "+

View File

@@ -194,17 +194,17 @@ const (
// the stack. // the stack.
ErrScriptValidation ErrScriptValidation
// ErrPreviousBlockUnknown indicates that the previous block is not known. // ErrParentBlockUnknown indicates that the parent block is not known.
ErrPreviousBlockUnknown ErrParentBlockUnknown
// ErrInvalidAncestorBlock indicates that an ancestor of this block has // ErrInvalidAncestorBlock indicates that an ancestor of this block has
// already failed validation. // already failed validation.
ErrInvalidAncestorBlock ErrInvalidAncestorBlock
// ErrPrevBlockNotBest indicates that the block's previous block is not the // ErrParentBlockNotCurrentTips indicates that the block's parents are not the
// current chain tip. This is not a block validation rule, but is required // current tips. This is not a block validation rule, but is required
// for block proposals submitted via getblocktemplate RPC. // for block proposals submitted via getblocktemplate RPC.
ErrPrevBlockNotBest ErrParentBlockNotCurrentTips
// ErrWithDiff indicates that there was an error with UTXOSet.WithDiff // ErrWithDiff indicates that there was an error with UTXOSet.WithDiff
ErrWithDiff ErrWithDiff
@@ -248,9 +248,9 @@ var errorCodeStrings = map[ErrorCode]string{
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight", ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed", ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation", ErrScriptValidation: "ErrScriptValidation",
ErrPreviousBlockUnknown: "ErrPreviousBlockUnknown", ErrParentBlockUnknown: "ErrParentBlockUnknown",
ErrInvalidAncestorBlock: "ErrInvalidAncestorBlock", ErrInvalidAncestorBlock: "ErrInvalidAncestorBlock",
ErrPrevBlockNotBest: "ErrPrevBlockNotBest", ErrParentBlockNotCurrentTips: "ErrParentBlockNotCurrentTips",
ErrWithDiff: "ErrWithDiff", ErrWithDiff: "ErrWithDiff",
} }

View File

@@ -51,9 +51,9 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"}, {ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"},
{ErrScriptMalformed, "ErrScriptMalformed"}, {ErrScriptMalformed, "ErrScriptMalformed"},
{ErrScriptValidation, "ErrScriptValidation"}, {ErrScriptValidation, "ErrScriptValidation"},
{ErrPreviousBlockUnknown, "ErrPreviousBlockUnknown"}, {ErrParentBlockUnknown, "ErrParentBlockUnknown"},
{ErrInvalidAncestorBlock, "ErrInvalidAncestorBlock"}, {ErrInvalidAncestorBlock, "ErrInvalidAncestorBlock"},
{ErrPrevBlockNotBest, "ErrPrevBlockNotBest"}, {ErrParentBlockNotCurrentTips, "ErrParentBlockNotCurrentTips"},
{ErrWithDiff, "ErrWithDiff"}, {ErrWithDiff, "ErrWithDiff"},
{0xffff, "Unknown ErrorCode (65535)"}, {0xffff, "Unknown ErrorCode (65535)"},
} }

View File

@@ -510,8 +510,8 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
block := wire.MsgBlock{ block := wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 1, // TODO: (Stas) This is wrong. Modified only to satisfy compilation. NumParentBlocks: 1, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
PrevBlocks: []daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation. ParentHashes: []daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
MerkleRoot: calcMerkleRoot(txns), MerkleRoot: calcMerkleRoot(txns),
Bits: g.params.PowLimitBits, Bits: g.params.PowLimitBits,
Timestamp: ts, Timestamp: ts,
@@ -607,7 +607,7 @@ func (g *testGenerator) saveSpendableCoinbaseOuts() {
// reaching the block that has already had the coinbase outputs // reaching the block that has already had the coinbase outputs
// collected. // collected.
var collectBlocks []*wire.MsgBlock var collectBlocks []*wire.MsgBlock
for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedPrevBlock()] { for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedParentHash()] {
if b.BlockHash() == g.prevCollectedHash { if b.BlockHash() == g.prevCollectedHash {
break break
} }
@@ -1554,9 +1554,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) // ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14)
// \-> b54(15) // \-> b54(15)
g.nextBlock("b54", outs[15], func(b *wire.MsgBlock) { g.nextBlock("b54", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedPrevBlock()] medianBlock := g.blocks[*b.Header.SelectedParentHash()]
for i := 0; i < medianTimeBlocks/2; i++ { for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedPrevBlock()] medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
} }
b.Header.Timestamp = medianBlock.Header.Timestamp b.Header.Timestamp = medianBlock.Header.Timestamp
}) })
@@ -1568,9 +1568,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) -> b55(15) // ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) -> b55(15)
g.setTip("b53") g.setTip("b53")
g.nextBlock("b55", outs[15], func(b *wire.MsgBlock) { g.nextBlock("b55", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedPrevBlock()] medianBlock := g.blocks[*b.Header.SelectedParentHash()]
for i := 0; i < medianTimeBlocks/2; i++ { for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedPrevBlock()] medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
} }
medianBlockTime := medianBlock.Header.Timestamp medianBlockTime := medianBlock.Header.Timestamp
b.Header.Timestamp = medianBlockTime.Add(time.Second) b.Header.Timestamp = medianBlockTime.Add(time.Second)
@@ -1718,7 +1718,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.nextBlock("b61", outs[18], func(b *wire.MsgBlock) { g.nextBlock("b61", outs[18], func(b *wire.MsgBlock) {
// Duplicate the coinbase of the parent block to force the // Duplicate the coinbase of the parent block to force the
// condition. // condition.
parent := g.blocks[*b.Header.SelectedPrevBlock()] parent := g.blocks[*b.Header.SelectedParentHash()]
b.Transactions[0] = parent.Transactions[0] b.Transactions[0] = parent.Transactions[0]
}) })
rejected(blockdag.ErrOverwriteTx) rejected(blockdag.ErrOverwriteTx)

View File

@@ -55,8 +55,8 @@ var (
regTestGenesisBlock = wire.MsgBlock{ regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"), MerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]

View File

@@ -179,7 +179,7 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
if header.IsGenesis() { if header.IsGenesis() {
prevHeader = &zeroHash prevHeader = &zeroHash
} else { } else {
ph := header.SelectedPrevBlock() ph := header.SelectedParentHash()
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph) pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
if err != nil { if err != nil {
return err return err

View File

@@ -210,22 +210,22 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool,
} }
// Handle orphan blocks. // Handle orphan blocks.
allPrevBlocksExist := true allParentsExist := true
for _, prevBlock := range blockHeader.PrevBlocks { for _, parentHash := range blockHeader.ParentHashes {
prevBlockExists, err := dag.blockExists(&prevBlock) parentExists, err := dag.blockExists(&parentHash)
if err != nil { if err != nil {
return false, err return false, err
} }
if !prevBlockExists { if !parentExists {
log.Infof("Adding orphan block %v with parent %v", blockHash, prevBlock) log.Infof("Adding orphan block %v with parent %v", blockHash, parentHash)
dag.addOrphanBlock(block) dag.addOrphanBlock(block)
allPrevBlocksExist = false allParentsExist = false
} }
} }
if !allPrevBlocksExist { if !allParentsExist {
return true, nil return true, nil
} }

View File

@@ -431,14 +431,14 @@ func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSou
//checkBlockParentsOrder ensures that the block's parents are ordered by hash //checkBlockParentsOrder ensures that the block's parents are ordered by hash
func checkBlockParentsOrder(header *wire.BlockHeader) error { func checkBlockParentsOrder(header *wire.BlockHeader) error {
sortedHashes := make([]daghash.Hash, 0, len(header.PrevBlocks)) sortedHashes := make([]daghash.Hash, 0, len(header.ParentHashes))
for _, hash := range header.PrevBlocks { for _, hash := range header.ParentHashes {
sortedHashes = append(sortedHashes, hash) sortedHashes = append(sortedHashes, hash)
} }
sort.Slice(sortedHashes, func(i, j int) bool { sort.Slice(sortedHashes, func(i, j int) bool {
return daghash.Less(&sortedHashes[i], &sortedHashes[j]) return daghash.Less(&sortedHashes[i], &sortedHashes[j])
}) })
if !daghash.AreEqual(header.PrevBlocks, sortedHashes) { if !daghash.AreEqual(header.ParentHashes, sortedHashes) {
return ruleError(ErrWrongParentsOrder, "block parents are not ordered by hash") return ruleError(ErrWrongParentsOrder, "block parents are not ordered by hash")
} }
return nil return nil
@@ -1076,11 +1076,11 @@ func (dag *BlockDAG) CheckConnectBlockTemplate(block *util.Block) error {
// current chain. // current chain.
tips := dag.virtual.tips() tips := dag.virtual.tips()
header := block.MsgBlock().Header header := block.MsgBlock().Header
prevHashes := header.PrevBlocks parentHashes := header.ParentHashes
if !tips.hashesEqual(prevHashes) { if !tips.hashesEqual(parentHashes) {
str := fmt.Sprintf("previous blocks must be the currents tips %v, "+ str := fmt.Sprintf("parent blocks must be the currents tips %v, "+
"instead got %v", tips, prevHashes) "instead got %v", tips, parentHashes)
return ruleError(ErrPrevBlockNotBest, str) return ruleError(ErrParentBlockNotCurrentTips, str)
} }
err := checkBlockSanity(block, dag.dagParams.PowLimit, dag.timeSource, flags) err := checkBlockSanity(block, dag.dagParams.PowLimit, dag.timeSource, flags)
@@ -1088,7 +1088,7 @@ func (dag *BlockDAG) CheckConnectBlockTemplate(block *util.Block) error {
return err return err
} }
parents, err := lookupPreviousNodes(block, dag) parents, err := lookupParentNodes(block, dag)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -169,8 +169,8 @@ func TestCheckBlockSanity(t *testing.T) {
var invalidParentsOrderBlock = wire.MsgBlock{ var invalidParentsOrderBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{ ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy. [32]byte{ // Make go vet happy.
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
@@ -610,8 +610,8 @@ func TestValidateParents(t *testing.T) {
var Block100000 = wire.MsgBlock{ var Block100000 = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{ ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy. [32]byte{ // Make go vet happy.
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,

View File

@@ -20,7 +20,7 @@ type GetBlockHeaderVerboseResult struct {
Nonce uint64 `json:"nonce"` Nonce uint64 `json:"nonce"`
Bits string `json:"bits"` Bits string `json:"bits"`
Difficulty float64 `json:"difficulty"` Difficulty float64 `json:"difficulty"`
PreviousHashes []string `json:"previousblockhashes,omitempty"` ParentHashes []string `json:"parentblockhashes,omitempty"`
NextHashes []string `json:"nextblockhashes,omitempty"` NextHashes []string `json:"nextblockhashes,omitempty"`
} }
@@ -41,7 +41,7 @@ type GetBlockVerboseResult struct {
Nonce uint64 `json:"nonce"` Nonce uint64 `json:"nonce"`
Bits string `json:"bits"` Bits string `json:"bits"`
Difficulty float64 `json:"difficulty"` Difficulty float64 `json:"difficulty"`
PreviousHashes []string `json:"previousblockhashes"` ParentHashes []string `json:"parentblockhashes"`
NextHashes []string `json:"nextblockhashes,omitempty"` NextHashes []string `json:"nextblockhashes,omitempty"`
} }
@@ -136,7 +136,7 @@ type GetBlockTemplateResult struct {
Bits string `json:"bits"` Bits string `json:"bits"`
CurTime int64 `json:"curtime"` CurTime int64 `json:"curtime"`
Height int64 `json:"height"` Height int64 `json:"height"`
PreviousHashes []string `json:"previousblockhashes"` ParentHashes []string `json:"parentblockhashes"`
SigOpLimit int64 `json:"sigoplimit,omitempty"` SigOpLimit int64 `json:"sigoplimit,omitempty"`
SizeLimit int64 `json:"sizelimit,omitempty"` SizeLimit int64 `json:"sizelimit,omitempty"`
Transactions []GetBlockTemplateResultTx `json:"transactions"` Transactions []GetBlockTemplateResultTx `json:"transactions"`

View File

@@ -111,16 +111,16 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
} }
// Don't bother trying to process orphans. // Don't bother trying to process orphans.
prevBlocks := block.MsgBlock().Header.PrevBlocks parentHashes := block.MsgBlock().Header.ParentHashes
if len(prevBlocks) > 0 { if len(parentHashes) > 0 {
exist, err := bi.dag.HaveBlocks(prevBlocks) exist, err := bi.dag.HaveBlocks(parentHashes)
if err != nil { if err != nil {
return false, err return false, err
} }
if !exist { if !exist {
return false, fmt.Errorf("import file contains block "+ return false, fmt.Errorf("import file contains block "+
"%v which does not link to the available "+ "%v which does not link to the available "+
"block DAG", prevBlocks) "block DAG", parentHashes)
} }
} }

View File

@@ -106,8 +106,8 @@ func findCandidates(dag *blockdag.BlockDAG, highestTipHash *daghash.Hash) ([]*da
candidates = append(candidates, &checkpoint) candidates = append(candidates, &checkpoint)
} }
prevBlockHashes := block.MsgBlock().Header.PrevBlocks parentHashes := block.MsgBlock().Header.ParentHashes
selectedBlockHash := &prevBlockHashes[0] selectedBlockHash := &parentHashes[0]
block, err = dag.BlockByHash(selectedBlockHash) block, err = dag.BlockByHash(selectedBlockHash)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -79,8 +79,8 @@ var genesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happ
var genesisBlock = wire.MsgBlock{ var genesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe076c, 0), // 2018-10-10 14:06:36 +0000 UTC Timestamp: time.Unix(0x5bbe076c, 0), // 2018-10-10 14:06:36 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000] Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
@@ -108,8 +108,8 @@ var regTestGenesisMerkleRoot = genesisMerkleRoot
var regTestGenesisBlock = wire.MsgBlock{ var regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe0d4b, 0), // 2018-06-19 09:00:38 +0000 UTC Timestamp: time.Unix(0x5bbe0d4b, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
@@ -137,8 +137,8 @@ var testNet3GenesisMerkleRoot = genesisMerkleRoot
var testNet3GenesisBlock = wire.MsgBlock{ var testNet3GenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe0e49, 0), // 2018-06-19 09:04:06 +0000 UTC Timestamp: time.Unix(0x5bbe0e49, 0), // 2018-06-19 09:04:06 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000] Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
@@ -166,8 +166,8 @@ var simNetGenesisMerkleRoot = genesisMerkleRoot
var simNetGenesisBlock = wire.MsgBlock{ var simNetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe00fe, 0), // 2018-10-10 13:39:10 +0000 UTC Timestamp: time.Unix(0x5bbe00fe, 0), // 2018-10-10 13:39:10 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]

View File

@@ -131,11 +131,11 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
} }
// Don't bother trying to process orphans. // Don't bother trying to process orphans.
prevHashes := block.MsgBlock().Header.PrevBlocks parentHashes := block.MsgBlock().Header.ParentHashes
for _, prevHash := range prevHashes { for _, parentHash := range parentHashes {
var exists bool var exists bool
err := bi.db.View(func(tx database.Tx) error { err := bi.db.View(func(tx database.Tx) error {
exists, err = tx.HasBlock(&prevHash) exists, err = tx.HasBlock(&parentHash)
return err return err
}) })
if err != nil { if err != nil {
@@ -144,7 +144,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
if !exists { if !exists {
return false, fmt.Errorf("import file contains block "+ return false, fmt.Errorf("import file contains block "+
"%v which does not link to the available "+ "%v which does not link to the available "+
"block chain", prevHash) "block chain", parentHash)
} }
} }

View File

@@ -1234,23 +1234,23 @@ func (tx *transaction) fetchBlockRow(hash *daghash.Hash) ([]byte, error) {
return blockRow, nil return blockRow, nil
} }
// The offset in a block header at which numPrevBlocks resides. // The offset in a block header at which NumParentBlocks resides.
const numPrevBlocksOffset = 4 const numParentBlocksOffset = 4
// fetchBlockHeaderSize fetches the numPrevBlocks field out of the block header // fetchBlockHeaderSize fetches the NumParentBlocks field out of the block header
// and uses it to compute the total size of the block header // and uses it to compute the total size of the block header
func (tx *transaction) fetchBlockHeaderSize(hash *daghash.Hash) (byte, error) { func (tx *transaction) fetchBlockHeaderSize(hash *daghash.Hash) (byte, error) {
r, err := tx.FetchBlockRegion(&database.BlockRegion{ r, err := tx.FetchBlockRegion(&database.BlockRegion{
Hash: hash, Hash: hash,
Offset: numPrevBlocksOffset, Offset: numParentBlocksOffset,
Len: 1, Len: 1,
}) })
if err != nil { if err != nil {
return 0, err return 0, err
} }
numPrevBlocks := r[0] numParentBlocks := r[0]
return numPrevBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload, nil return numParentBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload, nil
} }
// FetchBlockHeader returns the raw serialized bytes for the block header // FetchBlockHeader returns the raw serialized bytes for the block header
@@ -1282,13 +1282,13 @@ func (tx *transaction) FetchBlockHeader(hash *daghash.Hash) ([]byte, error) {
}) })
} }
// fetchBlockHeadersSizes fetches the numPrevBlocks fields out of the block headers // fetchBlockHeadersSizes fetches the NumParentBlocks fields out of the block headers
// and uses it to compute the total sizes of the block headers // and uses it to compute the total sizes of the block headers
func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, error) { func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, error) {
regions := make([]database.BlockRegion, len(hashes)) regions := make([]database.BlockRegion, len(hashes))
for i := range hashes { for i := range hashes {
regions[i].Hash = &hashes[i] regions[i].Hash = &hashes[i]
regions[i].Offset = numPrevBlocksOffset regions[i].Offset = numParentBlocksOffset
regions[i].Len = 1 regions[i].Len = 1
} }
rs, err := tx.FetchBlockRegions(regions) rs, err := tx.FetchBlockRegions(regions)
@@ -1298,8 +1298,8 @@ func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, er
sizes := make([]byte, len(hashes)) sizes := make([]byte, len(hashes))
for i, r := range rs { for i, r := range rs {
numPrevBlocks := r[0] numParentBlocks := r[0]
sizes[i] = numPrevBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload sizes[i] = numParentBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload
} }
return sizes, nil return sizes, nil

View File

@@ -45,8 +45,8 @@ func generateBlock(parent *wire.MsgBlock) *wire.MsgBlock {
return &wire.MsgBlock{ return &wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 1, NumParentBlocks: 1,
PrevBlocks: []daghash.Hash{parent.BlockHash()}, ParentHashes: []daghash.Hash{parent.BlockHash()},
MerkleRoot: genesisMerkleRoot, MerkleRoot: genesisMerkleRoot,
Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000] Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]

View File

@ -435,22 +435,22 @@ func TestBIP0068AndCsv(t *testing.T) {
// Now mine 10 additional blocks giving the inputs generated above a // Now mine 10 additional blocks giving the inputs generated above a
// age of 11. Space out each block 10 minutes after the previous block. // age of 11. Space out each block 10 minutes after the previous block.
prevBlockHash, err := r.Node.GetBestBlockHash() parentBlockHash, err := r.Node.GetBestBlockHash()
if err != nil { if err != nil {
t.Fatalf("unable to get prior block hash: %v", err) t.Fatalf("unable to get prior block hash: %v", err)
} }
prevBlock, err := r.Node.GetBlock(prevBlockHash) parentBlock, err := r.Node.GetBlock(parentBlockHash)
if err != nil { if err != nil {
t.Fatalf("unable to get block: %v", err) t.Fatalf("unable to get block: %v", err)
} }
for i := 0; i < relativeBlockLock; i++ { for i := 0; i < relativeBlockLock; i++ {
timeStamp := prevBlock.Header.Timestamp.Add(time.Minute * 10) timeStamp := parentBlock.Header.Timestamp.Add(time.Minute * 10)
b, err := r.GenerateAndSubmitBlock(nil, -1, timeStamp) b, err := r.GenerateAndSubmitBlock(nil, -1, timeStamp)
if err != nil { if err != nil {
t.Fatalf("unable to generate block: %v", err) t.Fatalf("unable to generate block: %v", err)
} }
prevBlock = b.MsgBlock() parentBlock = b.MsgBlock()
} }
// A helper function to create fully signed transactions in-line during // A helper function to create fully signed transactions in-line during

View File

@ -132,37 +132,37 @@ func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int32,
// initialized), then the timestamp of the previous block will be used plus 1 // initialized), then the timestamp of the previous block will be used plus 1
// second is used. Passing nil for the previous block results in a block that // second is used. Passing nil for the previous block results in a block that
// builds off of the genesis block for the specified chain. // builds off of the genesis block for the specified chain.
func CreateBlock(prevBlock *util.Block, inclusionTxs []*util.Tx, func CreateBlock(parentBlock *util.Block, inclusionTxs []*util.Tx,
blockVersion int32, blockTime time.Time, miningAddr util.Address, blockVersion int32, blockTime time.Time, miningAddr util.Address,
mineTo []wire.TxOut, net *dagconfig.Params) (*util.Block, error) { mineTo []wire.TxOut, net *dagconfig.Params) (*util.Block, error) {
var ( var (
prevHash *daghash.Hash parentHash *daghash.Hash
blockHeight int32 blockHeight int32
prevBlockTime time.Time parentBlockTime time.Time
) )
// If the previous block isn't specified, then we'll construct a block // If the parent block isn't specified, then we'll construct a block
// that builds off of the genesis block for the chain. // that builds off of the genesis block for the chain.
if prevBlock == nil { if parentBlock == nil {
prevHash = net.GenesisHash parentHash = net.GenesisHash
blockHeight = 1 blockHeight = 1
prevBlockTime = net.GenesisBlock.Header.Timestamp.Add(time.Minute) parentBlockTime = net.GenesisBlock.Header.Timestamp.Add(time.Minute)
} else { } else {
prevHash = prevBlock.Hash() parentHash = parentBlock.Hash()
blockHeight = prevBlock.Height() + 1 blockHeight = parentBlock.Height() + 1
prevBlockTime = prevBlock.MsgBlock().Header.Timestamp parentBlockTime = parentBlock.MsgBlock().Header.Timestamp
} }
// If a target block time was specified, then use that as the header's // If a target block time was specified, then use that as the header's
// timestamp. Otherwise, add one second to the previous block unless // timestamp. Otherwise, add one second to the parent block unless
// it's the genesis block in which case use the current time. // it's the genesis block in which case use the current time.
var ts time.Time var ts time.Time
switch { switch {
case !blockTime.IsZero(): case !blockTime.IsZero():
ts = blockTime ts = blockTime
default: default:
ts = prevBlockTime.Add(time.Second) ts = parentBlockTime.Add(time.Second)
} }
extraNonce := uint64(0) extraNonce := uint64(0)
@ -185,7 +185,7 @@ func CreateBlock(prevBlock *util.Block, inclusionTxs []*util.Tx,
var block wire.MsgBlock var block wire.MsgBlock
block.Header = wire.BlockHeader{ block.Header = wire.BlockHeader{
Version: blockVersion, Version: blockVersion,
PrevBlocks: []daghash.Hash{*prevHash}, ParentHashes: []daghash.Hash{*parentHash},
MerkleRoot: *merkles[len(merkles)-1], MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts, Timestamp: ts,
Bits: net.PowLimitBits, Bits: net.PowLimitBits,

View File

@ -435,19 +435,19 @@ func (h *Harness) GenerateAndSubmitBlockWithCustomCoinbaseOutputs(
blockVersion = BlockVersion blockVersion = BlockVersion
} }
prevBlockHash, prevBlockHeight, err := h.Node.GetBestBlock() parentBlockHash, parentBlockHeight, err := h.Node.GetBestBlock()
if err != nil { if err != nil {
return nil, err return nil, err
} }
mBlock, err := h.Node.GetBlock(prevBlockHash) mBlock, err := h.Node.GetBlock(parentBlockHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
prevBlock := util.NewBlock(mBlock) parentBlock := util.NewBlock(mBlock)
prevBlock.SetHeight(prevBlockHeight) parentBlock.SetHeight(parentBlockHeight)
// Create a new block including the specified transactions // Create a new block including the specified transactions
newBlock, err := CreateBlock(prevBlock, txns, blockVersion, newBlock, err := CreateBlock(parentBlock, txns, blockVersion,
blockTime, h.wallet.coinbaseAddr, mineTo, h.ActiveNet) blockTime, h.wallet.coinbaseAddr, mineTo, h.ActiveNet)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -80,20 +80,20 @@ func syncBlocks(nodes []*Harness) error {
retry: retry:
for !blocksMatch { for !blocksMatch {
var prevHash *daghash.Hash var parentHash *daghash.Hash
var prevHeight int32 var prevHeight int32
for _, node := range nodes { for _, node := range nodes {
blockHash, blockHeight, err := node.Node.GetBestBlock() blockHash, blockHeight, err := node.Node.GetBestBlock()
if err != nil { if err != nil {
return err return err
} }
if prevHash != nil && (*blockHash != *prevHash || if parentHash != nil && (*blockHash != *parentHash ||
blockHeight != prevHeight) { blockHeight != prevHeight) {
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
continue retry continue retry
} }
prevHash, prevHeight = blockHash, blockHeight parentHash, prevHeight = blockHash, blockHeight
} }
blocksMatch = true blocksMatch = true

View File

@ -162,9 +162,9 @@ func (m *CPUMiner) submitBlock(block *util.Block) bool {
// a new block, but the check only happens periodically, so it is // a new block, but the check only happens periodically, so it is
// possible a block was found and submitted in between. // possible a block was found and submitted in between.
msgBlock := block.MsgBlock() msgBlock := block.MsgBlock()
if !daghash.AreEqual(msgBlock.Header.PrevBlocks, m.g.TipHashes()) { if !daghash.AreEqual(msgBlock.Header.ParentHashes, m.g.TipHashes()) {
log.Debugf("Block submitted via CPU miner with previous "+ log.Debugf("Block submitted via CPU miner with previous "+
"blocks %s is stale", msgBlock.Header.PrevBlocks) "blocks %s is stale", msgBlock.Header.ParentHashes)
return false return false
} }
@ -247,7 +247,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32,
hashesCompleted = 0 hashesCompleted = 0
// The current block is stale if the DAG has changed. // The current block is stale if the DAG has changed.
if !daghash.AreEqual(header.PrevBlocks, m.g.TipHashes()) { if !daghash.AreEqual(header.ParentHashes, m.g.TipHashes()) {
return false return false
} }

View File

@ -716,8 +716,8 @@ mempoolLoop:
var msgBlock wire.MsgBlock var msgBlock wire.MsgBlock
msgBlock.Header = wire.BlockHeader{ msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion, Version: nextBlockVersion,
NumPrevBlocks: byte(len(g.dag.TipHashes())), NumParentBlocks: byte(len(g.dag.TipHashes())),
PrevBlocks: g.dag.TipHashes(), ParentHashes: g.dag.TipHashes(),
MerkleRoot: *merkles[len(merkles)-1], MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts, Timestamp: ts,
Bits: reqDifficulty, Bits: reqDifficulty,

View File

@ -656,10 +656,10 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
// for headers starting from the block after this one up to the next // for headers starting from the block after this one up to the next
// checkpoint. // checkpoint.
prevHeight := sm.nextCheckpoint.Height prevHeight := sm.nextCheckpoint.Height
prevHash := sm.nextCheckpoint.Hash parentHash := sm.nextCheckpoint.Hash
sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight) sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
if sm.nextCheckpoint != nil { if sm.nextCheckpoint != nil {
locator := blockdag.BlockLocator([]*daghash.Hash{prevHash}) locator := blockdag.BlockLocator([]*daghash.Hash{parentHash})
err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash) err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
if err != nil { if err != nil {
log.Warnf("Failed to send getheaders message to "+ log.Warnf("Failed to send getheaders message to "+
@ -780,7 +780,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
// add it to the list of headers. // add it to the list of headers.
node := headerNode{hash: &blockHash} node := headerNode{hash: &blockHash}
prevNode := prevNodeEl.Value.(*headerNode) prevNode := prevNodeEl.Value.(*headerNode)
if prevNode.hash.IsEqual(&blockHeader.PrevBlocks[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation. if prevNode.hash.IsEqual(&blockHeader.ParentHashes[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
node.height = prevNode.height + 1 node.height = prevNode.height + 1
e := sm.headerList.PushBack(&node) e := sm.headerList.PushBack(&node)
if sm.startHeader == nil { if sm.startHeader == nil {

View File

@ -816,18 +816,18 @@ func (sp *Peer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
// Populate the PrevFilterHeader field. // Populate the PrevFilterHeader field.
if msg.StartHeight > 0 { if msg.StartHeight > 0 {
prevBlockHash := &hashList[0] parentHash := &hashList[0]
// Fetch the raw committed filter header bytes from the // Fetch the raw committed filter header bytes from the
// database. // database.
headerBytes, err := sp.server.CfIndex.FilterHeaderByBlockHash( headerBytes, err := sp.server.CfIndex.FilterHeaderByBlockHash(
prevBlockHash, msg.FilterType) parentHash, msg.FilterType)
if err != nil { if err != nil {
peerLog.Errorf("Error retrieving CF header: %v", err) peerLog.Errorf("Error retrieving CF header: %v", err)
return return
} }
if len(headerBytes) == 0 { if len(headerBytes) == 0 {
peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash) peerLog.Warnf("Could not obtain CF header for %v", parentHash)
return return
} }

View File

@ -75,7 +75,7 @@ const (
gbtNonceRange = "000000000000ffffffffffff" gbtNonceRange = "000000000000ffffffffffff"
// gbtRegenerateSeconds is the number of seconds that must pass before // gbtRegenerateSeconds is the number of seconds that must pass before
// a new template is generated when the previous block hash has not // a new template is generated when the parent block hashes has not
// changed and there have been changes to the available transactions // changed and there have been changes to the available transactions
// in the memory pool. // in the memory pool.
gbtRegenerateSeconds = 60 gbtRegenerateSeconds = 60
@ -90,7 +90,7 @@ var (
// declared here to avoid the overhead of creating the slice on every // declared here to avoid the overhead of creating the slice on every
// invocation for constant data. // invocation for constant data.
gbtMutableFields = []string{ gbtMutableFields = []string{
"time", "transactions/add", "prevblock", "coinbase/append", "time", "transactions/add", "parentblock", "coinbase/append",
} }
// gbtCoinbaseAux describes additional data that miners should include // gbtCoinbaseAux describes additional data that miners should include
@ -1137,7 +1137,7 @@ func handleGetBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
Version: blockHeader.Version, Version: blockHeader.Version,
VersionHex: fmt.Sprintf("%08x", blockHeader.Version), VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
MerkleRoot: blockHeader.MerkleRoot.String(), MerkleRoot: blockHeader.MerkleRoot.String(),
PreviousHashes: daghash.Strings(blockHeader.PrevBlocks), ParentHashes: daghash.Strings(blockHeader.ParentHashes),
Nonce: blockHeader.Nonce, Nonce: blockHeader.Nonce,
Time: blockHeader.Timestamp.Unix(), Time: blockHeader.Timestamp.Unix(),
Confirmations: uint64(1 + s.cfg.DAG.Height() - blockHeight), //TODO: (Ori) This is probably wrong. Done only for compilation Confirmations: uint64(1 + s.cfg.DAG.Height() - blockHeight), //TODO: (Ori) This is probably wrong. Done only for compilation
@ -1332,7 +1332,7 @@ func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{})
VersionHex: fmt.Sprintf("%08x", blockHeader.Version), VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
MerkleRoot: blockHeader.MerkleRoot.String(), MerkleRoot: blockHeader.MerkleRoot.String(),
NextHashes: nextHashStrings, NextHashes: nextHashStrings,
PreviousHashes: daghash.Strings(blockHeader.PrevBlocks), ParentHashes: daghash.Strings(blockHeader.ParentHashes),
Nonce: uint64(blockHeader.Nonce), Nonce: uint64(blockHeader.Nonce),
Time: blockHeader.Timestamp.Unix(), Time: blockHeader.Timestamp.Unix(),
Bits: strconv.FormatInt(int64(blockHeader.Bits), 16), Bits: strconv.FormatInt(int64(blockHeader.Bits), 16),
@ -1343,14 +1343,14 @@ func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{})
// encodeLongPollID encodes the passed details into an ID that can be used to // encodeLongPollID encodes the passed details into an ID that can be used to
// uniquely identify a block template. // uniquely identify a block template.
func encodeLongPollID(prevHashes []daghash.Hash, lastGenerated time.Time) string { func encodeLongPollID(parentHashes []daghash.Hash, lastGenerated time.Time) string {
return fmt.Sprintf("%s-%d", daghash.JoinHashesStrings(prevHashes, ""), lastGenerated.Unix()) return fmt.Sprintf("%s-%d", daghash.JoinHashesStrings(parentHashes, ""), lastGenerated.Unix())
} }
// decodeLongPollID decodes an ID that is used to uniquely identify a block // decodeLongPollID decodes an ID that is used to uniquely identify a block
// template. This is mainly used as a mechanism to track when to update clients // template. This is mainly used as a mechanism to track when to update clients
// that are using long polling for block templates. The ID consists of the // that are using long polling for block templates. The ID consists of the
// previous blocks hashes for the associated template and the time the associated // parent blocks hashes for the associated template and the time the associated
// template was generated. // template was generated.
func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) { func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
fields := strings.Split(longPollID, "-") fields := strings.Split(longPollID, "-")
@ -1358,20 +1358,20 @@ func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
return nil, 0, errors.New("decodeLongPollID: invalid number of fields") return nil, 0, errors.New("decodeLongPollID: invalid number of fields")
} }
prevHashesStr := fields[0] parentHashesStr := fields[0]
if len(prevHashesStr)%daghash.HashSize != 0 { if len(parentHashesStr)%daghash.HashSize != 0 {
return nil, 0, errors.New("decodeLongPollID: invalid previous hashes format") return nil, 0, errors.New("decodeLongPollID: invalid parent hashes format")
} }
numberOfHashes := len(prevHashesStr) / daghash.HashSize numberOfHashes := len(parentHashesStr) / daghash.HashSize
prevHashes := make([]daghash.Hash, 0, numberOfHashes) parentHashes := make([]daghash.Hash, 0, numberOfHashes)
for i := 0; i < len(prevHashesStr); i += daghash.HashSize { for i := 0; i < len(parentHashesStr); i += daghash.HashSize {
hash, err := daghash.NewHashFromStr(prevHashesStr[i : i+daghash.HashSize]) hash, err := daghash.NewHashFromStr(parentHashesStr[i : i+daghash.HashSize])
if err != nil { if err != nil {
return nil, 0, fmt.Errorf("decodeLongPollID: NewHashFromStr: %v", err) return nil, 0, fmt.Errorf("decodeLongPollID: NewHashFromStr: %v", err)
} }
prevHashes = append(prevHashes, *hash) parentHashes = append(parentHashes, *hash)
} }
lastGenerated, err := strconv.ParseInt(fields[1], 10, 64) lastGenerated, err := strconv.ParseInt(fields[1], 10, 64)
@ -1379,7 +1379,7 @@ func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
return nil, 0, fmt.Errorf("decodeLongPollID: Cannot parse timestamp: %v", lastGenerated) return nil, 0, fmt.Errorf("decodeLongPollID: Cannot parse timestamp: %v", lastGenerated)
} }
return prevHashes, lastGenerated, nil return parentHashes, lastGenerated, nil
} }
// notifyLongPollers notifies any channels that have been registered to be // notifyLongPollers notifies any channels that have been registered to be
@ -1467,7 +1467,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) {
} }
// templateUpdateChan returns a channel that will be closed once the block // templateUpdateChan returns a channel that will be closed once the block
// template associated with the passed previous hash and last generated time // template associated with the passed parent hashes and last generated time
// is stale. The function will return existing channels for duplicate // is stale. The function will return existing channels for duplicate
// parameters which allows multiple clients to wait for the same block template // parameters which allows multiple clients to wait for the same block template
// without requiring a different channel for each client. // without requiring a different channel for each client.
@ -1476,7 +1476,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) {
func (state *gbtWorkState) templateUpdateChan(tipHashes []daghash.Hash, lastGenerated int64) chan struct{} { func (state *gbtWorkState) templateUpdateChan(tipHashes []daghash.Hash, lastGenerated int64) chan struct{} {
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "") tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
// Either get the current list of channels waiting for updates about // Either get the current list of channels waiting for updates about
// changes to block template for the previous hash or create a new one. // changes to block template for the parent hashes or create a new one.
channels, ok := state.notifyMap[tipHashesStr] channels, ok := state.notifyMap[tipHashesStr]
if !ok { if !ok {
m := make(map[int64]chan struct{}) m := make(map[int64]chan struct{})
@ -1713,7 +1713,7 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld
Bits: strconv.FormatInt(int64(header.Bits), 16), Bits: strconv.FormatInt(int64(header.Bits), 16),
CurTime: header.Timestamp.Unix(), CurTime: header.Timestamp.Unix(),
Height: int64(template.Height), Height: int64(template.Height),
PreviousHashes: daghash.Strings(header.PrevBlocks), ParentHashes: daghash.Strings(header.ParentHashes),
SigOpLimit: blockdag.MaxSigOpsPerBlock, SigOpLimit: blockdag.MaxSigOpsPerBlock,
SizeLimit: wire.MaxBlockPayload, SizeLimit: wire.MaxBlockPayload,
Transactions: transactions, Transactions: transactions,
@ -1790,7 +1790,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal
// Just return the current block template if the long poll ID provided by // Just return the current block template if the long poll ID provided by
// the caller is invalid. // the caller is invalid.
prevHashes, lastGenerated, err := decodeLongPollID(longPollID) parentHashes, lastGenerated, err := decodeLongPollID(longPollID)
if err != nil { if err != nil {
result, err := state.blockTemplateResult(useCoinbaseValue, nil) result, err := state.blockTemplateResult(useCoinbaseValue, nil)
if err != nil { if err != nil {
@ -1805,7 +1805,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal
// Return the block template now if the specific block template // Return the block template now if the specific block template
// identified by the long poll ID no longer matches the current block // identified by the long poll ID no longer matches the current block
// template as this means the provided template is stale. // template as this means the provided template is stale.
areHashesEqual := daghash.AreEqual(state.template.Block.Header.PrevBlocks, prevHashes) areHashesEqual := daghash.AreEqual(state.template.Block.Header.ParentHashes, parentHashes)
if !areHashesEqual || if !areHashesEqual ||
lastGenerated != state.lastGenerated.Unix() { lastGenerated != state.lastGenerated.Unix() {
@ -1824,11 +1824,11 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal
return result, nil return result, nil
} }
// Register the previous hash and last generated time for notifications // Register the parent hashes and last generated time for notifications
// Get a channel that will be notified when the template associated with // Get a channel that will be notified when the template associated with
// the provided ID is stale and a new block template should be returned to // the provided ID is stale and a new block template should be returned to
// the caller. // the caller.
longPollChan := state.templateUpdateChan(prevHashes, lastGenerated) longPollChan := state.templateUpdateChan(parentHashes, lastGenerated)
state.Unlock() state.Unlock()
select { select {
@ -2032,12 +2032,12 @@ func chainErrToGBTErrString(err error) string {
return "bad-script-malformed" return "bad-script-malformed"
case blockdag.ErrScriptValidation: case blockdag.ErrScriptValidation:
return "bad-script-validate" return "bad-script-validate"
case blockdag.ErrPreviousBlockUnknown: case blockdag.ErrParentBlockUnknown:
return "prev-blk-not-found" return "parent-blk-not-found"
case blockdag.ErrInvalidAncestorBlock: case blockdag.ErrInvalidAncestorBlock:
return "bad-prevblk" return "bad-parentblk"
case blockdag.ErrPrevBlockNotBest: case blockdag.ErrParentBlockNotCurrentTips:
return "inconclusive-not-best-prvblk" return "inconclusive-not-best-parentblk"
} }
return "rejected: " + err.Error() return "rejected: " + err.Error()
@ -2079,11 +2079,11 @@ func handleGetBlockTemplateProposal(s *Server, request *btcjson.TemplateRequest)
} }
block := util.NewBlock(&msgBlock) block := util.NewBlock(&msgBlock)
// Ensure the block is building from the expected previous blocks. // Ensure the block is building from the expected parent blocks.
expectedPrevHashes := s.cfg.DAG.TipHashes() expectedParentHashes := s.cfg.DAG.TipHashes()
prevHashes := block.MsgBlock().Header.PrevBlocks parentHashes := block.MsgBlock().Header.ParentHashes
if !daghash.AreEqual(expectedPrevHashes, prevHashes) { if !daghash.AreEqual(expectedParentHashes, parentHashes) {
return "bad-prevblk", nil return "bad-parentblk", nil
} }
if err := s.cfg.DAG.CheckConnectBlockTemplate(block); err != nil { if err := s.cfg.DAG.CheckConnectBlockTemplate(block); err != nil {
@ -3375,7 +3375,7 @@ func verifyDAG(s *Server, level, depth int32) error {
} }
} }
currentHash = *block.MsgBlock().Header.SelectedPrevBlock() currentHash = *block.MsgBlock().Header.SelectedParentHash()
} }
log.Infof("Chain verify completed successfully") log.Infof("Chain verify completed successfully")

View File

@ -241,7 +241,7 @@ var helpDescsEnUS = map[string]string{
"getblockverboseresult-nonce": "The block nonce", "getblockverboseresult-nonce": "The block nonce",
"getblockverboseresult-bits": "The bits which represent the block difficulty", "getblockverboseresult-bits": "The bits which represent the block difficulty",
"getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty", "getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
"getblockverboseresult-previousblockhashes": "The hashes of the previous blocks", "getblockverboseresult-parentblockhashes": "The hashes of the parent blocks",
"getblockverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)", "getblockverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)",
// GetBlockCountCmd help. // GetBlockCountCmd help.
@ -272,7 +272,7 @@ var helpDescsEnUS = map[string]string{
"getblockheaderverboseresult-nonce": "The block nonce", "getblockheaderverboseresult-nonce": "The block nonce",
"getblockheaderverboseresult-bits": "The bits which represent the block difficulty", "getblockheaderverboseresult-bits": "The bits which represent the block difficulty",
"getblockheaderverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty", "getblockheaderverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
"getblockheaderverboseresult-previousblockhashes": "The hashes of the previous blocks", "getblockheaderverboseresult-parentblockhashes": "The hashes of the parent blocks",
"getblockheaderverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)", "getblockheaderverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)",
// TemplateRequest help. // TemplateRequest help.
@ -300,7 +300,7 @@ var helpDescsEnUS = map[string]string{
"getblocktemplateresult-bits": "Hex-encoded compressed difficulty", "getblocktemplateresult-bits": "Hex-encoded compressed difficulty",
"getblocktemplateresult-curtime": "Current time as seen by the server (recommended for block time); must fall within mintime/maxtime rules", "getblocktemplateresult-curtime": "Current time as seen by the server (recommended for block time); must fall within mintime/maxtime rules",
"getblocktemplateresult-height": "Height of the block to be solved", "getblocktemplateresult-height": "Height of the block to be solved",
"getblocktemplateresult-previousblockhashes": "Hex-encoded big-endian hashes of the previous blocks", "getblocktemplateresult-parentblockhashes": "Hex-encoded big-endian hashes of the parent blocks",
"getblocktemplateresult-sigoplimit": "Number of sigops allowed in blocks ", "getblocktemplateresult-sigoplimit": "Number of sigops allowed in blocks ",
"getblocktemplateresult-sizelimit": "Number of bytes allowed in blocks", "getblocktemplateresult-sizelimit": "Number of bytes allowed in blocks",
"getblocktemplateresult-transactions": "Array of transactions as JSON objects", "getblocktemplateresult-transactions": "Array of transactions as JSON objects",

View File

@ -2084,7 +2084,7 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
Message: "Failed to fetch block: " + err.Error(), Message: "Failed to fetch block: " + err.Error(),
} }
} }
if lastBlockHash != nil && block.MsgBlock().Header.PrevBlocks[0] != *lastBlockHash { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation. if lastBlockHash != nil && block.MsgBlock().Header.ParentHashes[0] != *lastBlockHash { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
return nil, &btcjson.RPCError{ return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCInvalidParameter, Code: btcjson.ErrRPCInvalidParameter,
Message: fmt.Sprintf("Block %v is not a child of %v", Message: fmt.Sprintf("Block %v is not a child of %v",

View File

@ -305,8 +305,8 @@ func TestBlockErrors(t *testing.T) {
var Block100000 = wire.MsgBlock{ var Block100000 = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{ ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy. [32]byte{ // Make go vet happy.
0x82, 0xdc, 0xbd, 0xe6, 0x88, 0x37, 0x74, 0x5b, 0x82, 0xdc, 0xbd, 0xe6, 0x88, 0x37, 0x74, 0x5b,
0x78, 0x6b, 0x03, 0x1d, 0xa3, 0x48, 0x3c, 0x45, 0x78, 0x6b, 0x03, 0x1d, 0xa3, 0x48, 0x3c, 0x45,

View File

@ -487,8 +487,8 @@ func TestFilterInsertUpdateNone(t *testing.T) {
func TestFilterInsertP2PubKeyOnly(t *testing.T) { func TestFilterInsertP2PubKeyOnly(t *testing.T) {
blockBytes := []byte{ blockBytes := []byte{
0x01, 0x00, 0x00, 0x00, // Version 0x01, 0x00, 0x00, 0x00, // Version
0x01, // NumPrevBlocks 0x01, // NumParentBlocks
0x82, 0xBB, 0x86, 0x9C, 0xF3, 0xA7, 0x93, 0x43, 0x2A, 0x66, 0xE8, // HashPrevBlocks 0x82, 0xBB, 0x86, 0x9C, 0xF3, 0xA7, 0x93, 0x43, 0x2A, 0x66, 0xE8, // ParentHashes
0x26, 0xE0, 0x5A, 0x6F, 0xC3, 0x74, 0x69, 0xF8, 0xEF, 0xB7, 0x42, 0x26, 0xE0, 0x5A, 0x6F, 0xC3, 0x74, 0x69, 0xF8, 0xEF, 0xB7, 0x42,
0x1D, 0xC8, 0x80, 0x67, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1D, 0xC8, 0x80, 0x67, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, 0x65, 0x9C, 0x79, // HashMerkleRoot 0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, 0x65, 0x9C, 0x79, // HashMerkleRoot

View File

@ -18,8 +18,8 @@ import (
func TestMerkleBlock3(t *testing.T) { func TestMerkleBlock3(t *testing.T) {
blockBytes := []byte{ blockBytes := []byte{
0x01, 0x00, 0x00, 0x00, // Version 0x01, 0x00, 0x00, 0x00, // Version
0x01, //NumPrevBlocks 0x01, // NumParentBlocks
0x79, 0xCD, 0xA8, 0x56, 0xB1, 0x43, 0xD9, 0xDB, 0x2C, 0x1C, 0xAF, //HashPrevBlocks 0x79, 0xCD, 0xA8, 0x56, 0xB1, 0x43, 0xD9, 0xDB, 0x2C, 0x1C, 0xAF, // ParentHashes
0xF0, 0x1D, 0x1A, 0xEC, 0xC8, 0x63, 0x0D, 0x30, 0x62, 0x5D, 0x10, 0xF0, 0x1D, 0x1A, 0xEC, 0xC8, 0x63, 0x0D, 0x30, 0x62, 0x5D, 0x10,
0xE8, 0xB4, 0xB8, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xB4, 0xB8, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xB5, 0x0C, 0xC0, 0x69, 0xD6, 0xA3, 0xE3, 0x3E, 0x3F, 0xF8, 0x4A, // HashMerkleRoot 0xB5, 0x0C, 0xC0, 0x69, 0xD6, 0xA3, 0xE3, 0x3E, 0x3F, 0xF8, 0x4A, // HashMerkleRoot

View File

@ -372,17 +372,17 @@ func GetFilterHash(filter *gcs.Filter) (daghash.Hash, error) {
// MakeHeaderForFilter makes a filter chain header for a filter, given the // MakeHeaderForFilter makes a filter chain header for a filter, given the
// filter and the previous filter chain header. // filter and the previous filter chain header.
func MakeHeaderForFilter(filter *gcs.Filter, prevHeader daghash.Hash) (daghash.Hash, error) { func MakeHeaderForFilter(filter *gcs.Filter, parentHeader daghash.Hash) (daghash.Hash, error) {
filterTip := make([]byte, 2*daghash.HashSize) filterTip := make([]byte, 2*daghash.HashSize)
filterHash, err := GetFilterHash(filter) filterHash, err := GetFilterHash(filter)
if err != nil { if err != nil {
return daghash.Hash{}, err return daghash.Hash{}, err
} }
// In the buffer we created above we'll compute hash || prevHash as an // In the buffer we created above we'll compute hash || parentHash as an
// intermediate value. // intermediate value.
copy(filterTip, filterHash[:]) copy(filterTip, filterHash[:])
copy(filterTip[daghash.HashSize:], prevHeader[:]) copy(filterTip[daghash.HashSize:], parentHeader[:])
// The final filter hash is the double-sha256 of the hash computed // The final filter hash is the double-sha256 of the hash computed
// above. // above.

View File

@ -411,7 +411,7 @@ func BenchmarkDecodeGetHeaders(b *testing.B) {
// BenchmarkDecodeHeaders performs a benchmark on how long it takes to // BenchmarkDecodeHeaders performs a benchmark on how long it takes to
// decode a headers message with the maximum number of headers and maximum number of // decode a headers message with the maximum number of headers and maximum number of
// previous hashes per header. // parent hashes per header.
func BenchmarkDecodeHeaders(b *testing.B) { func BenchmarkDecodeHeaders(b *testing.B) {
// Create a message with the maximum number of headers. // Create a message with the maximum number of headers.
pver := ProtocolVersion pver := ProtocolVersion
@ -421,15 +421,15 @@ func BenchmarkDecodeHeaders(b *testing.B) {
if err != nil { if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err) b.Fatalf("NewHashFromStr: unexpected error: %v", err)
} }
prevBlocks := make([]daghash.Hash, MaxNumPrevBlocks) parentHashes := make([]daghash.Hash, MaxNumParentBlocks)
for j := byte(0); j < MaxNumPrevBlocks; j++ { for j := byte(0); j < MaxNumParentBlocks; j++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j)) hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j))
if err != nil { if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err) b.Fatalf("NewHashFromStr: unexpected error: %v", err)
} }
prevBlocks[i] = *hash parentHashes[i] = *hash
} }
m.AddBlockHeader(NewBlockHeader(1, prevBlocks, hash, 0, uint64(i))) m.AddBlockHeader(NewBlockHeader(1, parentHashes, hash, 0, uint64(i)))
} }
// Serialize it so the bytes are available to test the decode below. // Serialize it so the bytes are available to test the decode below.

View File

@ -13,20 +13,20 @@ import (
) )
// BaseBlockHeaderPayload is the base number of bytes a block header can be, // BaseBlockHeaderPayload is the base number of bytes a block header can be,
// not including the list of previous block headers. // not including the list of parent block headers.
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes + // Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// + NumPrevBlocks 1 byte + MerkleRoot hash. // + NumParentBlocks 1 byte + MerkleRoot hash.
// To get total size of block header len(PrevBlocks) * daghash.HashSize should be // To get total size of block header len(ParentHashes) * daghash.HashSize should be
// added to this value // added to this value
const BaseBlockHeaderPayload = 25 + (daghash.HashSize) const BaseBlockHeaderPayload = 25 + (daghash.HashSize)
// MaxNumPrevBlocks is the maximum number of previous blocks a block can reference. // MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255 as the maximum number NumPrevBlocks can be due to it being a byte // Currently set to 255 as the maximum number NumParentBlocks can be due to it being a byte
const MaxNumPrevBlocks = 255 const MaxNumParentBlocks = 255
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be. // MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// BaseBlockHeaderPayload + up to MaxNumPrevBlocks hashes of previous blocks // BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumPrevBlocks * daghash.HashSize) const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * daghash.HashSize)
// BlockHeader defines information about a block and is used in the bitcoin // BlockHeader defines information about a block and is used in the bitcoin
// block (MsgBlock) and headers (MsgHeader) messages. // block (MsgBlock) and headers (MsgHeader) messages.
@ -34,11 +34,11 @@ type BlockHeader struct {
// Version of the block. This is not the same as the protocol version. // Version of the block. This is not the same as the protocol version.
Version int32 Version int32
// Number of entries in PrevBlocks // Number of entries in ParentHashes
NumPrevBlocks byte NumParentBlocks byte
// Hashes of the previous block headers in the blockDAG. // Hashes of the parent block headers in the blockDAG.
PrevBlocks []daghash.Hash ParentHashes []daghash.Hash
// Merkle tree reference to hash of all transactions for the block. // Merkle tree reference to hash of all transactions for the block.
MerkleRoot daghash.Hash MerkleRoot daghash.Hash
@ -59,24 +59,24 @@ func (h *BlockHeader) BlockHash() daghash.Hash {
// transactions. Ignore the error returns since there is no way the // transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a // encode could fail except being out of memory which would cause a
// run-time panic. // run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+len(h.PrevBlocks))) buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+len(h.ParentHashes)))
_ = writeBlockHeader(buf, 0, h) _ = writeBlockHeader(buf, 0, h)
return daghash.DoubleHashH(buf.Bytes()) return daghash.DoubleHashH(buf.Bytes())
} }
// SelectedPrevBlock returns the hash of the selected block header. // SelectedParentHash returns the hash of the selected block header.
func (h *BlockHeader) SelectedPrevBlock() *daghash.Hash { func (h *BlockHeader) SelectedParentHash() *daghash.Hash {
if h.NumPrevBlocks == 0 { if h.NumParentBlocks == 0 {
return nil return nil
} }
return &h.PrevBlocks[0] return &h.ParentHashes[0]
} }
// IsGenesis returns true iff this block is a genesis block // IsGenesis returns true iff this block is a genesis block
func (h *BlockHeader) IsGenesis() bool { func (h *BlockHeader) IsGenesis() bool {
return h.NumPrevBlocks == 0 return h.NumParentBlocks == 0
} }
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
@ -118,21 +118,21 @@ func (h *BlockHeader) Serialize(w io.Writer) error {
// SerializeSize returns the number of bytes it would take to serialize the // SerializeSize returns the number of bytes it would take to serialize the
// block header. // block header.
func (h *BlockHeader) SerializeSize() int { func (h *BlockHeader) SerializeSize() int {
return BaseBlockHeaderPayload + int(h.NumPrevBlocks)*daghash.HashSize return BaseBlockHeaderPayload + int(h.NumParentBlocks)*daghash.HashSize
} }
// NewBlockHeader returns a new BlockHeader using the provided version, previous // NewBlockHeader returns a new BlockHeader using the provided version, previous
// block hash, merkle root hash, difficulty bits, and nonce used to generate the // block hash, merkle root hash, difficulty bits, and nonce used to generate the
// block with defaults or calclulated values for the remaining fields. // block with defaults or calclulated values for the remaining fields.
func NewBlockHeader(version int32, prevHashes []daghash.Hash, merkleRootHash *daghash.Hash, func NewBlockHeader(version int32, parentHashes []daghash.Hash, merkleRootHash *daghash.Hash,
bits uint32, nonce uint64) *BlockHeader { bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one second precision since the protocol // Limit the timestamp to one second precision since the protocol
// doesn't support better. // doesn't support better.
return &BlockHeader{ return &BlockHeader{
Version: version, Version: version,
NumPrevBlocks: byte(len(prevHashes)), NumParentBlocks: byte(len(parentHashes)),
PrevBlocks: prevHashes, ParentHashes: parentHashes,
MerkleRoot: *merkleRootHash, MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0), Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits, Bits: bits,
@ -144,14 +144,14 @@ func NewBlockHeader(version int32, prevHashes []daghash.Hash, merkleRootHash *da
// decoding block headers stored to disk, such as in a database, as opposed to // decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire. // decoding from the wire.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
err := readElements(r, &bh.Version, &bh.NumPrevBlocks) err := readElements(r, &bh.Version, &bh.NumParentBlocks)
if err != nil { if err != nil {
return err return err
} }
bh.PrevBlocks = make([]daghash.Hash, bh.NumPrevBlocks) bh.ParentHashes = make([]daghash.Hash, bh.NumParentBlocks)
for i := byte(0); i < bh.NumPrevBlocks; i++ { for i := byte(0); i < bh.NumParentBlocks; i++ {
err := readElement(r, &bh.PrevBlocks[i]) err := readElement(r, &bh.ParentHashes[i])
if err != nil { if err != nil {
return err return err
} }
@ -164,6 +164,6 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
// opposed to encoding for the wire. // opposed to encoding for the wire.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error { func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := int64(bh.Timestamp.Unix()) sec := int64(bh.Timestamp.Unix())
return writeElements(w, bh.Version, bh.NumPrevBlocks, &bh.PrevBlocks, &bh.MerkleRoot, return writeElements(w, bh.Version, bh.NumParentBlocks, &bh.ParentHashes, &bh.MerkleRoot,
sec, bh.Bits, bh.Nonce) sec, bh.Bits, bh.Nonce)
} }

View File

@ -28,9 +28,9 @@ func TestBlockHeader(t *testing.T) {
bh := NewBlockHeader(1, hashes, &merkleHash, bits, nonce) bh := NewBlockHeader(1, hashes, &merkleHash, bits, nonce)
// Ensure we get the same data back out. // Ensure we get the same data back out.
if !reflect.DeepEqual(bh.PrevBlocks, hashes) { if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v", t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.PrevBlocks), spew.Sprint(hashes)) spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
} }
if !bh.MerkleRoot.IsEqual(&merkleHash) { if !bh.MerkleRoot.IsEqual(&merkleHash) {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v", t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
@ -56,8 +56,8 @@ func TestBlockHeaderWire(t *testing.T) {
bits := uint32(0x1d00ffff) bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{ baseBlockHdr := &BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot, MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits, Bits: bits,
@ -67,12 +67,12 @@ func TestBlockHeaderWire(t *testing.T) {
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr. // baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{ baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -195,8 +195,8 @@ func TestBlockHeaderSerialize(t *testing.T) {
bits := uint32(0x1d00ffff) bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{ baseBlockHdr := &BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot, MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits, Bits: bits,
@ -206,12 +206,12 @@ func TestBlockHeaderSerialize(t *testing.T) {
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr. // baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{ baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -275,8 +275,8 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
baseBlockHdr := &BlockHeader{ baseBlockHdr := &BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot, MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp, Timestamp: timestamp,
Bits: bits, Bits: bits,
@ -285,8 +285,8 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
genesisBlockHdr := &BlockHeader{ genesisBlockHdr := &BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: mainNetGenesisMerkleRoot, MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp, Timestamp: timestamp,
Bits: bits, Bits: bits,
@ -322,8 +322,8 @@ func TestIsGenesis(t *testing.T) {
baseBlockHdr := &BlockHeader{ baseBlockHdr := &BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot, MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp, Timestamp: timestamp,
Bits: bits, Bits: bits,
@ -331,8 +331,8 @@ func TestIsGenesis(t *testing.T) {
} }
genesisBlockHdr := &BlockHeader{ genesisBlockHdr := &BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 0, NumParentBlocks: 0,
PrevBlocks: []daghash.Hash{}, ParentHashes: []daghash.Hash{},
MerkleRoot: mainNetGenesisMerkleRoot, MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp, Timestamp: timestamp,
Bits: bits, Bits: bits,

View File

@ -21,11 +21,11 @@ func TestBlock(t *testing.T) {
pver := ProtocolVersion pver := ProtocolVersion
// Block 1 header. // Block 1 header.
prevHashes := blockOne.Header.PrevBlocks parentHashes := blockOne.Header.ParentHashes
merkleHash := &blockOne.Header.MerkleRoot merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce) bh := NewBlockHeader(1, parentHashes, merkleHash, bits, nonce)
// Ensure the command is expected value. // Ensure the command is expected value.
wantCmd := "block" wantCmd := "block"
@ -334,7 +334,7 @@ func TestBlockSerializeErrors(t *testing.T) {
}{ }{
// Force error in version. // Force error in version.
{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF}, {&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
// Force error in numPrevBlocks. // Force error in numParentBlocks.
{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF}, {&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #1. // Force error in prev block hash #1.
{&blockOne, blockOneBytes, 5, io.ErrShortWrite, io.EOF}, {&blockOne, blockOneBytes, 5, io.ErrShortWrite, io.EOF},
@ -405,12 +405,12 @@ func TestBlockOverflowErrors(t *testing.T) {
{ {
[]byte{ []byte{
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -492,8 +492,8 @@ func TestBlockSerializeSize(t *testing.T) {
var blockOne = MsgBlock{ var blockOne = MsgBlock{
Header: BlockHeader{ Header: BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: daghash.Hash(mainNetGenesisMerkleRoot), MerkleRoot: daghash.Hash(mainNetGenesisMerkleRoot),
Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
@ -541,12 +541,12 @@ var blockOne = MsgBlock{
// Block one serialized bytes. // Block one serialized bytes.
var blockOneBytes = []byte{ var blockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,

View File

@ -81,12 +81,12 @@ func TestHeadersWire(t *testing.T) {
oneHeaderEncoded := []byte{ oneHeaderEncoded := []byte{
0x01, // VarInt for number of headers. 0x01, // VarInt for number of headers.
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -237,12 +237,12 @@ func TestHeadersWireErrors(t *testing.T) {
oneHeaderEncoded := []byte{ oneHeaderEncoded := []byte{
0x01, // VarInt for number of headers. 0x01, // VarInt for number of headers.
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -278,12 +278,12 @@ func TestHeadersWireErrors(t *testing.T) {
transHeaderEncoded := []byte{ transHeaderEncoded := []byte{
0x01, // VarInt for number of headers. 0x01, // VarInt for number of headers.
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,

View File

@ -21,11 +21,11 @@ func TestMerkleBlock(t *testing.T) {
pver := ProtocolVersion pver := ProtocolVersion
// Block 1 header. // Block 1 header.
prevHashes := blockOne.Header.PrevBlocks parentHashes := blockOne.Header.ParentHashes
merkleHash := &blockOne.Header.MerkleRoot merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce) bh := NewBlockHeader(1, parentHashes, merkleHash, bits, nonce)
// Ensure the command is expected value. // Ensure the command is expected value.
wantCmd := "merkleblock" wantCmd := "merkleblock"
@ -113,11 +113,11 @@ func TestMerkleBlock(t *testing.T) {
// the latest protocol version and decoding with BIP0031Version. // the latest protocol version and decoding with BIP0031Version.
func TestMerkleBlockCrossProtocol(t *testing.T) { func TestMerkleBlockCrossProtocol(t *testing.T) {
// Block 1 header. // Block 1 header.
prevHashes := blockOne.Header.PrevBlocks parentHashes := blockOne.Header.ParentHashes
merkleHash := &blockOne.Header.MerkleRoot merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce) bh := NewBlockHeader(1, parentHashes, merkleHash, bits, nonce)
msg := NewMsgMerkleBlock(bh) msg := NewMsgMerkleBlock(bh)
@ -338,8 +338,8 @@ func TestMerkleBlockOverflowErrors(t *testing.T) {
var merkleBlockOne = MsgMerkleBlock{ var merkleBlockOne = MsgMerkleBlock{
Header: BlockHeader{ Header: BlockHeader{
Version: 1, Version: 1,
NumPrevBlocks: 2, NumParentBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: daghash.Hash([daghash.HashSize]byte{ // Make go vet happy. MerkleRoot: daghash.Hash([daghash.HashSize]byte{ // Make go vet happy.
0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
@ -366,12 +366,12 @@ var merkleBlockOne = MsgMerkleBlock{
// block one of the block chain where the first transaction matches. // block one of the block chain where the first transaction matches.
var merkleBlockOneBytes = []byte{ var merkleBlockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1 0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks 0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,