[DEV-211] Change fields of serialized blocks and transactions to match spec (#104)

* [DEV-211] change block fields

* [DEV-211] change block fields

* [DEV-211] change comments to adhere to the new block field names
This commit is contained in:
Ori Newman 2018-11-05 13:11:42 +02:00 committed by stasatdaglabs
parent d70e2be641
commit 9519b9f2a1
43 changed files with 518 additions and 518 deletions

View File

@ -23,7 +23,7 @@ import (
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
// The height of this block is one more than the referenced previous
// block.
parents, err := lookupPreviousNodes(block, dag)
parents, err := lookupParentNodes(block, dag)
if err != nil {
return err
}
@ -85,18 +85,18 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
return nil
}
func lookupPreviousNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
header := block.MsgBlock().Header
prevHashes := header.PrevBlocks
parentHashes := header.ParentHashes
nodes := newSet()
for _, prevHash := range prevHashes {
node := blockDAG.index.LookupNode(&prevHash)
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(&parentHash)
if node == nil {
str := fmt.Sprintf("previous block %s is unknown", prevHashes)
return nil, ruleError(ErrPreviousBlockUnknown, str)
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("previous block %s is known to be invalid", prevHashes)
str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
return nil, ruleError(ErrInvalidAncestorBlock, str)
}

View File

@ -33,15 +33,15 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
err = dag.maybeAcceptBlock(block, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected: %s, got: <nil>", ErrPreviousBlockUnknown)
"Expected: %s, got: <nil>", ErrParentBlockUnknown)
}
ruleErr, ok := err.(RuleError)
if !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrPreviousBlockUnknown {
} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Unexpected error code. Want: %s, got: %s", ErrPreviousBlockUnknown, ruleErr.ErrorCode)
"Unexpected error code. Want: %s, got: %s", ErrParentBlockUnknown, ruleErr.ErrorCode)
}
// Test rejecting the block if its parents are invalid

View File

@ -163,13 +163,13 @@ func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint
func (node *blockNode) Header() *wire.BlockHeader {
// No lock is needed because all accessed fields are immutable.
return &wire.BlockHeader{
Version: node.version,
NumPrevBlocks: byte(len(node.parents)),
PrevBlocks: node.PrevHashes(),
MerkleRoot: node.merkleRoot,
Timestamp: time.Unix(node.timestamp, 0),
Bits: node.bits,
Nonce: node.nonce,
Version: node.version,
NumParentBlocks: byte(len(node.parents)),
ParentHashes: node.ParentHashes(),
MerkleRoot: node.merkleRoot,
Timestamp: time.Unix(node.timestamp, 0),
Bits: node.bits,
Nonce: node.nonce,
}
}
@ -228,7 +228,7 @@ func (node *blockNode) CalcPastMedianTime() time.Time {
return time.Unix(medianTimestamp, 0)
}
func (node *blockNode) PrevHashes() []daghash.Hash {
func (node *blockNode) ParentHashes() []daghash.Hash {
return node.parents.hashes()
}

View File

@ -186,10 +186,10 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
// Make up a header and create a block node from it.
header := &wire.BlockHeader{
Version: blockVersion,
PrevBlocks: parents.hashes(),
Bits: bits,
Timestamp: timestamp,
Version: blockVersion,
ParentHashes: parents.hashes(),
Bits: bits,
Timestamp: timestamp,
}
return newBlockNode(header, parents, phantomK)
}

View File

@ -205,14 +205,14 @@ func (dag *BlockDAG) GetOrphanRoot(hash *daghash.Hash) *daghash.Hash {
// Keep looping while the parent of each orphaned block is
// known and is an orphan itself.
orphanRoot := hash
prevHash := hash
parentHash := hash
for {
orphan, exists := dag.orphans[*prevHash]
orphan, exists := dag.orphans[*parentHash]
if !exists {
break
}
orphanRoot = prevHash
prevHash = orphan.block.MsgBlock().Header.SelectedPrevBlock()
orphanRoot = parentHash
parentHash = orphan.block.MsgBlock().Header.SelectedParentHash()
}
return orphanRoot
@ -233,8 +233,8 @@ func (dag *BlockDAG) removeOrphanBlock(orphan *orphanBlock) {
// for loop is intentionally used over a range here as range does not
// reevaluate the slice on each iteration nor does it adjust the index
// for the modified slice.
prevHash := orphan.block.MsgBlock().Header.SelectedPrevBlock()
orphans := dag.prevOrphans[*prevHash]
parentHash := orphan.block.MsgBlock().Header.SelectedParentHash()
orphans := dag.prevOrphans[*parentHash]
for i := 0; i < len(orphans); i++ {
hash := orphans[i].block.Hash()
if hash.IsEqual(orphanHash) {
@ -244,12 +244,12 @@ func (dag *BlockDAG) removeOrphanBlock(orphan *orphanBlock) {
i--
}
}
dag.prevOrphans[*prevHash] = orphans
dag.prevOrphans[*parentHash] = orphans
// Remove the map entry altogether if there are no longer any orphans
// which depend on the parent hash.
if len(dag.prevOrphans[*prevHash]) == 0 {
delete(dag.prevOrphans, *prevHash)
if len(dag.prevOrphans[*parentHash]) == 0 {
delete(dag.prevOrphans, *parentHash)
}
}
@ -296,9 +296,9 @@ func (dag *BlockDAG) addOrphanBlock(block *util.Block) {
}
dag.orphans[*block.Hash()] = oBlock
// Add to previous hash lookup index for faster dependency lookups.
prevHash := block.MsgBlock().Header.SelectedPrevBlock()
dag.prevOrphans[*prevHash] = append(dag.prevOrphans[*prevHash], oBlock)
// Add to parent hash lookup index for faster dependency lookups.
parentHash := block.MsgBlock().Header.SelectedParentHash()
dag.prevOrphans[*parentHash] = append(dag.prevOrphans[*parentHash], oBlock)
}
// SequenceLock represents the converted relative lock-time in seconds, and

View File

@ -587,7 +587,7 @@ func chainedNodes(parents blockSet, numNodes int) []*blockNode {
// This is invalid, but all that is needed is enough to get the
// synthetic tests to work.
header := wire.BlockHeader{Nonce: testNoncePrng.Uint64()}
header.PrevBlocks = tips.hashes()
header.ParentHashes = tips.hashes()
nodes[i] = newBlockNode(&header, tips, dagconfig.SimNetParams.K)
tips = setFromSlice(nodes[i])
}

View File

@ -892,7 +892,7 @@ func (dag *BlockDAG) initDAGState() error {
"found %s", blockHash))
}
} else {
for _, hash := range header.PrevBlocks {
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(&hash)
if parent == nil {
return AssertError(fmt.Sprintf("initDAGState: Could "+

View File

@ -194,17 +194,17 @@ const (
// the stack.
ErrScriptValidation
// ErrPreviousBlockUnknown indicates that the previous block is not known.
ErrPreviousBlockUnknown
// ErrParentBlockUnknown indicates that the parent block is not known.
ErrParentBlockUnknown
// ErrInvalidAncestorBlock indicates that an ancestor of this block has
// already failed validation.
ErrInvalidAncestorBlock
// ErrPrevBlockNotBest indicates that the block's previous block is not the
// current chain tip. This is not a block validation rule, but is required
// ErrParentBlockNotCurrentTips indicates that the block's parents are not the
// current tips. This is not a block validation rule, but is required
// for block proposals submitted via getblocktemplate RPC.
ErrPrevBlockNotBest
ErrParentBlockNotCurrentTips
// ErrWithDiff indicates that there was an error with UTXOSet.WithDiff
ErrWithDiff
@ -212,46 +212,46 @@ const (
// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew",
ErrDifficultyTooLow: "ErrDifficultyTooLow",
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrForkTooOld: "ErrForkTooOld",
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
ErrNoTxOutputs: "ErrNoTxOutputs",
ErrTxTooBig: "ErrTxTooBig",
ErrBadTxOutValue: "ErrBadTxOutValue",
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
ErrImmatureSpend: "ErrImmatureSpend",
ErrSpendTooHigh: "ErrSpendTooHigh",
ErrBadFees: "ErrBadFees",
ErrTooManySigOps: "ErrTooManySigOps",
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
ErrMultipleCoinbases: "ErrMultipleCoinbases",
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrPreviousBlockUnknown: "ErrPreviousBlockUnknown",
ErrInvalidAncestorBlock: "ErrInvalidAncestorBlock",
ErrPrevBlockNotBest: "ErrPrevBlockNotBest",
ErrWithDiff: "ErrWithDiff",
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew",
ErrDifficultyTooLow: "ErrDifficultyTooLow",
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrForkTooOld: "ErrForkTooOld",
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
ErrNoTxOutputs: "ErrNoTxOutputs",
ErrTxTooBig: "ErrTxTooBig",
ErrBadTxOutValue: "ErrBadTxOutValue",
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
ErrImmatureSpend: "ErrImmatureSpend",
ErrSpendTooHigh: "ErrSpendTooHigh",
ErrBadFees: "ErrBadFees",
ErrTooManySigOps: "ErrTooManySigOps",
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
ErrMultipleCoinbases: "ErrMultipleCoinbases",
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrParentBlockUnknown: "ErrParentBlockUnknown",
ErrInvalidAncestorBlock: "ErrInvalidAncestorBlock",
ErrParentBlockNotCurrentTips: "ErrParentBlockNotCurrentTips",
ErrWithDiff: "ErrWithDiff",
}
// String returns the ErrorCode as a human-readable name.

View File

@ -51,9 +51,9 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"},
{ErrScriptMalformed, "ErrScriptMalformed"},
{ErrScriptValidation, "ErrScriptValidation"},
{ErrPreviousBlockUnknown, "ErrPreviousBlockUnknown"},
{ErrParentBlockUnknown, "ErrParentBlockUnknown"},
{ErrInvalidAncestorBlock, "ErrInvalidAncestorBlock"},
{ErrPrevBlockNotBest, "ErrPrevBlockNotBest"},
{ErrParentBlockNotCurrentTips, "ErrParentBlockNotCurrentTips"},
{ErrWithDiff, "ErrWithDiff"},
{0xffff, "Unknown ErrorCode (65535)"},
}

View File

@ -509,13 +509,13 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
block := wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 1, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
PrevBlocks: []daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
MerkleRoot: calcMerkleRoot(txns),
Bits: g.params.PowLimitBits,
Timestamp: ts,
Nonce: 0, // To be solved.
Version: 1,
NumParentBlocks: 1, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
ParentHashes: []daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
MerkleRoot: calcMerkleRoot(txns),
Bits: g.params.PowLimitBits,
Timestamp: ts,
Nonce: 0, // To be solved.
},
Transactions: txns,
}
@ -607,7 +607,7 @@ func (g *testGenerator) saveSpendableCoinbaseOuts() {
// reaching the block that has already had the coinbase outputs
// collected.
var collectBlocks []*wire.MsgBlock
for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedPrevBlock()] {
for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedParentHash()] {
if b.BlockHash() == g.prevCollectedHash {
break
}
@ -1554,9 +1554,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14)
// \-> b54(15)
g.nextBlock("b54", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedPrevBlock()]
medianBlock := g.blocks[*b.Header.SelectedParentHash()]
for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedPrevBlock()]
medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
}
b.Header.Timestamp = medianBlock.Header.Timestamp
})
@ -1568,9 +1568,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) -> b55(15)
g.setTip("b53")
g.nextBlock("b55", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedPrevBlock()]
medianBlock := g.blocks[*b.Header.SelectedParentHash()]
for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedPrevBlock()]
medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
}
medianBlockTime := medianBlock.Header.Timestamp
b.Header.Timestamp = medianBlockTime.Add(time.Second)
@ -1718,7 +1718,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.nextBlock("b61", outs[18], func(b *wire.MsgBlock) {
// Duplicate the coinbase of the parent block to force the
// condition.
parent := g.blocks[*b.Header.SelectedPrevBlock()]
parent := g.blocks[*b.Header.SelectedParentHash()]
b.Transactions[0] = parent.Transactions[0]
})
rejected(blockdag.ErrOverwriteTx)

View File

@ -54,13 +54,13 @@ var (
// as the public transaction ledger for the regression test network.
regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 1,
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: *newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 1,
},
Transactions: []*wire.MsgTx{{
Version: 1,

View File

@ -179,7 +179,7 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
if header.IsGenesis() {
prevHeader = &zeroHash
} else {
ph := header.SelectedPrevBlock()
ph := header.SelectedParentHash()
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
if err != nil {
return err

View File

@ -210,22 +210,22 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool,
}
// Handle orphan blocks.
allPrevBlocksExist := true
for _, prevBlock := range blockHeader.PrevBlocks {
prevBlockExists, err := dag.blockExists(&prevBlock)
allParentsExist := true
for _, parentHash := range blockHeader.ParentHashes {
parentExists, err := dag.blockExists(&parentHash)
if err != nil {
return false, err
}
if !prevBlockExists {
log.Infof("Adding orphan block %v with parent %v", blockHash, prevBlock)
if !parentExists {
log.Infof("Adding orphan block %v with parent %v", blockHash, parentHash)
dag.addOrphanBlock(block)
allPrevBlocksExist = false
allParentsExist = false
}
}
if !allPrevBlocksExist {
if !allParentsExist {
return true, nil
}

View File

@ -431,14 +431,14 @@ func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSou
// checkBlockParentsOrder ensures that the block's parents are ordered by hash
func checkBlockParentsOrder(header *wire.BlockHeader) error {
sortedHashes := make([]daghash.Hash, 0, len(header.PrevBlocks))
for _, hash := range header.PrevBlocks {
sortedHashes := make([]daghash.Hash, 0, len(header.ParentHashes))
for _, hash := range header.ParentHashes {
sortedHashes = append(sortedHashes, hash)
}
sort.Slice(sortedHashes, func(i, j int) bool {
return daghash.Less(&sortedHashes[i], &sortedHashes[j])
})
if !daghash.AreEqual(header.PrevBlocks, sortedHashes) {
if !daghash.AreEqual(header.ParentHashes, sortedHashes) {
return ruleError(ErrWrongParentsOrder, "block parents are not ordered by hash")
}
return nil
@ -1076,11 +1076,11 @@ func (dag *BlockDAG) CheckConnectBlockTemplate(block *util.Block) error {
// current chain.
tips := dag.virtual.tips()
header := block.MsgBlock().Header
prevHashes := header.PrevBlocks
if !tips.hashesEqual(prevHashes) {
str := fmt.Sprintf("previous blocks must be the currents tips %v, "+
"instead got %v", tips, prevHashes)
return ruleError(ErrPrevBlockNotBest, str)
parentHashes := header.ParentHashes
if !tips.hashesEqual(parentHashes) {
str := fmt.Sprintf("parent blocks must be the current tips %v, "+
"instead got %v", tips, parentHashes)
return ruleError(ErrParentBlockNotCurrentTips, str)
}
err := checkBlockSanity(block, dag.dagParams.PowLimit, dag.timeSource, flags)
@ -1088,7 +1088,7 @@ func (dag *BlockDAG) CheckConnectBlockTemplate(block *util.Block) error {
return err
}
parents, err := lookupPreviousNodes(block, dag)
parents, err := lookupParentNodes(block, dag)
if err != nil {
return err
}

View File

@ -168,9 +168,9 @@ func TestCheckBlockSanity(t *testing.T) {
var invalidParentsOrderBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
@ -609,9 +609,9 @@ func TestValidateParents(t *testing.T) {
// test Block operations.
var Block100000 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,

View File

@ -10,39 +10,39 @@ import "encoding/json"
// the verbose flag is set. When the verbose flag is not set, getblockheader
// returns a hex-encoded string.
type GetBlockHeaderVerboseResult struct {
Hash string `json:"hash"`
Confirmations uint64 `json:"confirmations"`
Height int32 `json:"height"`
Version int32 `json:"version"`
VersionHex string `json:"versionHex"`
MerkleRoot string `json:"merkleroot"`
Time int64 `json:"time"`
Nonce uint64 `json:"nonce"`
Bits string `json:"bits"`
Difficulty float64 `json:"difficulty"`
PreviousHashes []string `json:"previousblockhashes,omitempty"`
NextHashes []string `json:"nextblockhashes,omitempty"`
Hash string `json:"hash"`
Confirmations uint64 `json:"confirmations"`
Height int32 `json:"height"`
Version int32 `json:"version"`
VersionHex string `json:"versionHex"`
MerkleRoot string `json:"merkleroot"`
Time int64 `json:"time"`
Nonce uint64 `json:"nonce"`
Bits string `json:"bits"`
Difficulty float64 `json:"difficulty"`
ParentHashes []string `json:"parentblockhashes,omitempty"`
NextHashes []string `json:"nextblockhashes,omitempty"`
}
// GetBlockVerboseResult models the data from the getblock command when the
// verbose flag is set. When the verbose flag is not set, getblock returns a
// hex-encoded string.
type GetBlockVerboseResult struct {
Hash string `json:"hash"`
Confirmations uint64 `json:"confirmations"`
Size int32 `json:"size"`
Height int64 `json:"height"`
Version int32 `json:"version"`
VersionHex string `json:"versionHex"`
MerkleRoot string `json:"merkleroot"`
Tx []string `json:"tx,omitempty"`
RawTx []TxRawResult `json:"rawtx,omitempty"`
Time int64 `json:"time"`
Nonce uint64 `json:"nonce"`
Bits string `json:"bits"`
Difficulty float64 `json:"difficulty"`
PreviousHashes []string `json:"previousblockhashes"`
NextHashes []string `json:"nextblockhashes,omitempty"`
Hash string `json:"hash"`
Confirmations uint64 `json:"confirmations"`
Size int32 `json:"size"`
Height int64 `json:"height"`
Version int32 `json:"version"`
VersionHex string `json:"versionHex"`
MerkleRoot string `json:"merkleroot"`
Tx []string `json:"tx,omitempty"`
RawTx []TxRawResult `json:"rawtx,omitempty"`
Time int64 `json:"time"`
Nonce uint64 `json:"nonce"`
Bits string `json:"bits"`
Difficulty float64 `json:"difficulty"`
ParentHashes []string `json:"parentblockhashes"`
NextHashes []string `json:"nextblockhashes,omitempty"`
}
// CreateMultiSigResult models the data returned from the createmultisig
@ -133,18 +133,18 @@ type GetBlockTemplateResultAux struct {
type GetBlockTemplateResult struct {
// Base fields from BIP 0022. CoinbaseAux is optional. One of
// CoinbaseTxn or CoinbaseValue must be specified, but not both.
Bits string `json:"bits"`
CurTime int64 `json:"curtime"`
Height int64 `json:"height"`
PreviousHashes []string `json:"previousblockhashes"`
SigOpLimit int64 `json:"sigoplimit,omitempty"`
SizeLimit int64 `json:"sizelimit,omitempty"`
Transactions []GetBlockTemplateResultTx `json:"transactions"`
Version int32 `json:"version"`
CoinbaseAux *GetBlockTemplateResultAux `json:"coinbaseaux,omitempty"`
CoinbaseTxn *GetBlockTemplateResultTx `json:"coinbasetxn,omitempty"`
CoinbaseValue *uint64 `json:"coinbasevalue,omitempty"`
WorkID string `json:"workid,omitempty"`
Bits string `json:"bits"`
CurTime int64 `json:"curtime"`
Height int64 `json:"height"`
ParentHashes []string `json:"parentblockhashes"`
SigOpLimit int64 `json:"sigoplimit,omitempty"`
SizeLimit int64 `json:"sizelimit,omitempty"`
Transactions []GetBlockTemplateResultTx `json:"transactions"`
Version int32 `json:"version"`
CoinbaseAux *GetBlockTemplateResultAux `json:"coinbaseaux,omitempty"`
CoinbaseTxn *GetBlockTemplateResultTx `json:"coinbasetxn,omitempty"`
CoinbaseValue *uint64 `json:"coinbasevalue,omitempty"`
WorkID string `json:"workid,omitempty"`
// Optional long polling from BIP 0022.
LongPollID string `json:"longpollid,omitempty"`

View File

@ -111,16 +111,16 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
}
// Don't bother trying to process orphans.
prevBlocks := block.MsgBlock().Header.PrevBlocks
if len(prevBlocks) > 0 {
exist, err := bi.dag.HaveBlocks(prevBlocks)
parentHashes := block.MsgBlock().Header.ParentHashes
if len(parentHashes) > 0 {
exist, err := bi.dag.HaveBlocks(parentHashes)
if err != nil {
return false, err
}
if !exist {
return false, fmt.Errorf("import file contains block "+
"%v which does not link to the available "+
"block DAG", prevBlocks)
"block DAG", parentHashes)
}
}

View File

@ -106,8 +106,8 @@ func findCandidates(dag *blockdag.BlockDAG, highestTipHash *daghash.Hash) ([]*da
candidates = append(candidates, &checkpoint)
}
prevBlockHashes := block.MsgBlock().Header.PrevBlocks
selectedBlockHash := &prevBlockHashes[0]
parentHashes := block.MsgBlock().Header.ParentHashes
selectedBlockHash := &parentHashes[0]
block, err = dag.BlockByHash(selectedBlockHash)
if err != nil {
return nil, err

View File

@ -78,13 +78,13 @@ var genesisMerkleRoot = daghash.Hash([daghash.HashSize]byte{ // Make go vet happ
// public transaction ledger for the main network.
var genesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe076c, 0), // 2018-10-10 14:06:36 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
Nonce: 0x80000000000d8796, // 9223372036855662486
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe076c, 0), // 2018-10-10 14:06:36 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
Nonce: 0x80000000000d8796, // 9223372036855662486
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}
@ -107,13 +107,13 @@ var regTestGenesisMerkleRoot = genesisMerkleRoot
// as the public transaction ledger for the regression test network.
var regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe0d4b, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 0x00000000,
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe0d4b, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 0x00000000,
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}
@ -136,13 +136,13 @@ var testNet3GenesisMerkleRoot = genesisMerkleRoot
// serves as the public transaction ledger for the test network (version 3).
var testNet3GenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe0e49, 0), // 2018-06-19 09:04:06 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
Nonce: 0xc00000000032560b, // 2150570811
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe0e49, 0), // 2018-06-19 09:04:06 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
Nonce: 0xc00000000032560b, // 2150570811
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}
@ -165,13 +165,13 @@ var simNetGenesisMerkleRoot = genesisMerkleRoot
// as the public transaction ledger for the simulation test network.
var simNetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe00fe, 0), // 2018-10-10 13:39:10 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 0xdffffffffffffffc, // 1610612733
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x5bbe00fe, 0), // 2018-10-10 13:39:10 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 0xdffffffffffffffc, // 1610612733
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}

View File

@ -131,11 +131,11 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
}
// Don't bother trying to process orphans.
prevHashes := block.MsgBlock().Header.PrevBlocks
for _, prevHash := range prevHashes {
parentHashes := block.MsgBlock().Header.ParentHashes
for _, parentHash := range parentHashes {
var exists bool
err := bi.db.View(func(tx database.Tx) error {
exists, err = tx.HasBlock(&prevHash)
exists, err = tx.HasBlock(&parentHash)
return err
})
if err != nil {
@ -144,7 +144,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
if !exists {
return false, fmt.Errorf("import file contains block "+
"%v which does not link to the available "+
"block chain", prevHash)
"block chain", parentHash)
}
}

View File

@ -1234,23 +1234,23 @@ func (tx *transaction) fetchBlockRow(hash *daghash.Hash) ([]byte, error) {
return blockRow, nil
}
// The offset in a block header at which numPrevBlocks resides.
const numPrevBlocksOffset = 4
// The offset in a block header at which NumParentBlocks resides.
const numParentBlocksOffset = 4
// fetchBlockHeaderSize fetches the numPrevBlocks field out of the block header
// fetchBlockHeaderSize fetches the NumParentBlocks field out of the block header
// and uses it to compute the total size of the block header
func (tx *transaction) fetchBlockHeaderSize(hash *daghash.Hash) (byte, error) {
r, err := tx.FetchBlockRegion(&database.BlockRegion{
Hash: hash,
Offset: numPrevBlocksOffset,
Offset: numParentBlocksOffset,
Len: 1,
})
if err != nil {
return 0, err
}
numPrevBlocks := r[0]
return numPrevBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload, nil
numParentBlocks := r[0]
return numParentBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload, nil
}
// FetchBlockHeader returns the raw serialized bytes for the block header
@ -1282,13 +1282,13 @@ func (tx *transaction) FetchBlockHeader(hash *daghash.Hash) ([]byte, error) {
})
}
// fetchBlockHeadersSizes fetches the numPrevBlocks fields out of the block headers
// fetchBlockHeadersSizes fetches the NumParentBlocks fields out of the block headers
// and uses them to compute the total sizes of the block headers
func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, error) {
regions := make([]database.BlockRegion, len(hashes))
for i := range hashes {
regions[i].Hash = &hashes[i]
regions[i].Offset = numPrevBlocksOffset
regions[i].Offset = numParentBlocksOffset
regions[i].Len = 1
}
rs, err := tx.FetchBlockRegions(regions)
@ -1298,8 +1298,8 @@ func (tx *transaction) fetchBlockHeadersSizes(hashes []daghash.Hash) ([]byte, er
sizes := make([]byte, len(hashes))
for i, r := range rs {
numPrevBlocks := r[0]
sizes[i] = numPrevBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload
numParentBlocks := r[0]
sizes[i] = numParentBlocks*daghash.HashSize + wire.BaseBlockHeaderPayload
}
return sizes, nil

View File

@ -44,13 +44,13 @@ func generateBlocks(out *os.File, numBlocks int) {
func generateBlock(parent *wire.MsgBlock) *wire.MsgBlock {
return &wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 1,
PrevBlocks: []daghash.Hash{parent.BlockHash()},
MerkleRoot: genesisMerkleRoot,
Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
Nonce: 0xc0192550, // 2148484547
Version: 1,
NumParentBlocks: 1,
ParentHashes: []daghash.Hash{parent.BlockHash()},
MerkleRoot: genesisMerkleRoot,
Timestamp: time.Unix(0x5b28c4c8, 0), // 2018-06-19 08:54:32 +0000 UTC
Bits: 0x1e00ffff, // 503382015 [000000ffff000000000000000000000000000000000000000000000000000000]
Nonce: 0xc0192550, // 2148484547
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}

View File

@ -435,22 +435,22 @@ func TestBIP0068AndCsv(t *testing.T) {
// Now mine 10 additional blocks giving the inputs generated above an
// age of 11. Space out each block 10 minutes after the previous block.
prevBlockHash, err := r.Node.GetBestBlockHash()
parentBlockHash, err := r.Node.GetBestBlockHash()
if err != nil {
t.Fatalf("unable to get prior block hash: %v", err)
}
prevBlock, err := r.Node.GetBlock(prevBlockHash)
parentBlock, err := r.Node.GetBlock(parentBlockHash)
if err != nil {
t.Fatalf("unable to get block: %v", err)
}
for i := 0; i < relativeBlockLock; i++ {
timeStamp := prevBlock.Header.Timestamp.Add(time.Minute * 10)
timeStamp := parentBlock.Header.Timestamp.Add(time.Minute * 10)
b, err := r.GenerateAndSubmitBlock(nil, -1, timeStamp)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
prevBlock = b.MsgBlock()
parentBlock = b.MsgBlock()
}
// A helper function to create fully signed transactions in-line during

View File

@ -132,37 +132,37 @@ func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int32,
// initialized), then the timestamp of the previous block will be used plus 1
// second is used. Passing nil for the previous block results in a block that
// builds off of the genesis block for the specified chain.
func CreateBlock(prevBlock *util.Block, inclusionTxs []*util.Tx,
func CreateBlock(parentBlock *util.Block, inclusionTxs []*util.Tx,
blockVersion int32, blockTime time.Time, miningAddr util.Address,
mineTo []wire.TxOut, net *dagconfig.Params) (*util.Block, error) {
var (
prevHash *daghash.Hash
blockHeight int32
prevBlockTime time.Time
parentHash *daghash.Hash
blockHeight int32
parentBlockTime time.Time
)
// If the previous block isn't specified, then we'll construct a block
// If the parent block isn't specified, then we'll construct a block
// that builds off of the genesis block for the chain.
if prevBlock == nil {
prevHash = net.GenesisHash
if parentBlock == nil {
parentHash = net.GenesisHash
blockHeight = 1
prevBlockTime = net.GenesisBlock.Header.Timestamp.Add(time.Minute)
parentBlockTime = net.GenesisBlock.Header.Timestamp.Add(time.Minute)
} else {
prevHash = prevBlock.Hash()
blockHeight = prevBlock.Height() + 1
prevBlockTime = prevBlock.MsgBlock().Header.Timestamp
parentHash = parentBlock.Hash()
blockHeight = parentBlock.Height() + 1
parentBlockTime = parentBlock.MsgBlock().Header.Timestamp
}
// If a target block time was specified, then use that as the header's
// timestamp. Otherwise, add one second to the previous block unless
// timestamp. Otherwise, add one second to the parent block unless
// it's the genesis block in which case use the current time.
var ts time.Time
switch {
case !blockTime.IsZero():
ts = blockTime
default:
ts = prevBlockTime.Add(time.Second)
ts = parentBlockTime.Add(time.Second)
}
extraNonce := uint64(0)
@ -184,11 +184,11 @@ func CreateBlock(prevBlock *util.Block, inclusionTxs []*util.Tx,
merkles := blockdag.BuildMerkleTreeStore(blockTxns)
var block wire.MsgBlock
block.Header = wire.BlockHeader{
Version: blockVersion,
PrevBlocks: []daghash.Hash{*prevHash},
MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts,
Bits: net.PowLimitBits,
Version: blockVersion,
ParentHashes: []daghash.Hash{*parentHash},
MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts,
Bits: net.PowLimitBits,
}
for _, tx := range blockTxns {
if err := block.AddTransaction(tx.MsgTx()); err != nil {

View File

@ -435,19 +435,19 @@ func (h *Harness) GenerateAndSubmitBlockWithCustomCoinbaseOutputs(
blockVersion = BlockVersion
}
prevBlockHash, prevBlockHeight, err := h.Node.GetBestBlock()
parentBlockHash, parentBlockHeight, err := h.Node.GetBestBlock()
if err != nil {
return nil, err
}
mBlock, err := h.Node.GetBlock(prevBlockHash)
mBlock, err := h.Node.GetBlock(parentBlockHash)
if err != nil {
return nil, err
}
prevBlock := util.NewBlock(mBlock)
prevBlock.SetHeight(prevBlockHeight)
parentBlock := util.NewBlock(mBlock)
parentBlock.SetHeight(parentBlockHeight)
// Create a new block including the specified transactions
newBlock, err := CreateBlock(prevBlock, txns, blockVersion,
newBlock, err := CreateBlock(parentBlock, txns, blockVersion,
blockTime, h.wallet.coinbaseAddr, mineTo, h.ActiveNet)
if err != nil {
return nil, err

View File

@ -80,20 +80,20 @@ func syncBlocks(nodes []*Harness) error {
retry:
for !blocksMatch {
var prevHash *daghash.Hash
var parentHash *daghash.Hash
var prevHeight int32
for _, node := range nodes {
blockHash, blockHeight, err := node.Node.GetBestBlock()
if err != nil {
return err
}
if prevHash != nil && (*blockHash != *prevHash ||
if parentHash != nil && (*blockHash != *parentHash ||
blockHeight != prevHeight) {
time.Sleep(time.Millisecond * 100)
continue retry
}
prevHash, prevHeight = blockHash, blockHeight
parentHash, prevHeight = blockHash, blockHeight
}
blocksMatch = true

View File

@ -162,9 +162,9 @@ func (m *CPUMiner) submitBlock(block *util.Block) bool {
// a new block, but the check only happens periodically, so it is
// possible a block was found and submitted in between.
msgBlock := block.MsgBlock()
if !daghash.AreEqual(msgBlock.Header.PrevBlocks, m.g.TipHashes()) {
if !daghash.AreEqual(msgBlock.Header.ParentHashes, m.g.TipHashes()) {
log.Debugf("Block submitted via CPU miner with previous "+
"blocks %s is stale", msgBlock.Header.PrevBlocks)
"blocks %s is stale", msgBlock.Header.ParentHashes)
return false
}
@ -247,7 +247,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32,
hashesCompleted = 0
// The current block is stale if the DAG has changed.
if !daghash.AreEqual(header.PrevBlocks, m.g.TipHashes()) {
if !daghash.AreEqual(header.ParentHashes, m.g.TipHashes()) {
return false
}

View File

@ -715,12 +715,12 @@ mempoolLoop:
merkles := blockdag.BuildMerkleTreeStore(blockTxns)
var msgBlock wire.MsgBlock
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
NumPrevBlocks: byte(len(g.dag.TipHashes())),
PrevBlocks: g.dag.TipHashes(),
MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts,
Bits: reqDifficulty,
Version: nextBlockVersion,
NumParentBlocks: byte(len(g.dag.TipHashes())),
ParentHashes: g.dag.TipHashes(),
MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts,
Bits: reqDifficulty,
}
for _, tx := range blockTxns {
if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil {

View File

@ -656,10 +656,10 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
// for headers starting from the block after this one up to the next
// checkpoint.
prevHeight := sm.nextCheckpoint.Height
prevHash := sm.nextCheckpoint.Hash
parentHash := sm.nextCheckpoint.Hash
sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
if sm.nextCheckpoint != nil {
locator := blockdag.BlockLocator([]*daghash.Hash{prevHash})
locator := blockdag.BlockLocator([]*daghash.Hash{parentHash})
err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
if err != nil {
log.Warnf("Failed to send getheaders message to "+
@ -780,7 +780,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
// add it to the list of headers.
node := headerNode{hash: &blockHash}
prevNode := prevNodeEl.Value.(*headerNode)
if prevNode.hash.IsEqual(&blockHeader.PrevBlocks[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
if prevNode.hash.IsEqual(&blockHeader.ParentHashes[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
node.height = prevNode.height + 1
e := sm.headerList.PushBack(&node)
if sm.startHeader == nil {

View File

@ -816,18 +816,18 @@ func (sp *Peer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
// Populate the PrevFilterHeader field.
if msg.StartHeight > 0 {
prevBlockHash := &hashList[0]
parentHash := &hashList[0]
// Fetch the raw committed filter header bytes from the
// database.
headerBytes, err := sp.server.CfIndex.FilterHeaderByBlockHash(
prevBlockHash, msg.FilterType)
parentHash, msg.FilterType)
if err != nil {
peerLog.Errorf("Error retrieving CF header: %v", err)
return
}
if len(headerBytes) == 0 {
peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash)
peerLog.Warnf("Could not obtain CF header for %v", parentHash)
return
}

View File

@ -75,7 +75,7 @@ const (
gbtNonceRange = "000000000000ffffffffffff"
// gbtRegenerateSeconds is the number of seconds that must pass before
// a new template is generated when the previous block hash has not
// a new template is generated when the parent block hashes has not
// changed and there have been changes to the available transactions
// in the memory pool.
gbtRegenerateSeconds = 60
@ -90,7 +90,7 @@ var (
// declared here to avoid the overhead of creating the slice on every
// invocation for constant data.
gbtMutableFields = []string{
"time", "transactions/add", "prevblock", "coinbase/append",
"time", "transactions/add", "parentblock", "coinbase/append",
}
// gbtCoinbaseAux describes additional data that miners should include
@ -1133,19 +1133,19 @@ func handleGetBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
params := s.cfg.DAGParams
blockHeader := &blk.MsgBlock().Header
blockReply := btcjson.GetBlockVerboseResult{
Hash: c.Hash,
Version: blockHeader.Version,
VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
MerkleRoot: blockHeader.MerkleRoot.String(),
PreviousHashes: daghash.Strings(blockHeader.PrevBlocks),
Nonce: blockHeader.Nonce,
Time: blockHeader.Timestamp.Unix(),
Confirmations: uint64(1 + s.cfg.DAG.Height() - blockHeight), //TODO: (Ori) This is probably wrong. Done only for compilation
Height: int64(blockHeight),
Size: int32(len(blkBytes)),
Bits: strconv.FormatInt(int64(blockHeader.Bits), 16),
Difficulty: getDifficultyRatio(blockHeader.Bits, params),
NextHashes: nextHashStrings,
Hash: c.Hash,
Version: blockHeader.Version,
VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
MerkleRoot: blockHeader.MerkleRoot.String(),
ParentHashes: daghash.Strings(blockHeader.ParentHashes),
Nonce: blockHeader.Nonce,
Time: blockHeader.Timestamp.Unix(),
Confirmations: uint64(1 + s.cfg.DAG.Height() - blockHeight), //TODO: (Ori) This is probably wrong. Done only for compilation
Height: int64(blockHeight),
Size: int32(len(blkBytes)),
Bits: strconv.FormatInt(int64(blockHeader.Bits), 16),
Difficulty: getDifficultyRatio(blockHeader.Bits, params),
NextHashes: nextHashStrings,
}
if c.VerboseTx == nil || !*c.VerboseTx {
@ -1325,32 +1325,32 @@ func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{})
params := s.cfg.DAGParams
blockHeaderReply := btcjson.GetBlockHeaderVerboseResult{
Hash: c.Hash,
Confirmations: uint64(1 + s.cfg.DAG.Height() - blockHeight), //TODO: (Ori) This is probably wrong. Done only for compilation
Height: blockHeight,
Version: blockHeader.Version,
VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
MerkleRoot: blockHeader.MerkleRoot.String(),
NextHashes: nextHashStrings,
PreviousHashes: daghash.Strings(blockHeader.PrevBlocks),
Nonce: uint64(blockHeader.Nonce),
Time: blockHeader.Timestamp.Unix(),
Bits: strconv.FormatInt(int64(blockHeader.Bits), 16),
Difficulty: getDifficultyRatio(blockHeader.Bits, params),
Hash: c.Hash,
Confirmations: uint64(1 + s.cfg.DAG.Height() - blockHeight), //TODO: (Ori) This is probably wrong. Done only for compilation
Height: blockHeight,
Version: blockHeader.Version,
VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
MerkleRoot: blockHeader.MerkleRoot.String(),
NextHashes: nextHashStrings,
ParentHashes: daghash.Strings(blockHeader.ParentHashes),
Nonce: uint64(blockHeader.Nonce),
Time: blockHeader.Timestamp.Unix(),
Bits: strconv.FormatInt(int64(blockHeader.Bits), 16),
Difficulty: getDifficultyRatio(blockHeader.Bits, params),
}
return blockHeaderReply, nil
}
// encodeLongPollID encodes the passed details into an ID that can be used to
// uniquely identify a block template.
func encodeLongPollID(prevHashes []daghash.Hash, lastGenerated time.Time) string {
return fmt.Sprintf("%s-%d", daghash.JoinHashesStrings(prevHashes, ""), lastGenerated.Unix())
func encodeLongPollID(parentHashes []daghash.Hash, lastGenerated time.Time) string {
return fmt.Sprintf("%s-%d", daghash.JoinHashesStrings(parentHashes, ""), lastGenerated.Unix())
}
// decodeLongPollID decodes an ID that is used to uniquely identify a block
// template. This is mainly used as a mechanism to track when to update clients
// that are using long polling for block templates. The ID consists of the
// previous blocks hashes for the associated template and the time the associated
// parent blocks hashes for the associated template and the time the associated
// template was generated.
func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
fields := strings.Split(longPollID, "-")
@ -1358,20 +1358,20 @@ func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
return nil, 0, errors.New("decodeLongPollID: invalid number of fields")
}
prevHashesStr := fields[0]
if len(prevHashesStr)%daghash.HashSize != 0 {
return nil, 0, errors.New("decodeLongPollID: invalid previous hashes format")
parentHashesStr := fields[0]
if len(parentHashesStr)%daghash.HashSize != 0 {
return nil, 0, errors.New("decodeLongPollID: invalid parent hashes format")
}
numberOfHashes := len(prevHashesStr) / daghash.HashSize
numberOfHashes := len(parentHashesStr) / daghash.HashSize
prevHashes := make([]daghash.Hash, 0, numberOfHashes)
parentHashes := make([]daghash.Hash, 0, numberOfHashes)
for i := 0; i < len(prevHashesStr); i += daghash.HashSize {
hash, err := daghash.NewHashFromStr(prevHashesStr[i : i+daghash.HashSize])
for i := 0; i < len(parentHashesStr); i += daghash.HashSize {
hash, err := daghash.NewHashFromStr(parentHashesStr[i : i+daghash.HashSize])
if err != nil {
return nil, 0, fmt.Errorf("decodeLongPollID: NewHashFromStr: %v", err)
}
prevHashes = append(prevHashes, *hash)
parentHashes = append(parentHashes, *hash)
}
lastGenerated, err := strconv.ParseInt(fields[1], 10, 64)
@ -1379,7 +1379,7 @@ func decodeLongPollID(longPollID string) ([]daghash.Hash, int64, error) {
return nil, 0, fmt.Errorf("decodeLongPollID: Cannot parse timestamp: %v", lastGenerated)
}
return prevHashes, lastGenerated, nil
return parentHashes, lastGenerated, nil
}
// notifyLongPollers notifies any channels that have been registered to be
@ -1467,7 +1467,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) {
}
// templateUpdateChan returns a channel that will be closed once the block
// template associated with the passed previous hash and last generated time
// template associated with the passed parent hashes and last generated time
// is stale. The function will return existing channels for duplicate
// parameters which allows multiple clients to wait for the same block template
// without requiring a different channel for each client.
@ -1476,7 +1476,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) {
func (state *gbtWorkState) templateUpdateChan(tipHashes []daghash.Hash, lastGenerated int64) chan struct{} {
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
// Either get the current list of channels waiting for updates about
// changes to block template for the previous hash or create a new one.
// changes to block template for the parent hashes or create a new one.
channels, ok := state.notifyMap[tipHashesStr]
if !ok {
m := make(map[int64]chan struct{})
@ -1710,22 +1710,22 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld
targetDifficulty := fmt.Sprintf("%064x", blockdag.CompactToBig(header.Bits))
longPollID := encodeLongPollID(state.tipHashes, state.lastGenerated)
reply := btcjson.GetBlockTemplateResult{
Bits: strconv.FormatInt(int64(header.Bits), 16),
CurTime: header.Timestamp.Unix(),
Height: int64(template.Height),
PreviousHashes: daghash.Strings(header.PrevBlocks),
SigOpLimit: blockdag.MaxSigOpsPerBlock,
SizeLimit: wire.MaxBlockPayload,
Transactions: transactions,
Version: header.Version,
LongPollID: longPollID,
SubmitOld: submitOld,
Target: targetDifficulty,
MinTime: state.minTimestamp.Unix(),
MaxTime: maxTime.Unix(),
Mutable: gbtMutableFields,
NonceRange: gbtNonceRange,
Capabilities: gbtCapabilities,
Bits: strconv.FormatInt(int64(header.Bits), 16),
CurTime: header.Timestamp.Unix(),
Height: int64(template.Height),
ParentHashes: daghash.Strings(header.ParentHashes),
SigOpLimit: blockdag.MaxSigOpsPerBlock,
SizeLimit: wire.MaxBlockPayload,
Transactions: transactions,
Version: header.Version,
LongPollID: longPollID,
SubmitOld: submitOld,
Target: targetDifficulty,
MinTime: state.minTimestamp.Unix(),
MaxTime: maxTime.Unix(),
Mutable: gbtMutableFields,
NonceRange: gbtNonceRange,
Capabilities: gbtCapabilities,
}
if useCoinbaseValue {
@ -1790,7 +1790,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal
// Just return the current block template if the long poll ID provided by
// the caller is invalid.
prevHashes, lastGenerated, err := decodeLongPollID(longPollID)
parentHashes, lastGenerated, err := decodeLongPollID(longPollID)
if err != nil {
result, err := state.blockTemplateResult(useCoinbaseValue, nil)
if err != nil {
@ -1805,7 +1805,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal
// Return the block template now if the specific block template
// identified by the long poll ID no longer matches the current block
// template as this means the provided template is stale.
areHashesEqual := daghash.AreEqual(state.template.Block.Header.PrevBlocks, prevHashes)
areHashesEqual := daghash.AreEqual(state.template.Block.Header.ParentHashes, parentHashes)
if !areHashesEqual ||
lastGenerated != state.lastGenerated.Unix() {
@ -1824,11 +1824,11 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal
return result, nil
}
// Register the previous hash and last generated time for notifications
// Register the parent hashes and last generated time for notifications
// Get a channel that will be notified when the template associated with
// the provided ID is stale and a new block template should be returned to
// the caller.
longPollChan := state.templateUpdateChan(prevHashes, lastGenerated)
longPollChan := state.templateUpdateChan(parentHashes, lastGenerated)
state.Unlock()
select {
@ -2032,12 +2032,12 @@ func chainErrToGBTErrString(err error) string {
return "bad-script-malformed"
case blockdag.ErrScriptValidation:
return "bad-script-validate"
case blockdag.ErrPreviousBlockUnknown:
return "prev-blk-not-found"
case blockdag.ErrParentBlockUnknown:
return "parent-blk-not-found"
case blockdag.ErrInvalidAncestorBlock:
return "bad-prevblk"
case blockdag.ErrPrevBlockNotBest:
return "inconclusive-not-best-prvblk"
return "bad-parentblk"
case blockdag.ErrParentBlockNotCurrentTips:
return "inconclusive-not-best-parentblk"
}
return "rejected: " + err.Error()
@ -2079,11 +2079,11 @@ func handleGetBlockTemplateProposal(s *Server, request *btcjson.TemplateRequest)
}
block := util.NewBlock(&msgBlock)
// Ensure the block is building from the expected previous blocks.
expectedPrevHashes := s.cfg.DAG.TipHashes()
prevHashes := block.MsgBlock().Header.PrevBlocks
if !daghash.AreEqual(expectedPrevHashes, prevHashes) {
return "bad-prevblk", nil
// Ensure the block is building from the expected parent blocks.
expectedParentHashes := s.cfg.DAG.TipHashes()
parentHashes := block.MsgBlock().Header.ParentHashes
if !daghash.AreEqual(expectedParentHashes, parentHashes) {
return "bad-parentblk", nil
}
if err := s.cfg.DAG.CheckConnectBlockTemplate(block); err != nil {
@ -3375,7 +3375,7 @@ func verifyDAG(s *Server, level, depth int32) error {
}
}
currentHash = *block.MsgBlock().Header.SelectedPrevBlock()
currentHash = *block.MsgBlock().Header.SelectedParentHash()
}
log.Infof("Chain verify completed successfully")

View File

@ -228,21 +228,21 @@ var helpDescsEnUS = map[string]string{
"searchrawtransactionsresult-size": "The size of the transaction in bytes",
// GetBlockVerboseResult help.
"getblockverboseresult-hash": "The hash of the block (same as provided)",
"getblockverboseresult-confirmations": "The number of confirmations",
"getblockverboseresult-size": "The size of the block",
"getblockverboseresult-height": "The height of the block in the block chain",
"getblockverboseresult-version": "The block version",
"getblockverboseresult-versionHex": "The block version in hexadecimal",
"getblockverboseresult-merkleroot": "Root hash of the merkle tree",
"getblockverboseresult-tx": "The transaction hashes (only when verbosetx=false)",
"getblockverboseresult-rawtx": "The transactions as JSON objects (only when verbosetx=true)",
"getblockverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
"getblockverboseresult-nonce": "The block nonce",
"getblockverboseresult-bits": "The bits which represent the block difficulty",
"getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
"getblockverboseresult-previousblockhashes": "The hashes of the previous blocks",
"getblockverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)",
"getblockverboseresult-hash": "The hash of the block (same as provided)",
"getblockverboseresult-confirmations": "The number of confirmations",
"getblockverboseresult-size": "The size of the block",
"getblockverboseresult-height": "The height of the block in the block chain",
"getblockverboseresult-version": "The block version",
"getblockverboseresult-versionHex": "The block version in hexadecimal",
"getblockverboseresult-merkleroot": "Root hash of the merkle tree",
"getblockverboseresult-tx": "The transaction hashes (only when verbosetx=false)",
"getblockverboseresult-rawtx": "The transactions as JSON objects (only when verbosetx=true)",
"getblockverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
"getblockverboseresult-nonce": "The block nonce",
"getblockverboseresult-bits": "The bits which represent the block difficulty",
"getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
"getblockverboseresult-parentblockhashes": "The hashes of the parent blocks",
"getblockverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)",
// GetBlockCountCmd help.
"getBlockCount--synopsis": "Returns the number of blocks in the longest block chain.",
@ -262,18 +262,18 @@ var helpDescsEnUS = map[string]string{
"getBlockHeader--result0": "The block header hash",
// GetBlockHeaderVerboseResult help.
"getblockheaderverboseresult-hash": "The hash of the block (same as provided)",
"getblockheaderverboseresult-confirmations": "The number of confirmations",
"getblockheaderverboseresult-height": "The height of the block in the block chain",
"getblockheaderverboseresult-version": "The block version",
"getblockheaderverboseresult-versionHex": "The block version in hexadecimal",
"getblockheaderverboseresult-merkleroot": "Root hash of the merkle tree",
"getblockheaderverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
"getblockheaderverboseresult-nonce": "The block nonce",
"getblockheaderverboseresult-bits": "The bits which represent the block difficulty",
"getblockheaderverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
"getblockheaderverboseresult-previousblockhashes": "The hashes of the previous blocks",
"getblockheaderverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)",
"getblockheaderverboseresult-hash": "The hash of the block (same as provided)",
"getblockheaderverboseresult-confirmations": "The number of confirmations",
"getblockheaderverboseresult-height": "The height of the block in the block chain",
"getblockheaderverboseresult-version": "The block version",
"getblockheaderverboseresult-versionHex": "The block version in hexadecimal",
"getblockheaderverboseresult-merkleroot": "Root hash of the merkle tree",
"getblockheaderverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
"getblockheaderverboseresult-nonce": "The block nonce",
"getblockheaderverboseresult-bits": "The bits which represent the block difficulty",
"getblockheaderverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
"getblockheaderverboseresult-parentblockhashes": "The hashes of the parent blocks",
"getblockheaderverboseresult-nextblockhashes": "The hashes of the next blocks (only if there are any)",
// TemplateRequest help.
"templaterequest-mode": "This is 'template', 'proposal', or omitted",
@ -297,29 +297,29 @@ var helpDescsEnUS = map[string]string{
"getblocktemplateresultaux-flags": "Hex-encoded byte-for-byte data to include in the coinbase signature script",
// GetBlockTemplateResult help.
"getblocktemplateresult-bits": "Hex-encoded compressed difficulty",
"getblocktemplateresult-curtime": "Current time as seen by the server (recommended for block time); must fall within mintime/maxtime rules",
"getblocktemplateresult-height": "Height of the block to be solved",
"getblocktemplateresult-previousblockhashes": "Hex-encoded big-endian hashes of the previous blocks",
"getblocktemplateresult-sigoplimit": "Number of sigops allowed in blocks ",
"getblocktemplateresult-sizelimit": "Number of bytes allowed in blocks",
"getblocktemplateresult-transactions": "Array of transactions as JSON objects",
"getblocktemplateresult-version": "The block version",
"getblocktemplateresult-coinbaseaux": "Data that should be included in the coinbase signature script",
"getblocktemplateresult-coinbasetxn": "Information about the coinbase transaction",
"getblocktemplateresult-coinbasevalue": "Total amount available for the coinbase in Satoshi",
"getblocktemplateresult-workid": "This value must be returned with result if provided (not provided)",
"getblocktemplateresult-longpollid": "Identifier for long poll request which allows monitoring for expiration",
"getblocktemplateresult-longpolluri": "An alternate URI to use for long poll requests if provided (not provided)",
"getblocktemplateresult-submitold": "Not applicable",
"getblocktemplateresult-target": "Hex-encoded big-endian number which valid results must be less than",
"getblocktemplateresult-expires": "Maximum number of seconds (starting from when the server sent the response) this work is valid for",
"getblocktemplateresult-maxtime": "Maximum allowed time",
"getblocktemplateresult-mintime": "Minimum allowed time",
"getblocktemplateresult-mutable": "List of mutations the server explicitly allows",
"getblocktemplateresult-noncerange": "Two concatenated hex-encoded big-endian 64-bit integers which represent the valid ranges of nonces the miner may scan",
"getblocktemplateresult-capabilities": "List of server capabilities including 'proposal' to indicate support for block proposals",
"getblocktemplateresult-reject-reason": "Reason the proposal was invalid as-is (only applies to proposal responses)",
"getblocktemplateresult-bits": "Hex-encoded compressed difficulty",
"getblocktemplateresult-curtime": "Current time as seen by the server (recommended for block time); must fall within mintime/maxtime rules",
"getblocktemplateresult-height": "Height of the block to be solved",
"getblocktemplateresult-parentblockhashes": "Hex-encoded big-endian hashes of the parent blocks",
"getblocktemplateresult-sigoplimit": "Number of sigops allowed in blocks ",
"getblocktemplateresult-sizelimit": "Number of bytes allowed in blocks",
"getblocktemplateresult-transactions": "Array of transactions as JSON objects",
"getblocktemplateresult-version": "The block version",
"getblocktemplateresult-coinbaseaux": "Data that should be included in the coinbase signature script",
"getblocktemplateresult-coinbasetxn": "Information about the coinbase transaction",
"getblocktemplateresult-coinbasevalue": "Total amount available for the coinbase in Satoshi",
"getblocktemplateresult-workid": "This value must be returned with result if provided (not provided)",
"getblocktemplateresult-longpollid": "Identifier for long poll request which allows monitoring for expiration",
"getblocktemplateresult-longpolluri": "An alternate URI to use for long poll requests if provided (not provided)",
"getblocktemplateresult-submitold": "Not applicable",
"getblocktemplateresult-target": "Hex-encoded big-endian number which valid results must be less than",
"getblocktemplateresult-expires": "Maximum number of seconds (starting from when the server sent the response) this work is valid for",
"getblocktemplateresult-maxtime": "Maximum allowed time",
"getblocktemplateresult-mintime": "Minimum allowed time",
"getblocktemplateresult-mutable": "List of mutations the server explicitly allows",
"getblocktemplateresult-noncerange": "Two concatenated hex-encoded big-endian 64-bit integers which represent the valid ranges of nonces the miner may scan",
"getblocktemplateresult-capabilities": "List of server capabilities including 'proposal' to indicate support for block proposals",
"getblocktemplateresult-reject-reason": "Reason the proposal was invalid as-is (only applies to proposal responses)",
// GetBlockTemplateCmd help.
"getBlockTemplate--synopsis": "Returns a JSON object with information necessary to construct a block to mine or accepts a proposal to validate.\n" +

View File

@ -2084,7 +2084,7 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
Message: "Failed to fetch block: " + err.Error(),
}
}
if lastBlockHash != nil && block.MsgBlock().Header.PrevBlocks[0] != *lastBlockHash { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
if lastBlockHash != nil && block.MsgBlock().Header.ParentHashes[0] != *lastBlockHash { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCInvalidParameter,
Message: fmt.Sprintf("Block %v is not a child of %v",

View File

@ -304,9 +304,9 @@ func TestBlockErrors(t *testing.T) {
// test Block operations.
var Block100000 = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{
[32]byte{ // Make go vet happy.
0x82, 0xdc, 0xbd, 0xe6, 0x88, 0x37, 0x74, 0x5b,
0x78, 0x6b, 0x03, 0x1d, 0xa3, 0x48, 0x3c, 0x45,

View File

@ -487,8 +487,8 @@ func TestFilterInsertUpdateNone(t *testing.T) {
func TestFilterInsertP2PubKeyOnly(t *testing.T) {
blockBytes := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x01, // NumPrevBlocks
0x82, 0xBB, 0x86, 0x9C, 0xF3, 0xA7, 0x93, 0x43, 0x2A, 0x66, 0xE8, // HashPrevBlocks
0x01, // NumParentBlocks
0x82, 0xBB, 0x86, 0x9C, 0xF3, 0xA7, 0x93, 0x43, 0x2A, 0x66, 0xE8, // ParentHashes
0x26, 0xE0, 0x5A, 0x6F, 0xC3, 0x74, 0x69, 0xF8, 0xEF, 0xB7, 0x42,
0x1D, 0xC8, 0x80, 0x67, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, 0x65, 0x9C, 0x79, // HashMerkleRoot

View File

@ -18,8 +18,8 @@ import (
func TestMerkleBlock3(t *testing.T) {
blockBytes := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x01, //NumPrevBlocks
0x79, 0xCD, 0xA8, 0x56, 0xB1, 0x43, 0xD9, 0xDB, 0x2C, 0x1C, 0xAF, //HashPrevBlocks
0x01, // NumParentBlocks
0x79, 0xCD, 0xA8, 0x56, 0xB1, 0x43, 0xD9, 0xDB, 0x2C, 0x1C, 0xAF, // ParentHashes
0xF0, 0x1D, 0x1A, 0xEC, 0xC8, 0x63, 0x0D, 0x30, 0x62, 0x5D, 0x10,
0xE8, 0xB4, 0xB8, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xB5, 0x0C, 0xC0, 0x69, 0xD6, 0xA3, 0xE3, 0x3E, 0x3F, 0xF8, 0x4A, // HashMerkleRoot

View File

@ -372,17 +372,17 @@ func GetFilterHash(filter *gcs.Filter) (daghash.Hash, error) {
// MakeHeaderForFilter makes a filter chain header for a filter, given the
// filter and the previous filter chain header.
func MakeHeaderForFilter(filter *gcs.Filter, prevHeader daghash.Hash) (daghash.Hash, error) {
func MakeHeaderForFilter(filter *gcs.Filter, parentHeader daghash.Hash) (daghash.Hash, error) {
filterTip := make([]byte, 2*daghash.HashSize)
filterHash, err := GetFilterHash(filter)
if err != nil {
return daghash.Hash{}, err
}
// In the buffer we created above we'll compute hash || prevHash as an
// In the buffer we created above we'll compute hash || parentHash as an
// intermediate value.
copy(filterTip, filterHash[:])
copy(filterTip[daghash.HashSize:], prevHeader[:])
copy(filterTip[daghash.HashSize:], parentHeader[:])
// The final filter hash is the double-sha256 of the hash computed
// above.

View File

@ -411,7 +411,7 @@ func BenchmarkDecodeGetHeaders(b *testing.B) {
// BenchmarkDecodeHeaders performs a benchmark on how long it takes to
// decode a headers message with the maximum number of headers and maximum number of
// previous hashes per header.
// parent hashes per header.
func BenchmarkDecodeHeaders(b *testing.B) {
// Create a message with the maximum number of headers.
pver := ProtocolVersion
@ -421,15 +421,15 @@ func BenchmarkDecodeHeaders(b *testing.B) {
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
prevBlocks := make([]daghash.Hash, MaxNumPrevBlocks)
for j := byte(0); j < MaxNumPrevBlocks; j++ {
parentHashes := make([]daghash.Hash, MaxNumParentBlocks)
for j := byte(0); j < MaxNumParentBlocks; j++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
prevBlocks[i] = *hash
parentHashes[i] = *hash
}
m.AddBlockHeader(NewBlockHeader(1, prevBlocks, hash, 0, uint64(i)))
m.AddBlockHeader(NewBlockHeader(1, parentHashes, hash, 0, uint64(i)))
}
// Serialize it so the bytes are available to test the decode below.

View File

@ -13,20 +13,20 @@ import (
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
// not including the list of previous block headers.
// not including the list of parent block hashes.
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// + NumPrevBlocks 1 byte + MerkleRoot hash.
// To get total size of block header len(PrevBlocks) * daghash.HashSize should be
// + NumParentBlocks 1 byte + MerkleRoot hash.
// To get total size of block header len(ParentHashes) * daghash.HashSize should be
// added to this value
const BaseBlockHeaderPayload = 25 + (daghash.HashSize)
// MaxNumPrevBlocks is the maximum number of previous blocks a block can reference.
// Currently set to 255 as the maximum number NumPrevBlocks can be due to it being a byte
const MaxNumPrevBlocks = 255
// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255 as the maximum number NumParentBlocks can be due to it being a byte
const MaxNumParentBlocks = 255
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// BaseBlockHeaderPayload + up to MaxNumPrevBlocks hashes of previous blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumPrevBlocks * daghash.HashSize)
// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * daghash.HashSize)
// BlockHeader defines information about a block and is used in the bitcoin
// block (MsgBlock) and headers (MsgHeader) messages.
@ -34,11 +34,11 @@ type BlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version int32
// Number of entries in PrevBlocks
NumPrevBlocks byte
// Number of entries in ParentHashes
NumParentBlocks byte
// Hashes of the previous block headers in the blockDAG.
PrevBlocks []daghash.Hash
// Hashes of the parent block headers in the blockDAG.
ParentHashes []daghash.Hash
// Merkle tree reference to hash of all transactions for the block.
MerkleRoot daghash.Hash
@ -59,24 +59,24 @@ func (h *BlockHeader) BlockHash() daghash.Hash {
// transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a
// run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+len(h.PrevBlocks)))
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+len(h.ParentHashes)))
_ = writeBlockHeader(buf, 0, h)
return daghash.DoubleHashH(buf.Bytes())
}
// SelectedPrevBlock returns the hash of the selected block header.
func (h *BlockHeader) SelectedPrevBlock() *daghash.Hash {
if h.NumPrevBlocks == 0 {
// SelectedParentHash returns the hash of the selected parent block header.
func (h *BlockHeader) SelectedParentHash() *daghash.Hash {
if h.NumParentBlocks == 0 {
return nil
}
return &h.PrevBlocks[0]
return &h.ParentHashes[0]
}
// IsGenesis returns true iff this block is a genesis block
func (h *BlockHeader) IsGenesis() bool {
return h.NumPrevBlocks == 0
return h.NumParentBlocks == 0
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
@ -118,25 +118,25 @@ func (h *BlockHeader) Serialize(w io.Writer) error {
// SerializeSize returns the number of bytes it would take to serialize the
// block header.
func (h *BlockHeader) SerializeSize() int {
return BaseBlockHeaderPayload + int(h.NumPrevBlocks)*daghash.HashSize
return BaseBlockHeaderPayload + int(h.NumParentBlocks)*daghash.HashSize
}
// NewBlockHeader returns a new BlockHeader using the provided version, parent
// block hashes, merkle root hash, difficulty bits, and nonce used to generate the
// block with defaults or calculated values for the remaining fields.
func NewBlockHeader(version int32, prevHashes []daghash.Hash, merkleRootHash *daghash.Hash,
func NewBlockHeader(version int32, parentHashes []daghash.Hash, merkleRootHash *daghash.Hash,
bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
NumPrevBlocks: byte(len(prevHashes)),
PrevBlocks: prevHashes,
MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
Version: version,
NumParentBlocks: byte(len(parentHashes)),
ParentHashes: parentHashes,
MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
}
}
@ -144,14 +144,14 @@ func NewBlockHeader(version int32, prevHashes []daghash.Hash, merkleRootHash *da
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
err := readElements(r, &bh.Version, &bh.NumPrevBlocks)
err := readElements(r, &bh.Version, &bh.NumParentBlocks)
if err != nil {
return err
}
bh.PrevBlocks = make([]daghash.Hash, bh.NumPrevBlocks)
for i := byte(0); i < bh.NumPrevBlocks; i++ {
err := readElement(r, &bh.PrevBlocks[i])
bh.ParentHashes = make([]daghash.Hash, bh.NumParentBlocks)
for i := byte(0); i < bh.NumParentBlocks; i++ {
err := readElement(r, &bh.ParentHashes[i])
if err != nil {
return err
}
@ -164,6 +164,6 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
// opposed to encoding for the wire.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := int64(bh.Timestamp.Unix())
return writeElements(w, bh.Version, bh.NumPrevBlocks, &bh.PrevBlocks, &bh.MerkleRoot,
return writeElements(w, bh.Version, bh.NumParentBlocks, &bh.ParentHashes, &bh.MerkleRoot,
sec, bh.Bits, bh.Nonce)
}

View File

@ -28,9 +28,9 @@ func TestBlockHeader(t *testing.T) {
bh := NewBlockHeader(1, hashes, &merkleHash, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.PrevBlocks, hashes) {
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong parent hashes - got %v, want %v",
spew.Sprint(bh.PrevBlocks), spew.Sprint(hashes))
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if !bh.MerkleRoot.IsEqual(&merkleHash) {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
@ -55,24 +55,24 @@ func TestBlockHeaderWire(t *testing.T) {
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits,
Nonce: nonce,
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -194,24 +194,24 @@ func TestBlockHeaderSerialize(t *testing.T) {
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits,
Nonce: nonce,
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -274,23 +274,23 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
bits := uint32(0x1d00ffff)
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
baseBlockHdr := &BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *BlockHeader // Block header to encode
@ -321,22 +321,22 @@ func TestIsGenesis(t *testing.T) {
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
baseBlockHdr := &BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
NumPrevBlocks: 0,
PrevBlocks: []daghash.Hash{},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
Version: 1,
NumParentBlocks: 0,
ParentHashes: []daghash.Hash{},
MerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {

View File

@ -21,11 +21,11 @@ func TestBlock(t *testing.T) {
pver := ProtocolVersion
// Block 1 header.
prevHashes := blockOne.Header.PrevBlocks
parentHashes := blockOne.Header.ParentHashes
merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce)
bh := NewBlockHeader(1, parentHashes, merkleHash, bits, nonce)
// Ensure the command is expected value.
wantCmd := "block"
@ -334,7 +334,7 @@ func TestBlockSerializeErrors(t *testing.T) {
}{
// Force error in version.
{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
// Force error in numPrevBlocks.
// Force error in numParentBlocks.
{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
// Force error in parent hash #1.
{&blockOne, blockOneBytes, 5, io.ErrShortWrite, io.EOF},
@ -405,12 +405,12 @@ func TestBlockOverflowErrors(t *testing.T) {
{
[]byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -491,10 +491,10 @@ func TestBlockSerializeSize(t *testing.T) {
// blockOne is the first block in the mainnet block chain.
var blockOne = MsgBlock{
Header: BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: daghash.Hash(mainNetGenesisMerkleRoot),
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: daghash.Hash(mainNetGenesisMerkleRoot),
Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
Bits: 0x1d00ffff, // 486604799
@ -541,12 +541,12 @@ var blockOne = MsgBlock{
// Block one serialized bytes.
var blockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,

View File

@ -81,12 +81,12 @@ func TestHeadersWire(t *testing.T) {
oneHeaderEncoded := []byte{
0x01, // VarInt for number of headers.
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -237,12 +237,12 @@ func TestHeadersWireErrors(t *testing.T) {
oneHeaderEncoded := []byte{
0x01, // VarInt for number of headers.
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@ -278,12 +278,12 @@ func TestHeadersWireErrors(t *testing.T) {
transHeaderEncoded := []byte{
0x01, // VarInt for number of headers.
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,

View File

@ -21,11 +21,11 @@ func TestMerkleBlock(t *testing.T) {
pver := ProtocolVersion
// Block 1 header.
prevHashes := blockOne.Header.PrevBlocks
parentHashes := blockOne.Header.ParentHashes
merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce)
bh := NewBlockHeader(1, parentHashes, merkleHash, bits, nonce)
// Ensure the command is expected value.
wantCmd := "merkleblock"
@ -113,11 +113,11 @@ func TestMerkleBlock(t *testing.T) {
// the latest protocol version and decoding with BIP0031Version.
func TestMerkleBlockCrossProtocol(t *testing.T) {
// Block 1 header.
prevHashes := blockOne.Header.PrevBlocks
parentHashes := blockOne.Header.ParentHashes
merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, prevHashes, merkleHash, bits, nonce)
bh := NewBlockHeader(1, parentHashes, merkleHash, bits, nonce)
msg := NewMsgMerkleBlock(bh)
@ -337,9 +337,9 @@ func TestMerkleBlockOverflowErrors(t *testing.T) {
// where the first transaction matches.
var merkleBlockOne = MsgMerkleBlock{
Header: BlockHeader{
Version: 1,
NumPrevBlocks: 2,
PrevBlocks: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
Version: 1,
NumParentBlocks: 2,
ParentHashes: []daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
MerkleRoot: daghash.Hash([daghash.HashSize]byte{ // Make go vet happy.
0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
@ -366,12 +366,12 @@ var merkleBlockOne = MsgMerkleBlock{
// block one of the block chain where the first transaction matches.
var merkleBlockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumPrevBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // PrevBlock mainNetGenesisHash
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // PrevBlock simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,