[NOD-669] Rename start/endHash -> low/highHash (#591)

* [NOD-669] Remove the "get" from getBlueBlocksBetween.

* [NOD-669] Remove the "Get" from GetBlueBlocksHeadersBetween.

* [NOD-669] In blueBlocksBetween, rename startHash to lowHash and stopHash to highHash.

* [NOD-669] Rename startHash to lowHash and stopHash to highHash in blockLocator logic.

* [NOD-669] Remove zeroHash logic in blockLocator.

* [NOD-669] Finish renaming startHash and stopHash in blockdag.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more some more.

* [NOD-669] Fix bad grammar in method names.

* [NOD-669] Rename lowHash to blockHash in SelectedParentChain.

* [NOD-669] Fix a comment.
stasatdaglabs 2020-01-20 12:47:16 +02:00 committed by Svarog
parent 38b4749f20
commit 49418f4222
22 changed files with 227 additions and 227 deletions

View File

@ -21,66 +21,61 @@ import (
// [17 16 14 11 7 2 genesis] // [17 16 14 11 7 2 genesis]
type BlockLocator []*daghash.Hash type BlockLocator []*daghash.Hash
// BlockLocatorFromHashes returns a block locator from start and stop hash. // BlockLocatorFromHashes returns a block locator from high and low hash.
// See BlockLocator for details on the algorithm used to create a block locator. // See BlockLocator for details on the algorithm used to create a block locator.
// //
// In addition to the general algorithm referenced above, this function will
// return the block locator for the selected tip if the passed hash is not currently
// known.
//
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) (BlockLocator, error) { func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (BlockLocator, error) {
dag.dagLock.RLock() dag.dagLock.RLock()
defer dag.dagLock.RUnlock() defer dag.dagLock.RUnlock()
startNode := dag.index.LookupNode(startHash)
var stopNode *blockNode highNode := dag.index.LookupNode(highHash)
if !stopHash.IsEqual(&daghash.ZeroHash) { lowNode := dag.index.LookupNode(lowHash)
stopNode = dag.index.LookupNode(stopHash)
} return dag.blockLocator(highNode, lowNode)
return dag.blockLocator(startNode, stopNode)
} }
// blockLocator returns a block locator for the passed start and stop nodes. // blockLocator returns a block locator for the passed high and low nodes.
// The default value for the start node is the selected tip, and the default // The default value for the high node is the selected tip, and the default
// values of the stop node is the genesis block. // values of the low node is the genesis block.
// //
// See the BlockLocator type comments for more details. // See the BlockLocator type comments for more details.
// //
// This function MUST be called with the DAG state lock held (for reads). // This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) (BlockLocator, error) { func (dag *BlockDAG) blockLocator(highNode, lowNode *blockNode) (BlockLocator, error) {
// Use the selected tip if requested. // Use the selected tip if requested.
if startNode == nil { if highNode == nil {
startNode = dag.virtual.selectedParent highNode = dag.virtual.selectedParent
} }
if stopNode == nil { if lowNode == nil {
stopNode = dag.genesis lowNode = dag.genesis
} }
// We use the selected parent of the start node, so the // We use the selected parent of the high node, so the
// block locator won't contain the start node. // block locator won't contain the high node.
startNode = startNode.selectedParent highNode = highNode.selectedParent
node := startNode node := highNode
step := uint64(1) step := uint64(1)
locator := make(BlockLocator, 0) locator := make(BlockLocator, 0)
for node != nil { for node != nil {
locator = append(locator, node.hash) locator = append(locator, node.hash)
// Nothing more to add once the stop node has been added. // Nothing more to add once the low node has been added.
if node.blueScore <= stopNode.blueScore { if node.blueScore <= lowNode.blueScore {
if node != stopNode { if node != lowNode {
return nil, errors.Errorf("startNode and stopNode are " + return nil, errors.Errorf("highNode and lowNode are " +
"not in the same selected parent chain.") "not in the same selected parent chain.")
} }
break break
} }
// Calculate blueScore of previous node to include ensuring the // Calculate blueScore of previous node to include ensuring the
// final node is stopNode. // final node is lowNode.
nextBlueScore := node.blueScore - step nextBlueScore := node.blueScore - step
if nextBlueScore < stopNode.blueScore { if nextBlueScore < lowNode.blueScore {
nextBlueScore = stopNode.blueScore nextBlueScore = lowNode.blueScore
} }
// walk backwards through the nodes to the correct ancestor. // walk backwards through the nodes to the correct ancestor.
@ -99,21 +94,21 @@ func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) (BlockLocator,
// sync peer. // sync peer.
// //
// This function MUST be called with the DAG state lock held (for reads). // This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) { func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash, lowHash *daghash.Hash) {
// Find the most recent locator block hash in the DAG. In the case none of // Find the most recent locator block hash in the DAG. In the case none of
// the hashes in the locator are in the DAG, fall back to the genesis block. // the hashes in the locator are in the DAG, fall back to the genesis block.
stopNode := dag.genesis lowNode := dag.genesis
nextBlockLocatorIndex := int64(len(locator) - 1) nextBlockLocatorIndex := int64(len(locator) - 1)
for i, hash := range locator { for i, hash := range locator {
node := dag.index.LookupNode(hash) node := dag.index.LookupNode(hash)
if node != nil { if node != nil {
stopNode = node lowNode = node
nextBlockLocatorIndex = int64(i) - 1 nextBlockLocatorIndex = int64(i) - 1
break break
} }
} }
if nextBlockLocatorIndex < 0 { if nextBlockLocatorIndex < 0 {
return nil, stopNode.hash return nil, lowNode.hash
} }
return locator[nextBlockLocatorIndex], stopNode.hash return locator[nextBlockLocatorIndex], lowNode.hash
} }
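An illustrative, self-contained sketch of the locator walk above follows. The node type and the locatorScores helper are simplified stand-ins, not kaspad code, and since the hunk is truncated before the step update, widening the step by one per iteration is an assumption chosen so the output matches the [17 16 14 11 7 2 genesis] example in the comment:

```go
package main

import "fmt"

// node is a simplified stand-in for kaspad's blockNode: it only carries a
// blueScore and a pointer to its selected parent.
type node struct {
	blueScore      uint64
	selectedParent *node
}

// locatorScores sketches the walk performed by blockLocator above: starting
// from highNode it collects blueScores while widening the step each round,
// and stops once lowNode is reached.
func locatorScores(highNode, lowNode *node) []uint64 {
	locator := []uint64{}
	step := uint64(1)
	for n := highNode; n != nil; {
		locator = append(locator, n.blueScore)
		// Nothing more to add once the low node has been reached.
		if n.blueScore <= lowNode.blueScore {
			break
		}
		// Calculate the blueScore of the next node to include, clamped
		// so the final node is lowNode.
		next := lowNode.blueScore
		if n.blueScore > lowNode.blueScore+step {
			next = n.blueScore - step
		}
		// Walk backwards through the selected parent chain to the
		// ancestor with the target blueScore.
		for n != nil && n.blueScore > next {
			n = n.selectedParent
		}
		step++
	}
	return locator
}

func main() {
	// Build a simple selected parent chain with blueScores 0..17.
	var chain [18]*node
	for i := range chain {
		chain[i] = &node{blueScore: uint64(i)}
		if i > 0 {
			chain[i].selectedParent = chain[i-1]
		}
	}
	fmt.Println(locatorScores(chain[17], chain[0])) // [17 16 14 11 7 2 0]
}
```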

View File

@ -1472,44 +1472,44 @@ func (dag *BlockDAG) IsInSelectedParentChain(blockHash *daghash.Hash) bool {
return dag.virtual.selectedParentChainSet.containsHash(blockHash) return dag.virtual.selectedParentChainSet.containsHash(blockHash)
} }
// SelectedParentChain returns the selected parent chain starting from startHash (exclusive) // SelectedParentChain returns the selected parent chain starting from blockHash (exclusive)
// up to the virtual (exclusive). If startHash is nil then the genesis block is used. If // up to the virtual (exclusive). If blockHash is nil then the genesis block is used. If
// startHash is not within the selected parent chain, go down its own selected parent chain, // blockHash is not within the selected parent chain, go down its own selected parent chain,
// while collecting each block hash in removedChainHashes, until reaching a block within // while collecting each block hash in removedChainHashes, until reaching a block within
// the main selected parent chain. // the main selected parent chain.
// //
// This method MUST be called with the DAG lock held // This method MUST be called with the DAG lock held
func (dag *BlockDAG) SelectedParentChain(startHash *daghash.Hash) ([]*daghash.Hash, []*daghash.Hash, error) { func (dag *BlockDAG) SelectedParentChain(blockHash *daghash.Hash) ([]*daghash.Hash, []*daghash.Hash, error) {
if startHash == nil { if blockHash == nil {
startHash = dag.genesis.hash blockHash = dag.genesis.hash
} }
if !dag.BlockExists(startHash) { if !dag.BlockExists(blockHash) {
return nil, nil, errors.Errorf("startHash %s does not exist in the DAG", startHash) return nil, nil, errors.Errorf("blockHash %s does not exist in the DAG", blockHash)
} }
// If startHash is not in the selected parent chain, go down its selected parent chain // If blockHash is not in the selected parent chain, go down its selected parent chain
// until we find a block that is in the main selected parent chain. // until we find a block that is in the main selected parent chain.
var removedChainHashes []*daghash.Hash var removedChainHashes []*daghash.Hash
for !dag.IsInSelectedParentChain(startHash) { for !dag.IsInSelectedParentChain(blockHash) {
removedChainHashes = append(removedChainHashes, startHash) removedChainHashes = append(removedChainHashes, blockHash)
node := dag.index.LookupNode(startHash) node := dag.index.LookupNode(blockHash)
startHash = node.selectedParent.hash blockHash = node.selectedParent.hash
} }
// Find the index of the startHash in the selectedParentChainSlice // Find the index of the blockHash in the selectedParentChainSlice
startHashIndex := len(dag.virtual.selectedParentChainSlice) - 1 blockHashIndex := len(dag.virtual.selectedParentChainSlice) - 1
for startHashIndex >= 0 { for blockHashIndex >= 0 {
node := dag.virtual.selectedParentChainSlice[startHashIndex] node := dag.virtual.selectedParentChainSlice[blockHashIndex]
if node.hash.IsEqual(startHash) { if node.hash.IsEqual(blockHash) {
break break
} }
startHashIndex-- blockHashIndex--
} }
// Copy all the addedChainHashes starting from startHashIndex (exclusive) // Copy all the addedChainHashes starting from blockHashIndex (exclusive)
addedChainHashes := make([]*daghash.Hash, len(dag.virtual.selectedParentChainSlice)-startHashIndex-1) addedChainHashes := make([]*daghash.Hash, len(dag.virtual.selectedParentChainSlice)-blockHashIndex-1)
for i, node := range dag.virtual.selectedParentChainSlice[startHashIndex+1:] { for i, node := range dag.virtual.selectedParentChainSlice[blockHashIndex+1:] {
addedChainHashes[i] = node.hash addedChainHashes[i] = node.hash
} }
@ -1593,13 +1593,13 @@ func (dag *BlockDAG) SelectedParentHash(blockHash *daghash.Hash) (*daghash.Hash,
return node.selectedParent.hash, nil return node.selectedParent.hash, nil
} }
// getBlueBlocksHashesBetween returns the hashes of the blocks after the provided // blueBlockHashesBetween returns the hashes of the blocks after the provided
// start hash until the provided stop hash is reached, or up to the // low hash until the provided high hash is reached, or up to the
// provided max number of block hashes. // provided max number of block hashes.
// //
// This function MUST be called with the DAG state lock held (for reads). // This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) getBlueBlocksHashesBetween(startHash, stopHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) { func (dag *BlockDAG) blueBlockHashesBetween(lowHash, highHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) {
nodes, err := dag.getBlueBlocksBetween(startHash, stopHash, maxHashes) nodes, err := dag.blueBlocksBetween(lowHash, highHash, maxHashes)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1610,44 +1610,44 @@ func (dag *BlockDAG) getBlueBlocksHashesBetween(startHash, stopHash *daghash.Has
return hashes, nil return hashes, nil
} }
func (dag *BlockDAG) getBlueBlocksBetween(startHash, stopHash *daghash.Hash, maxEntries uint64) ([]*blockNode, error) { func (dag *BlockDAG) blueBlocksBetween(lowHash, highHash *daghash.Hash, maxEntries uint64) ([]*blockNode, error) {
startNode := dag.index.LookupNode(startHash) lowNode := dag.index.LookupNode(lowHash)
if startNode == nil { if lowNode == nil {
return nil, errors.Errorf("Couldn't find start hash %s", startHash) return nil, errors.Errorf("Couldn't find low hash %s", lowHash)
} }
stopNode := dag.index.LookupNode(stopHash) highNode := dag.index.LookupNode(highHash)
if stopNode == nil { if highNode == nil {
return nil, errors.Errorf("Couldn't find stop hash %s", stopHash) return nil, errors.Errorf("Couldn't find high hash %s", highHash)
} }
// In order to get no more than maxEntries of blue blocks from // In order to get no more than maxEntries of blue blocks from
// the future of the start node (including itself), we iterate // the future of the lowNode (including itself), we iterate
// the selected parent chain of the stopNode and add the blues of // the selected parent chain of the highNode and add the blues of
// each node (including the stopNode itself). This is why the // each node (including the highNode itself). This is why the
// number of returned blocks will be // number of returned blocks will be
// stopNode.blueScore-startNode.blueScore+1. // highNode.blueScore-lowNode.blueScore+1.
// If stopNode.blueScore-startNode.blueScore+1 > maxEntries, we // If highNode.blueScore-lowNode.blueScore+1 > maxEntries, we
// first iterate on the selected parent chain of the stop node // first iterate on the selected parent chain of the highNode
// until we find a new stop node // until we find a new highNode
// where stopNode.blueScore-startNode.blueScore+1 <= maxEntries // where highNode.blueScore-lowNode.blueScore+1 <= maxEntries
for stopNode.blueScore-startNode.blueScore+1 > maxEntries { for highNode.blueScore-lowNode.blueScore+1 > maxEntries {
stopNode = stopNode.selectedParent highNode = highNode.selectedParent
} }
// Populate and return the found nodes. // Populate and return the found nodes.
nodes := make([]*blockNode, 0, stopNode.blueScore-startNode.blueScore+1) nodes := make([]*blockNode, 0, highNode.blueScore-lowNode.blueScore+1)
nodes = append(nodes, stopNode) nodes = append(nodes, highNode)
current := stopNode current := highNode
for current.blueScore > startNode.blueScore { for current.blueScore > lowNode.blueScore {
for _, blue := range current.blues { for _, blue := range current.blues {
nodes = append(nodes, blue) nodes = append(nodes, blue)
} }
current = current.selectedParent current = current.selectedParent
} }
if current != startNode { if current != lowNode {
return nil, errors.Errorf("the start hash is not found in the " + return nil, errors.Errorf("the low hash is not found in the " +
"selected parent chain of the stop hash") "selected parent chain of the high hash")
} }
reversedNodes := make([]*blockNode, len(nodes)) reversedNodes := make([]*blockNode, len(nodes))
for i, node := range nodes { for i, node := range nodes {
@ -1656,14 +1656,14 @@ func (dag *BlockDAG) getBlueBlocksBetween(startHash, stopHash *daghash.Hash, max
return reversedNodes, nil return reversedNodes, nil
} }
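A minimal sketch of the flow of blueBlocksBetween, using a simplified bNode type in place of blockNode. The maxEntries clamp, the blues collection, the selected-parent-chain check, and the final reversal mirror the function above, but hash types and error handling are simplified:

```go
package main

import (
	"errors"
	"fmt"
)

// bNode is a simplified stand-in for blockNode: a blueScore, a selected
// parent, and the blue blocks merged by this node.
type bNode struct {
	blueScore      uint64
	selectedParent *bNode
	blues          []*bNode
}

// blueBlocksBetween clamps highNode down its selected parent chain until the
// blueScore window fits maxEntries, then walks from highNode to lowNode
// collecting each node's blues, and finally reverses so the result is
// ordered low to high.
func blueBlocksBetween(lowNode, highNode *bNode, maxEntries uint64) ([]*bNode, error) {
	for highNode.blueScore-lowNode.blueScore+1 > maxEntries {
		highNode = highNode.selectedParent
	}
	nodes := []*bNode{highNode}
	current := highNode
	for current.blueScore > lowNode.blueScore {
		nodes = append(nodes, current.blues...)
		current = current.selectedParent
	}
	if current != lowNode {
		return nil, errors.New("low hash is not in the selected parent chain of the high hash")
	}
	// Reverse so callers get blocks ordered from low to high.
	reversed := make([]*bNode, len(nodes))
	for i, n := range nodes {
		reversed[len(nodes)-i-1] = n
	}
	return reversed, nil
}

func main() {
	genesis := &bNode{blueScore: 0}
	a := &bNode{blueScore: 1, selectedParent: genesis, blues: []*bNode{genesis}}
	b := &bNode{blueScore: 2, selectedParent: a, blues: []*bNode{a}}
	nodes, err := blueBlocksBetween(genesis, b, 10)
	fmt.Println(len(nodes), err) // 3 <nil>
}
```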
// GetBlueBlocksHashesBetween returns the hashes of the blue blocks after the // BlueBlockHashesBetween returns the hashes of the blue blocks after the
// provided start hash until the provided stop hash is reached, or up to the // provided low hash until the provided high hash is reached, or up to the
// provided max number of block hashes. // provided max number of block hashes.
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (dag *BlockDAG) GetBlueBlocksHashesBetween(startHash, stopHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) { func (dag *BlockDAG) BlueBlockHashesBetween(lowHash, highHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) {
dag.dagLock.RLock() dag.dagLock.RLock()
hashes, err := dag.getBlueBlocksHashesBetween(startHash, stopHash, maxHashes) hashes, err := dag.blueBlockHashesBetween(lowHash, highHash, maxHashes)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1671,13 +1671,13 @@ func (dag *BlockDAG) GetBlueBlocksHashesBetween(startHash, stopHash *daghash.Has
return hashes, nil return hashes, nil
} }
// getBlueBlocksHeadersBetween returns the headers of the blue blocks after the // blueBlockHeadersBetween returns the headers of the blue blocks after the
// provided start hash until the provided stop hash is reached, or up to the // provided low hash until the provided high hash is reached, or up to the
// provided max number of block headers. // provided max number of block headers.
// //
// This function MUST be called with the DAG state lock held (for reads). // This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) getBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) { func (dag *BlockDAG) blueBlockHeadersBetween(lowHash, highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) {
nodes, err := dag.getBlueBlocksBetween(startHash, stopHash, maxHeaders) nodes, err := dag.blueBlocksBetween(lowHash, highHash, maxHeaders)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1689,17 +1689,17 @@ func (dag *BlockDAG) getBlueBlocksHeadersBetween(startHash, stopHash *daghash.Ha
} }
// GetTopHeaders returns the top wire.MaxBlockHeadersPerMsg block headers ordered by height. // GetTopHeaders returns the top wire.MaxBlockHeadersPerMsg block headers ordered by height.
func (dag *BlockDAG) GetTopHeaders(startHash *daghash.Hash) ([]*wire.BlockHeader, error) { func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash) ([]*wire.BlockHeader, error) {
startNode := &dag.virtual.blockNode highNode := &dag.virtual.blockNode
if startHash != nil { if highHash != nil {
startNode = dag.index.LookupNode(startHash) highNode = dag.index.LookupNode(highHash)
if startNode == nil { if highNode == nil {
return nil, errors.Errorf("Couldn't find the start hash %s in the dag", startHash) return nil, errors.Errorf("Couldn't find the high hash %s in the dag", highHash)
} }
} }
headers := make([]*wire.BlockHeader, 0, startNode.blueScore) headers := make([]*wire.BlockHeader, 0, highNode.blueScore)
queue := newDownHeap() queue := newDownHeap()
queue.pushSet(startNode.parents) queue.pushSet(highNode.parents)
visited := newSet() visited := newSet()
for i := uint32(0); queue.Len() > 0 && len(headers) < wire.MaxBlockHeadersPerMsg; i++ { for i := uint32(0); queue.Len() > 0 && len(headers) < wire.MaxBlockHeadersPerMsg; i++ {
@ -1734,14 +1734,14 @@ func (dag *BlockDAG) RUnlock() {
dag.dagLock.RUnlock() dag.dagLock.RUnlock()
} }
// GetBlueBlocksHeadersBetween returns the headers of the blocks after the provided // BlueBlockHeadersBetween returns the headers of the blocks after the provided
// start hash until the provided stop hash is reached, or up to the // low hash until the provided high hash is reached, or up to the
// provided max number of block headers. // provided max number of block headers.
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (dag *BlockDAG) GetBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash) ([]*wire.BlockHeader, error) { func (dag *BlockDAG) BlueBlockHeadersBetween(lowHash, highHash *daghash.Hash) ([]*wire.BlockHeader, error) {
dag.dagLock.RLock() dag.dagLock.RLock()
headers, err := dag.getBlueBlocksHeadersBetween(startHash, stopHash, wire.MaxBlockHeadersPerMsg) headers, err := dag.blueBlockHeadersBetween(lowHash, highHash, wire.MaxBlockHeadersPerMsg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
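The GetTopHeaders hunk above is truncated before the loop body, so the sketch below only assumes the usual pattern for such a traversal: pop the highest-blueScore node from a down-heap, record it, and push its unvisited parents. hNode, downHeap, and topNodes are hypothetical stand-ins, not kaspad's newDownHeap implementation:

```go
package main

import (
	"container/heap"
	"fmt"
)

// hNode is a simplified stand-in for blockNode: a blueScore and parent links.
type hNode struct {
	blueScore uint64
	parents   []*hNode
}

// downHeap pops the node with the highest blueScore first, mimicking the
// priority queue GetTopHeaders uses above.
type downHeap []*hNode

func (h downHeap) Len() int            { return len(h) }
func (h downHeap) Less(i, j int) bool  { return h[i].blueScore > h[j].blueScore }
func (h downHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *downHeap) Push(x interface{}) { *h = append(*h, x.(*hNode)) }
func (h *downHeap) Pop() interface{} {
	old := *h
	n := old[len(old)-1]
	*h = old[:len(old)-1]
	return n
}

// topNodes walks the DAG downwards from high's parents, always visiting the
// highest-blueScore unvisited node next, and stops after max nodes. The real
// function returns wire.BlockHeader values capped at MaxBlockHeadersPerMsg.
func topNodes(high *hNode, max int) []*hNode {
	queue := &downHeap{}
	visited := map[*hNode]bool{}
	for _, p := range high.parents {
		heap.Push(queue, p)
		visited[p] = true
	}
	var result []*hNode
	for queue.Len() > 0 && len(result) < max {
		node := heap.Pop(queue).(*hNode)
		result = append(result, node)
		for _, p := range node.parents {
			if !visited[p] {
				heap.Push(queue, p)
				visited[p] = true
			}
		}
	}
	return result
}

func main() {
	genesis := &hNode{blueScore: 0}
	a := &hNode{blueScore: 1, parents: []*hNode{genesis}}
	b := &hNode{blueScore: 2, parents: []*hNode{a}}
	tip := &hNode{blueScore: 3, parents: []*hNode{b}}
	for _, n := range topNodes(tip, 2) {
		fmt.Println(n.blueScore) // prints 2, then 1
	}
}
```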

View File

@ -854,33 +854,33 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
return block, err return block, err
} }
// BlockHashesFrom returns a slice of blocks starting from startHash // BlockHashesFrom returns a slice of blocks starting from lowHash
// ordered by blueScore. If startHash is nil then the genesis block is used. // ordered by blueScore. If lowHash is nil then the genesis block is used.
// //
// This method MUST be called with the DAG lock held // This method MUST be called with the DAG lock held
func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*daghash.Hash, error) { func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0, limit) blockHashes := make([]*daghash.Hash, 0, limit)
if startHash == nil { if lowHash == nil {
startHash = dag.genesis.hash lowHash = dag.genesis.hash
// If we're starting from the beginning we should include the // If we're starting from the beginning we should include the
// genesis hash in the result // genesis hash in the result
blockHashes = append(blockHashes, dag.genesis.hash) blockHashes = append(blockHashes, dag.genesis.hash)
} }
if !dag.BlockExists(startHash) { if !dag.BlockExists(lowHash) {
return nil, errors.Errorf("block %s not found", startHash) return nil, errors.Errorf("block %s not found", lowHash)
} }
blueScore, err := dag.BlueScoreByBlockHash(startHash) blueScore, err := dag.BlueScoreByBlockHash(lowHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = dag.index.db.View(func(dbTx database.Tx) error { err = dag.index.db.View(func(dbTx database.Tx) error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName) blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
startKey := BlockIndexKey(startHash, blueScore) lowKey := BlockIndexKey(lowHash, blueScore)
cursor := blockIndexBucket.Cursor() cursor := blockIndexBucket.Cursor()
cursor.Seek(startKey) cursor.Seek(lowKey)
for ok := cursor.Next(); ok; ok = cursor.Next() { for ok := cursor.Next(); ok; ok = cursor.Next() {
key := cursor.Key() key := cursor.Key()
blockHash, err := blockHashFromBlockIndexKey(key) blockHash, err := blockHashFromBlockIndexKey(key)

View File

@ -686,7 +686,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
// Request blocks after the first block's ancestor that exists // Request blocks after the first block's ancestor that exists
// in the selected parent chain, one up to the // in the selected parent chain, one up to the
// final one the remote peer knows about. // final one the remote peer knows about.
peer.PushGetBlockLocatorMsg(iv.Hash, &daghash.ZeroHash) peer.PushGetBlockLocatorMsg(iv.Hash, sm.dagParams.GenesisHash)
} }
} }
} }

View File

@ -139,16 +139,16 @@ func messageSummary(msg wire.Message) string {
return invSummary(msg.InvList) return invSummary(msg.InvList)
case *wire.MsgGetBlockInvs: case *wire.MsgGetBlockInvs:
return fmt.Sprintf("start hash %s, stop hash %s", msg.StartHash, return fmt.Sprintf("low hash %s, high hash %s", msg.LowHash,
msg.StopHash) msg.HighHash)
case *wire.MsgGetHeaders: case *wire.MsgGetHeaders:
return fmt.Sprintf("start hash %s, stop hash %s", msg.StartHash, return fmt.Sprintf("start hash %s, stop hash %s", msg.StartHash,
msg.StopHash) msg.StopHash)
case *wire.MsgGetBlockLocator: case *wire.MsgGetBlockLocator:
return fmt.Sprintf("start hash %s, stop hash %s", msg.StartHash, return fmt.Sprintf("high hash %s, low hash %s", msg.HighHash,
msg.StopHash) msg.LowHash)
case *wire.MsgBlockLocator: case *wire.MsgBlockLocator:
if len(msg.BlockLocatorHashes) > 0 { if len(msg.BlockLocatorHashes) > 0 {

View File

@ -413,13 +413,13 @@ type Peer struct {
sendHeadersPreferred bool // peer sent a sendheaders message sendHeadersPreferred bool // peer sent a sendheaders message
verAckReceived bool verAckReceived bool
knownInventory *mruInventoryMap knownInventory *mruInventoryMap
prevGetBlockInvsMtx sync.Mutex prevGetBlockInvsMtx sync.Mutex
prevGetBlockInvsStart *daghash.Hash prevGetBlockInvsLow *daghash.Hash
prevGetBlockInvsStop *daghash.Hash prevGetBlockInvsHigh *daghash.Hash
prevGetHdrsMtx sync.Mutex prevGetHdrsMtx sync.Mutex
prevGetHdrsStart *daghash.Hash prevGetHdrsStart *daghash.Hash
prevGetHdrsStop *daghash.Hash prevGetHdrsStop *daghash.Hash
// These fields keep track of statistics for the peer and are protected // These fields keep track of statistics for the peer and are protected
// by the statsMtx mutex. // by the statsMtx mutex.
@ -808,42 +808,42 @@ func (p *Peer) PushAddrMsg(addresses []*wire.NetAddress, subnetworkID *subnetwor
return msg.AddrList, nil return msg.AddrList, nil
} }
// PushGetBlockLocatorMsg sends a getlocator message for the provided start // PushGetBlockLocatorMsg sends a getlocator message for the provided high
// and stop hash. // and low hash.
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (p *Peer) PushGetBlockLocatorMsg(startHash, stopHash *daghash.Hash) { func (p *Peer) PushGetBlockLocatorMsg(highHash, lowHash *daghash.Hash) {
msg := wire.NewMsgGetBlockLocator(startHash, stopHash) msg := wire.NewMsgGetBlockLocator(highHash, lowHash)
p.QueueMessage(msg, nil) p.QueueMessage(msg, nil)
} }
// PushGetBlockInvsMsg sends a getblockinvs message for the provided block locator // PushGetBlockInvsMsg sends a getblockinvs message for the provided block locator
// and stop hash. It will ignore back-to-back duplicate requests. // and high hash. It will ignore back-to-back duplicate requests.
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (p *Peer) PushGetBlockInvsMsg(startHash, stopHash *daghash.Hash) error { func (p *Peer) PushGetBlockInvsMsg(lowHash, highHash *daghash.Hash) error {
// Filter duplicate getblockinvs requests. // Filter duplicate getblockinvs requests.
p.prevGetBlockInvsMtx.Lock() p.prevGetBlockInvsMtx.Lock()
isDuplicate := p.prevGetBlockInvsStop != nil && p.prevGetBlockInvsStart != nil && isDuplicate := p.prevGetBlockInvsHigh != nil && p.prevGetBlockInvsLow != nil &&
startHash != nil && stopHash.IsEqual(p.prevGetBlockInvsStop) && lowHash != nil && highHash.IsEqual(p.prevGetBlockInvsHigh) &&
startHash.IsEqual(p.prevGetBlockInvsStart) lowHash.IsEqual(p.prevGetBlockInvsLow)
p.prevGetBlockInvsMtx.Unlock() p.prevGetBlockInvsMtx.Unlock()
if isDuplicate { if isDuplicate {
log.Tracef("Filtering duplicate [getblockinvs] with start "+ log.Tracef("Filtering duplicate [getblockinvs] with low "+
"hash %s, stop hash %s", startHash, stopHash) "hash %s, high hash %s", lowHash, highHash)
return nil return nil
} }
// Construct the getblockinvs request and queue it to be sent. // Construct the getblockinvs request and queue it to be sent.
msg := wire.NewMsgGetBlockInvs(startHash, stopHash) msg := wire.NewMsgGetBlockInvs(lowHash, highHash)
p.QueueMessage(msg, nil) p.QueueMessage(msg, nil)
// Update the previous getblockinvs request information for filtering // Update the previous getblockinvs request information for filtering
// duplicates. // duplicates.
p.prevGetBlockInvsMtx.Lock() p.prevGetBlockInvsMtx.Lock()
p.prevGetBlockInvsStart = startHash p.prevGetBlockInvsLow = lowHash
p.prevGetBlockInvsStop = stopHash p.prevGetBlockInvsHigh = highHash
p.prevGetBlockInvsMtx.Unlock() p.prevGetBlockInvsMtx.Unlock()
return nil return nil
} }
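The duplicate-request filtering in PushGetBlockInvsMsg boils down to remembering the previous (low, high) pair under a mutex. A small stand-alone sketch, using strings in place of *daghash.Hash and a hypothetical requestFilter type:

```go
package main

import (
	"fmt"
	"sync"
)

// requestFilter sketches the pattern above: remember the previous
// (low, high) pair under a mutex and drop a request that repeats it
// back to back.
type requestFilter struct {
	mtx      sync.Mutex
	prevLow  string
	prevHigh string
}

// shouldSend reports whether a request for the given pair should be sent,
// and records the pair so an immediate repeat is filtered.
func (f *requestFilter) shouldSend(low, high string) bool {
	f.mtx.Lock()
	defer f.mtx.Unlock()
	if low == f.prevLow && high == f.prevHigh && f.prevLow != "" {
		return false
	}
	f.prevLow, f.prevHigh = low, high
	return true
}

func main() {
	var f requestFilter
	fmt.Println(f.shouldSend("a", "b")) // true
	fmt.Println(f.shouldSend("a", "b")) // false: back-to-back duplicate
	fmt.Println(f.shouldSend("a", "c")) // true
}
```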

View File

@ -464,18 +464,18 @@ func (r FutureGetHeadersResult) Receive() ([]wire.BlockHeader, error) {
// of the RPC at some future time by invoking the Receive function on the returned instance. // of the RPC at some future time by invoking the Receive function on the returned instance.
// //
// See GetTopHeaders for the blocking version and more details. // See GetTopHeaders for the blocking version and more details.
func (c *Client) GetTopHeadersAsync(startHash *daghash.Hash) FutureGetHeadersResult { func (c *Client) GetTopHeadersAsync(highHash *daghash.Hash) FutureGetHeadersResult {
var hash *string var hash *string
if startHash != nil { if highHash != nil {
hash = rpcmodel.String(startHash.String()) hash = rpcmodel.String(highHash.String())
} }
cmd := rpcmodel.NewGetTopHeadersCmd(hash) cmd := rpcmodel.NewGetTopHeadersCmd(hash)
return c.sendCmd(cmd) return c.sendCmd(cmd)
} }
// GetTopHeaders sends a getTopHeaders rpc command to the server. // GetTopHeaders sends a getTopHeaders rpc command to the server.
func (c *Client) GetTopHeaders(startHash *daghash.Hash) ([]wire.BlockHeader, error) { func (c *Client) GetTopHeaders(highHash *daghash.Hash) ([]wire.BlockHeader, error) {
return c.GetTopHeadersAsync(startHash).Receive() return c.GetTopHeadersAsync(highHash).Receive()
} }
// GetHeadersAsync returns an instance of a type that can be used to get the result // GetHeadersAsync returns an instance of a type that can be used to get the result

View File

@ -667,29 +667,29 @@ func NewGetCurrentNetCmd() *GetCurrentNetCmd {
// GetTopHeadersCmd defined the getTopHeaders JSON-RPC command. // GetTopHeadersCmd defined the getTopHeaders JSON-RPC command.
type GetTopHeadersCmd struct { type GetTopHeadersCmd struct {
StartHash *string `json:"startHash"` HighHash *string `json:"highHash"`
} }
// NewGetTopHeadersCmd returns a new instance which can be used to issue a // NewGetTopHeadersCmd returns a new instance which can be used to issue a
// getTopHeaders JSON-RPC command. // getTopHeaders JSON-RPC command.
func NewGetTopHeadersCmd(startHash *string) *GetTopHeadersCmd { func NewGetTopHeadersCmd(highHash *string) *GetTopHeadersCmd {
return &GetTopHeadersCmd{ return &GetTopHeadersCmd{
StartHash: startHash, HighHash: highHash,
} }
} }
// GetHeadersCmd defines the getHeaders JSON-RPC command. // GetHeadersCmd defines the getHeaders JSON-RPC command.
type GetHeadersCmd struct { type GetHeadersCmd struct {
StartHash string `json:"startHash"` LowHash string `json:"lowHash"`
StopHash string `json:"stopHash"` HighHash string `json:"highHash"`
} }
// NewGetHeadersCmd returns a new instance which can be used to issue a // NewGetHeadersCmd returns a new instance which can be used to issue a
// getHeaders JSON-RPC command. // getHeaders JSON-RPC command.
func NewGetHeadersCmd(startHash, stopHash string) *GetHeadersCmd { func NewGetHeadersCmd(lowHash, highHash string) *GetHeadersCmd {
return &GetHeadersCmd{ return &GetHeadersCmd{
StartHash: startHash, LowHash: lowHash,
StopHash: stopHash, HighHash: highHash,
} }
} }

View File

@ -952,8 +952,8 @@ func TestRPCServerCommands(t *testing.T) {
}, },
marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["",""],"id":1}`, marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["",""],"id":1}`,
unmarshalled: &rpcmodel.GetHeadersCmd{ unmarshalled: &rpcmodel.GetHeadersCmd{
StartHash: "", LowHash: "",
StopHash: "", HighHash: "",
}, },
}, },
{ {
@ -969,8 +969,8 @@ func TestRPCServerCommands(t *testing.T) {
}, },
marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16","000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`, marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16","000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`,
unmarshalled: &rpcmodel.GetHeadersCmd{ unmarshalled: &rpcmodel.GetHeadersCmd{
StartHash: "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", LowHash: "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16",
StopHash: "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7", HighHash: "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7",
}, },
}, },
{ {
@ -998,7 +998,7 @@ func TestRPCServerCommands(t *testing.T) {
}, },
marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":["000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`, marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":["000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`,
unmarshalled: &rpcmodel.GetTopHeadersCmd{ unmarshalled: &rpcmodel.GetTopHeadersCmd{
StartHash: rpcmodel.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"), HighHash: rpcmodel.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"),
}, },
}, },
{ {

View File

@ -20,15 +20,20 @@ func (sp *Peer) OnBlockLocator(_ *peer.Peer, msg *wire.MsgBlockLocator) {
} }
// If the first hash of the block locator is known, it means we found // If the first hash of the block locator is known, it means we found
// the highest shared block. // the highest shared block.
firstHash := msg.BlockLocatorHashes[0] highHash := msg.BlockLocatorHashes[0]
if dag.BlockExists(firstHash) { if dag.BlockExists(highHash) {
if dag.IsKnownFinalizedBlock(firstHash) { if dag.IsKnownFinalizedBlock(highHash) {
peerLog.Debugf("Cannot sync with peer %s because the highest"+ peerLog.Debugf("Cannot sync with peer %s because the highest"+
" shared chain block (%s) is below the finality point", sp, firstHash) " shared chain block (%s) is below the finality point", sp, highHash)
sp.server.SyncManager.RemoveFromSyncCandidates(sp.Peer) sp.server.SyncManager.RemoveFromSyncCandidates(sp.Peer)
return return
} }
err := sp.Peer.PushGetBlockInvsMsg(firstHash, sp.Peer.SelectedTip())
// We send the highHash as the GetBlockInvsMsg's lowHash here.
// This is not a mistake. The invs we desire start from the highest
// hash that we know of and end at the highest hash that the peer
// knows of.
err := sp.Peer.PushGetBlockInvsMsg(highHash, sp.Peer.SelectedTip())
if err != nil { if err != nil {
peerLog.Errorf("Failed pushing get blocks message for peer %s: %s", peerLog.Errorf("Failed pushing get blocks message for peer %s: %s",
sp, err) sp, err)
@ -36,9 +41,9 @@ func (sp *Peer) OnBlockLocator(_ *peer.Peer, msg *wire.MsgBlockLocator) {
} }
return return
} }
startHash, stopHash := dag.FindNextLocatorBoundaries(msg.BlockLocatorHashes) highHash, lowHash := dag.FindNextLocatorBoundaries(msg.BlockLocatorHashes)
if startHash == nil { if highHash == nil {
panic("Couldn't find any unknown hashes in the block locator.") panic("Couldn't find any unknown hashes in the block locator.")
} }
sp.PushGetBlockLocatorMsg(startHash, stopHash) sp.PushGetBlockLocatorMsg(highHash, lowHash)
} }
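The decision OnBlockLocator makes, including the deliberate use of the shared highHash as the getblockinvs lowHash, can be sketched with plain strings standing in for hashes. locatorStep below is a hypothetical helper that combines the handler's branch with the FindNextLocatorBoundaries narrowing step:

```go
package main

import "fmt"

// locatorStep sketches the decision OnBlockLocator makes above. locator is
// ordered from the peer's highest hash down to the lowest, known reports
// whether a hash exists in the local DAG, and peerSelectedTip is the peer's
// tip. The returned string stands in for the message the real handler sends.
func locatorStep(locator []string, known map[string]bool, peerSelectedTip string) string {
	highHash := locator[0]
	if known[highHash] {
		// The highest shared block is known locally, so request invs
		// from it (as lowHash) up to the peer's selected tip.
		return fmt.Sprintf("getblockinvs low=%s high=%s", highHash, peerSelectedTip)
	}
	// Otherwise narrow the boundaries: the first known hash (highest one
	// we share) becomes the low boundary, the hash just above it becomes
	// the high boundary, and we ask for another, finer-grained locator.
	low := "genesis"
	high := locator[len(locator)-1]
	for i, hash := range locator {
		if known[hash] {
			low = hash
			high = locator[i-1] // i > 0, since locator[0] is unknown here
			break
		}
	}
	return fmt.Sprintf("getblocklocator high=%s low=%s", high, low)
}

func main() {
	known := map[string]bool{"genesis": true, "b2": true}
	fmt.Println(locatorStep([]string{"b2", "genesis"}, known, "tip9"))
	// getblockinvs low=b2 high=tip9
	fmt.Println(locatorStep([]string{"b7", "b5", "b2", "genesis"}, known, "tip9"))
	// getblocklocator high=b5 low=b2
}
```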

View File

@ -7,23 +7,23 @@ import (
// OnGetBlockInvs is invoked when a peer receives a getblockinvs kaspa // OnGetBlockInvs is invoked when a peer receives a getblockinvs kaspa
// message. // message.
// It finds the blue future between msg.StartHash and msg.StopHash // It finds the blue future between msg.LowHash and msg.HighHash
// and sends the invs to the requesting peer. // and sends the invs to the requesting peer.
func (sp *Peer) OnGetBlockInvs(_ *peer.Peer, msg *wire.MsgGetBlockInvs) { func (sp *Peer) OnGetBlockInvs(_ *peer.Peer, msg *wire.MsgGetBlockInvs) {
dag := sp.server.DAG dag := sp.server.DAG
// We want to prevent a situation where the syncing peer needs // We want to prevent a situation where the syncing peer needs
// to call getblocks once again, but the block we sent him // to call getblocks once again, but the block we sent it
// won't affect his selected chain, so next time it tries // won't affect its selected chain, so next time it tries
// to find the highest shared chain block, it'll find the // to find the highest shared chain block, it'll find the
// same one as before. // same one as before.
// To prevent that we use blockdag.FinalityInterval as maxHashes. // To prevent that we use blockdag.FinalityInterval as maxHashes.
// This way, if one getblocks is not enough to get the peer // This way, if one getblocks is not enough to get the peer
// synced, we can know for sure that its selected chain will // synced, we can know for sure that its selected chain will
// change, so we'll have a higher shared chain block. // change, so we'll have a higher shared chain block.
hashList, err := dag.GetBlueBlocksHashesBetween(msg.StartHash, msg.StopHash, hashList, err := dag.BlueBlockHashesBetween(msg.LowHash, msg.HighHash,
wire.MaxInvPerMsg) wire.MaxInvPerMsg)
if err != nil { if err != nil {
peerLog.Warnf("Error getting blue blocks between %s and %s: %s", msg.StartHash, msg.StopHash, err) peerLog.Warnf("Error getting blue blocks between %s and %s: %s", msg.LowHash, msg.HighHash, err)
sp.Disconnect() sp.Disconnect()
return return
} }

View File

@ -9,10 +9,10 @@ import (
// OnGetBlockLocator is invoked when a peer receives a getlocator kaspa // OnGetBlockLocator is invoked when a peer receives a getlocator kaspa
// message. // message.
func (sp *Peer) OnGetBlockLocator(_ *peer.Peer, msg *wire.MsgGetBlockLocator) { func (sp *Peer) OnGetBlockLocator(_ *peer.Peer, msg *wire.MsgGetBlockLocator) {
locator, err := sp.server.DAG.BlockLocatorFromHashes(msg.StartHash, msg.StopHash) locator, err := sp.server.DAG.BlockLocatorFromHashes(msg.HighHash, msg.LowHash)
if err != nil || len(locator) == 0 { if err != nil || len(locator) == 0 {
warning := fmt.Sprintf("Couldn't build a block locator between blocks "+ warning := fmt.Sprintf("Couldn't build a block locator between blocks "+
"%s and %s that was requested from peer %s", msg.StartHash, msg.StopHash, sp) "%s and %s that was requested from peer %s", msg.HighHash, msg.LowHash, sp)
if err != nil { if err != nil {
warning = fmt.Sprintf("%s: %s", warning, err) warning = fmt.Sprintf("%s: %s", warning, err)
} }

View File

@ -11,21 +11,21 @@ import (
func handleGetHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { func handleGetHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
c := cmd.(*rpcmodel.GetHeadersCmd) c := cmd.(*rpcmodel.GetHeadersCmd)
startHash := &daghash.ZeroHash lowHash := &daghash.ZeroHash
if c.StartHash != "" { if c.LowHash != "" {
err := daghash.Decode(startHash, c.StartHash) err := daghash.Decode(lowHash, c.LowHash)
if err != nil { if err != nil {
return nil, rpcDecodeHexError(c.StopHash) return nil, rpcDecodeHexError(c.HighHash)
} }
} }
stopHash := &daghash.ZeroHash highHash := &daghash.ZeroHash
if c.StopHash != "" { if c.HighHash != "" {
err := daghash.Decode(stopHash, c.StopHash) err := daghash.Decode(highHash, c.HighHash)
if err != nil { if err != nil {
return nil, rpcDecodeHexError(c.StopHash) return nil, rpcDecodeHexError(c.HighHash)
} }
} }
headers, err := s.cfg.SyncMgr.GetBlueBlocksHeadersBetween(startHash, stopHash) headers, err := s.cfg.SyncMgr.BlueBlockHeadersBetween(lowHash, highHash)
if err != nil { if err != nil {
return nil, &rpcmodel.RPCError{ return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCMisc, Code: rpcmodel.ErrRPCMisc,

View File

@ -11,15 +11,15 @@ import (
func handleGetTopHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { func handleGetTopHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
c := cmd.(*rpcmodel.GetTopHeadersCmd) c := cmd.(*rpcmodel.GetTopHeadersCmd)
var startHash *daghash.Hash var highHash *daghash.Hash
if c.StartHash != nil { if c.HighHash != nil {
startHash = &daghash.Hash{} highHash = &daghash.Hash{}
err := daghash.Decode(startHash, *c.StartHash) err := daghash.Decode(highHash, *c.HighHash)
if err != nil { if err != nil {
return nil, rpcDecodeHexError(*c.StartHash) return nil, rpcDecodeHexError(*c.HighHash)
} }
} }
headers, err := s.cfg.DAG.GetTopHeaders(startHash) headers, err := s.cfg.DAG.GetTopHeaders(highHash)
if err != nil { if err != nil {
return nil, internalRPCError(err.Error(), return nil, internalRPCError(err.Error(),
"Failed to get top headers") "Failed to get top headers")

View File

@ -269,12 +269,12 @@ func (b *rpcSyncMgr) SyncPeerID() int32 {
return b.syncMgr.SyncPeerID() return b.syncMgr.SyncPeerID()
} }
// GetBlueBlocksHeadersBetween returns the headers of the blocks after the provided // BlueBlockHeadersBetween returns the headers of the blocks after the provided
// start hash until the provided stop hash is reached, or up to the // low hash until the provided high hash is reached, or up to the
// provided max number of block headers. // provided max number of block headers.
// //
// This function is safe for concurrent access and is part of the // This function is safe for concurrent access and is part of the
// rpcserverSyncManager interface implementation. // rpcserverSyncManager interface implementation.
func (b *rpcSyncMgr) GetBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash) ([]*wire.BlockHeader, error) { func (b *rpcSyncMgr) BlueBlockHeadersBetween(lowHash, highHash *daghash.Hash) ([]*wire.BlockHeader, error) {
return b.server.DAG.GetBlueBlocksHeadersBetween(startHash, stopHash) return b.server.DAG.BlueBlockHeadersBetween(lowHash, highHash)
} }

View File

@ -735,11 +735,11 @@ type rpcserverSyncManager interface {
// used to sync from or 0 if there is none. // used to sync from or 0 if there is none.
SyncPeerID() int32 SyncPeerID() int32
// GetBlueBlocksHeadersBetween returns the headers of the blocks after the first known // BlueBlockHeadersBetween returns the headers of the blocks after the first known
// block in the provided locators until the provided stop hash or the // block in the provided locators until the provided high hash or the
// current tip is reached, up to a max of wire.MaxBlockHeadersPerMsg // current tip is reached, up to a max of wire.MaxBlockHeadersPerMsg
// hashes. // hashes.
GetBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash) ([]*wire.BlockHeader, error) BlueBlockHeadersBetween(lowHash, highHash *daghash.Hash) ([]*wire.BlockHeader, error)
} }
// rpcserverConfig is a descriptor containing the RPC server configuration. // rpcserverConfig is a descriptor containing the RPC server configuration.

View File

@ -394,13 +394,13 @@ var helpDescsEnUS = map[string]string{
// GetTopHeadersCmd help. // GetTopHeadersCmd help.
"getTopHeaders--synopsis": "Returns the top block headers starting with the provided start hash (not inclusive)", "getTopHeaders--synopsis": "Returns the top block headers starting with the provided start hash (not inclusive)",
"getTopHeaders-startHash": "Block hash to start including block headers from; if not found, it'll start from the virtual.", "getTopHeaders-highHash": "Block hash to start including block headers from; if not found, it'll start from the virtual.",
"getTopHeaders--result0": "Serialized block headers of all located blocks, limited to some arbitrary maximum number of hashes (currently 2000, which matches the wire protocol headers message, but this is not guaranteed)", "getTopHeaders--result0": "Serialized block headers of all located blocks, limited to some arbitrary maximum number of hashes (currently 2000, which matches the wire protocol headers message, but this is not guaranteed)",
// GetHeadersCmd help. // GetHeadersCmd help.
"getHeaders--synopsis": "Returns block headers starting with the first known block hash from the request", "getHeaders--synopsis": "Returns block headers starting with the first known block hash from the request",
"getHeaders-startHash": "Block hash to start including headers from; if not found, it'll start from the genesis block.", "getHeaders-lowHash": "Block hash to start including headers from; if not found, it'll start from the genesis block.",
"getHeaders-stopHash": "Block hash to stop including block headers for; if not found, all headers to the latest known block are returned.", "getHeaders-highHash": "Block hash to stop including block headers for; if not found, all headers to the latest known block are returned.",
"getHeaders--result0": "Serialized block headers of all located blocks, limited to some arbitrary maximum number of hashes (currently 2000, which matches the wire protocol headers message, but this is not guaranteed)", "getHeaders--result0": "Serialized block headers of all located blocks, limited to some arbitrary maximum number of hashes (currently 2000, which matches the wire protocol headers message, but this is not guaranteed)",
// GetInfoCmd help. // GetInfoCmd help.

View File

@ -442,8 +442,8 @@ func BenchmarkDecodeHeaders(b *testing.B) {
func BenchmarkDecodeGetBlockInvs(b *testing.B) { func BenchmarkDecodeGetBlockInvs(b *testing.B) {
pver := ProtocolVersion pver := ProtocolVersion
var m MsgGetBlockInvs var m MsgGetBlockInvs
m.StartHash = &daghash.Hash{1} m.LowHash = &daghash.Hash{1}
m.StopHash = &daghash.Hash{1} m.HighHash = &daghash.Hash{1}
// Serialize it so the bytes are available to test the decode below. // Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer var bb bytes.Buffer

View File

@ -14,32 +14,32 @@ import (
// getblockinvs message. It is used to request a list of blocks starting after the // getblockinvs message. It is used to request a list of blocks starting after the
// start hash and until the stop hash. // start hash and until the stop hash.
type MsgGetBlockInvs struct { type MsgGetBlockInvs struct {
StartHash *daghash.Hash LowHash *daghash.Hash
StopHash *daghash.Hash HighHash *daghash.Hash
} }
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver. // KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation. // This is part of the Message interface implementation.
func (msg *MsgGetBlockInvs) KaspaDecode(r io.Reader, pver uint32) error { func (msg *MsgGetBlockInvs) KaspaDecode(r io.Reader, pver uint32) error {
msg.StartHash = &daghash.Hash{} msg.LowHash = &daghash.Hash{}
err := ReadElement(r, msg.StartHash) err := ReadElement(r, msg.LowHash)
if err != nil { if err != nil {
return err return err
} }
msg.StopHash = &daghash.Hash{} msg.HighHash = &daghash.Hash{}
return ReadElement(r, msg.StopHash) return ReadElement(r, msg.HighHash)
} }
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding. // KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation. // This is part of the Message interface implementation.
func (msg *MsgGetBlockInvs) KaspaEncode(w io.Writer, pver uint32) error { func (msg *MsgGetBlockInvs) KaspaEncode(w io.Writer, pver uint32) error {
err := WriteElement(w, msg.StartHash) err := WriteElement(w, msg.LowHash)
if err != nil { if err != nil {
return err return err
} }
return WriteElement(w, msg.StopHash) return WriteElement(w, msg.HighHash)
} }
// Command returns the protocol command string for the message. This is part // Command returns the protocol command string for the message. This is part
@ -58,9 +58,9 @@ func (msg *MsgGetBlockInvs) MaxPayloadLength(pver uint32) uint32 {
// NewMsgGetBlockInvs returns a new kaspa getblockinvs message that conforms to the // NewMsgGetBlockInvs returns a new kaspa getblockinvs message that conforms to the
// Message interface using the passed parameters and defaults for the remaining // Message interface using the passed parameters and defaults for the remaining
// fields. // fields.
func NewMsgGetBlockInvs(startHash, stopHash *daghash.Hash) *MsgGetBlockInvs { func NewMsgGetBlockInvs(lowHash, highHash *daghash.Hash) *MsgGetBlockInvs {
return &MsgGetBlockInvs{ return &MsgGetBlockInvs{
StartHash: startHash, LowHash: lowHash,
StopHash: stopHash, HighHash: highHash,
} }
} }
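A round-trip sketch of the MsgGetBlockInvs payload after the rename: the field order (low hash first, then high hash) matches KaspaEncode/KaspaDecode above, but the hash type and the encode/decode helpers are simplified stand-ins for daghash.Hash and the wire package's ReadElement/WriteElement:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// hash is a simplified stand-in for daghash.Hash: a fixed 32-byte array.
type hash [32]byte

// msgGetBlockInvs mirrors the two-hash payload of MsgGetBlockInvs above.
type msgGetBlockInvs struct {
	LowHash  hash
	HighHash hash
}

// encode writes the low hash followed by the high hash, the same field order
// KaspaEncode uses above.
func (m *msgGetBlockInvs) encode(w io.Writer) error {
	if _, err := w.Write(m.LowHash[:]); err != nil {
		return err
	}
	_, err := w.Write(m.HighHash[:])
	return err
}

// decode reads the two hashes back in the same order.
func (m *msgGetBlockInvs) decode(r io.Reader) error {
	if _, err := io.ReadFull(r, m.LowHash[:]); err != nil {
		return err
	}
	_, err := io.ReadFull(r, m.HighHash[:])
	return err
}

func main() {
	in := msgGetBlockInvs{LowHash: hash{1}, HighHash: hash{2}}
	var buf bytes.Buffer
	if err := in.encode(&buf); err != nil {
		panic(err)
	}
	var out msgGetBlockInvs
	if err := out.decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.LowHash == in.LowHash, out.HighHash == in.HighHash) // true true
}
```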

View File

@ -32,9 +32,9 @@ func TestGetBlockInvs(t *testing.T) {
// Ensure we get the same data back out. // Ensure we get the same data back out.
msg := NewMsgGetBlockInvs(startHash, stopHash) msg := NewMsgGetBlockInvs(startHash, stopHash)
if !msg.StopHash.IsEqual(stopHash) { if !msg.HighHash.IsEqual(stopHash) {
t.Errorf("NewMsgGetBlockInvs: wrong stop hash - got %v, want %v", t.Errorf("NewMsgGetBlockInvs: wrong stop hash - got %v, want %v",
msg.StopHash, stopHash) msg.HighHash, stopHash)
} }
// Ensure the command is expected value. // Ensure the command is expected value.

View File

@ -12,21 +12,21 @@ import (
// //
// This message has no payload. // This message has no payload.
type MsgGetBlockLocator struct { type MsgGetBlockLocator struct {
StartHash *daghash.Hash HighHash *daghash.Hash
StopHash *daghash.Hash LowHash *daghash.Hash
} }
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver. // KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation. // This is part of the Message interface implementation.
func (msg *MsgGetBlockLocator) KaspaDecode(r io.Reader, pver uint32) error { func (msg *MsgGetBlockLocator) KaspaDecode(r io.Reader, pver uint32) error {
msg.StartHash = &daghash.Hash{} msg.HighHash = &daghash.Hash{}
err := ReadElement(r, msg.StartHash) err := ReadElement(r, msg.HighHash)
if err != nil { if err != nil {
return err return err
} }
msg.StopHash = &daghash.Hash{} msg.LowHash = &daghash.Hash{}
err = ReadElement(r, msg.StopHash) err = ReadElement(r, msg.LowHash)
if err != nil { if err != nil {
return err return err
} }
@ -36,12 +36,12 @@ func (msg *MsgGetBlockLocator) KaspaDecode(r io.Reader, pver uint32) error {
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding. // KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation. // This is part of the Message interface implementation.
func (msg *MsgGetBlockLocator) KaspaEncode(w io.Writer, pver uint32) error { func (msg *MsgGetBlockLocator) KaspaEncode(w io.Writer, pver uint32) error {
err := WriteElement(w, msg.StartHash) err := WriteElement(w, msg.HighHash)
if err != nil { if err != nil {
return err return err
} }
err = WriteElement(w, msg.StopHash) err = WriteElement(w, msg.LowHash)
if err != nil { if err != nil {
return err return err
} }
@ -63,9 +63,9 @@ func (msg *MsgGetBlockLocator) MaxPayloadLength(pver uint32) uint32 {
// NewMsgGetBlockLocator returns a new getlocator message that conforms to the // NewMsgGetBlockLocator returns a new getlocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining // Message interface using the passed parameters and defaults for the remaining
// fields. // fields.
func NewMsgGetBlockLocator(startHash, stopHash *daghash.Hash) *MsgGetBlockLocator { func NewMsgGetBlockLocator(highHash, lowHash *daghash.Hash) *MsgGetBlockLocator {
return &MsgGetBlockLocator{ return &MsgGetBlockLocator{
StartHash: startHash, HighHash: highHash,
StopHash: stopHash, LowHash: lowHash,
} }
} }

View File

@ -17,7 +17,7 @@ import (
// limited by a specific hash to stop at or the maximum number of block headers // limited by a specific hash to stop at or the maximum number of block headers
// per message, which is currently 2000. // per message, which is currently 2000.
// //
// Set the StopHash field to the hash at which to stop and use // Set the HighHash field to the hash at which to stop and use
// AddBlockLocatorHash to build up the list of block locator hashes. // AddBlockLocatorHash to build up the list of block locator hashes.
// //
// The algorithm for building the block locator hashes should be to add the // The algorithm for building the block locator hashes should be to add the