Merge remote-tracking branch 'origin/dev-20-primitive-blockdag' into dev-31-20-tmp-branch

This commit is contained in:
Ori Newman
2018-07-20 10:25:36 +03:00
8 changed files with 116 additions and 27 deletions

View File

@@ -188,9 +188,9 @@ type BlockDAG struct {
notifications []NotificationCallback
}
// HaveBlock returns whether or not the chain instance has the block represented
// HaveBlock returns whether or not the DAG instance has the block represented
// by the passed hash. This includes checking the various places a block can
// be like part of the main chain, on a side chain, or in the orphan pool.
// be in, like part of the DAG or the orphan pool.
//
// This function is safe for concurrent access.
func (b *BlockDAG) HaveBlock(hash *daghash.Hash) (bool, error) {
@@ -201,6 +201,25 @@ func (b *BlockDAG) HaveBlock(hash *daghash.Hash) (bool, error) {
return exists || b.IsKnownOrphan(hash), nil
}
// HaveBlocks returns whether or not the DAG instance has all blocks represented
// by the passed hashes. This includes checking the various places a block can
// be in, like part of the DAG or the orphan pool.
//
// This function is safe for concurrent access.
func (b *BlockDAG) HaveBlocks(hashes []daghash.Hash) (bool, error) {
	// Index into the slice rather than ranging by value: ranging would copy
	// each hash into the loop variable and we would then take the address of
	// that copy instead of the stored element.
	for i := range hashes {
		haveBlock, err := b.HaveBlock(&hashes[i])
		if err != nil {
			return false, err
		}
		// A single unknown hash is enough to answer "no".
		if !haveBlock {
			return false, nil
		}
	}
	return true, nil
}
// IsKnownOrphan returns whether the passed hash is currently a known orphan.
// Keep in mind that only a limited number of orphans are held onto for a
// limited amount of time, so this function must not be used as an absolute

View File

@@ -13,14 +13,11 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/indexers"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/wire"
"github.com/daglabs/btcutil"
)
var zeroHash = daghash.Hash{}
// importResults houses the stats and result of an import operation.
type importResults struct {
blocksProcessed int64
@@ -89,7 +86,7 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
// processBlock potentially imports the block into the database. It first
// deserializes the raw block while checking for errors. Already known blocks
// are skipped and orphan blocks are considered errors. Finally, it runs the
// block through the chain rules to ensure it follows all rules and matches
// block through the DAG rules to ensure it follows all rules and matches
// up to the known checkpoint. Returns whether the block was imported along
// with any potential errors.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
@@ -114,16 +111,16 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
}
// Don't bother trying to process orphans.
prevHash := &block.MsgBlock().Header.PrevBlock
if !prevHash.IsEqual(&zeroHash) {
exists, err := bi.dag.HaveBlock(prevHash)
prevBlocks := block.MsgBlock().Header.PrevBlocks
if len(prevBlocks) > 0 {
exist, err := bi.dag.HaveBlocks(prevBlocks)
if err != nil {
return false, err
}
if !exists {
if !exist {
return false, fmt.Errorf("import file contains block "+
"%v which does not link to the available "+
"block chain", prevHash)
"block DAG", prevBlocks)
}
}

View File

@@ -34,14 +34,13 @@ func loadBlockDB() (database.DB, error) {
return db, nil
}
// findCandidates searches the chain backwards for checkpoint candidates and
// findCandidates searches the DAG backwards for checkpoint candidates and
// returns a slice of found candidates, if any. It also stops searching for
// candidates at the last checkpoint that is already hard coded into btcchain
// since there is no point in finding candidates before already existing
// checkpoints.
func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) {
// Start with the latest block of the main chain.
block, err := dag.BlockByHash(latestHash)
// candidates at the last checkpoint that is already hard coded since there
// is no point in finding candidates before already existing checkpoints.
func findCandidates(dag *blockdag.BlockDAG, selectedTipHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) {
// Start with the selected tip.
block, err := dag.BlockByHash(selectedTipHash)
if err != nil {
return nil, err
}
@@ -70,7 +69,7 @@ func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagcon
}
// For the first checkpoint, the required height is any block after the
// genesis block, so long as the chain has at least the required number
// genesis block, so long as the DAG has at least the required number
// of confirmations (which is enforced above).
if len(activeNetParams.Checkpoints) == 0 {
requiredHeight = 1
@@ -82,7 +81,7 @@ func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagcon
fmt.Print("Searching for candidates")
defer fmt.Println()
// Loop backwards through the chain to find checkpoint candidates.
// Loop backwards through the DAG to find checkpoint candidates.
candidates := make([]*dagconfig.Checkpoint, 0, cfg.NumCandidates)
numTested := int32(0)
for len(candidates) < cfg.NumCandidates && block.Height() > requiredHeight {
@@ -107,8 +106,9 @@ func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagcon
candidates = append(candidates, &checkpoint)
}
prevHash := &block.MsgBlock().Header.PrevBlock
block, err = dag.BlockByHash(prevHash)
prevBlockHashes := block.MsgBlock().Header.PrevBlocks
selectedBlockHash := &prevBlockHashes[0]
block, err = dag.BlockByHash(selectedBlockHash)
if err != nil {
return nil, err
}

View File

@@ -69,6 +69,27 @@ func (hash *Hash) IsEqual(target *Hash) bool {
return *hash == *target
}
// AreEqual returns true if both slices contain exactly the same hashes,
// irrespective of order. Duplicates are handled correctly: each hash must
// appear the same number of times in both slices for them to be considered
// equal.
func AreEqual(first []Hash, second []Hash) bool {
	if len(first) != len(second) {
		return false
	}
	// Count occurrences in first, then decrement for each hash in second.
	// Any hash that is missing from first, or appears more often in second,
	// drives its count negative. Because the lengths are equal, no count can
	// remain positive if none went negative, so this is a full multiset
	// comparison.
	counts := make(map[Hash]int, len(first))
	for _, hash := range first {
		counts[hash]++
	}
	for _, hash := range second {
		counts[hash]--
		if counts[hash] < 0 {
			return false
		}
	}
	return true
}
// NewHash returns a new Hash from a byte slice. An error is returned if
// the number of bytes passed in is not HashSize.
func NewHash(newHash []byte) (*Hash, error) {

View File

@@ -194,3 +194,55 @@ func TestNewHashFromStr(t *testing.T) {
}
}
}
// TestAreEqual executes tests against the AreEqual function.
func TestAreEqual(t *testing.T) {
hash0, _ := NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
hash1, _ := NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
hash2, _ := NewHashFromStr("2222222222222222222222222222222222222222222222222222222222222222")
hash3, _ := NewHashFromStr("3333333333333333333333333333333333333333333333333333333333333333")
hashes0To2 := []Hash{*hash0, *hash1, *hash2}
hashes0To2Shifted := []Hash{*hash2, *hash0, *hash1}
hashes1To3 := []Hash{*hash1, *hash2, *hash3}
hashes0To3 := []Hash{*hash0, *hash1, *hash2, *hash3}
tests := []struct {
name string
first []Hash
second []Hash
expected bool
}{
{
name: "self-equality",
first: hashes0To2,
second: hashes0To2,
expected: true,
},
{
name: "same members, different order",
first: hashes0To2,
second: hashes0To2Shifted,
expected: true,
},
{
name: "same slice length but only some members are equal",
first: hashes0To2,
second: hashes1To3,
expected: false,
},
{
name: "different slice lengths, one slice containing all the other's members",
first: hashes0To3,
second: hashes0To2,
expected: false,
},
}
for _, test := range tests {
result := AreEqual(test.first, test.second)
if result != test.expected {
t.Errorf("unexpected AreEqual result for"+
" test \"%s\". Expected: %t, got: %t.", test.name, test.expected, result)
}
}
}

View File

@@ -162,9 +162,9 @@ func (m *CPUMiner) submitBlock(block *btcutil.Block) bool {
// a new block, but the check only happens periodically, so it is
// possible a block was found and submitted in between.
msgBlock := block.MsgBlock()
if !msgBlock.Header.PrevBlock.IsEqual(&m.g.GetDAGState().SelectedTip.Hash) {
if !daghash.AreEqual(msgBlock.Header.PrevBlocks, m.g.GetDAGState().TipHashes) {
log.Debugf("Block submitted via CPU miner with previous "+
"block %s is stale", msgBlock.Header.PrevBlock)
"blocks %s is stale", msgBlock.Header.PrevBlocks)
return false
}
@@ -248,7 +248,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32,
// The current block is stale if the DAG has changed.
dagState := m.g.GetDAGState()
if !header.PrevBlock.IsEqual(&dagState.SelectedTip.Hash) {
if !daghash.AreEqual(header.PrevBlocks, dagState.TipHashes) {
return false
}

View File

@@ -764,7 +764,7 @@ mempoolLoop:
var msgBlock wire.MsgBlock
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
PrevBlock: dagState.SelectedTip.Hash,
PrevBlocks: dagState.TipHashes,
MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts,
Bits: reqDifficulty,

View File

@@ -783,7 +783,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
// add it to the list of headers.
node := headerNode{hash: &blockHash}
prevNode := prevNodeEl.Value.(*headerNode)
if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
if prevNode.hash.IsEqual(&blockHeader.PrevBlocks[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
node.height = prevNode.height + 1
e := sm.headerList.PushBack(&node)
if sm.startHeader == nil {