Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-06-06 14:16:43 +00:00)

Commit 16b9a36bcc: [DEV-31] Fix conflicts with master
@@ -347,7 +347,7 @@ func (b *BlockDAG) TstSetCoinbaseMaturity(maturity uint16) {
 // important to note that this chain has no database associated with it, so
 // it is not usable with all functions and the tests must take care when making
 // use of it.
-func newFakeDag(params *dagconfig.Params) *BlockDAG {
+func newFakeDAG(params *dagconfig.Params) *BlockDAG {
 	// Create a genesis block node and block index index populated with it
 	// for use when creating the fake chain below.
 	node := newBlockNode(&params.GenesisBlock.Header, newSet())
@@ -188,9 +188,9 @@ type BlockDAG struct {
 	notifications []NotificationCallback
 }
 
-// HaveBlock returns whether or not the chain instance has the block represented
+// HaveBlock returns whether or not the DAG instance has the block represented
 // by the passed hash. This includes checking the various places a block can
-// be like part of the main chain, on a side chain, or in the orphan pool.
+// be in, like part of the DAG or the orphan pool.
 //
 // This function is safe for concurrent access.
 func (b *BlockDAG) HaveBlock(hash *daghash.Hash) (bool, error) {
@@ -201,6 +201,25 @@ func (b *BlockDAG) HaveBlock(hash *daghash.Hash) (bool, error) {
 	return exists || b.IsKnownOrphan(hash), nil
 }
 
+// HaveBlocks returns whether or not the DAG instances has all blocks represented
+// by the passed hashes. This includes checking the various places a block can
+// be in, like part of the DAG or the orphan pool.
+//
+// This function is safe for concurrent access.
+func (b *BlockDAG) HaveBlocks(hashes []daghash.Hash) (bool, error) {
+	for _, hash := range hashes {
+		haveBlock, err := b.HaveBlock(&hash)
+		if err != nil {
+			return false, err
+		}
+		if !haveBlock {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
 // IsKnownOrphan returns whether the passed hash is currently a known orphan.
 // Keep in mind that only a limited number of orphans are held onto for a
 // limited amount of time, so this function must not be used as an absolute
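The new HaveBlocks helper simply folds HaveBlock over a slice of hashes, and it is what the block importer hunk further down relies on once a block can name several parents. A minimal, hypothetical caller sketch (not part of the commit; it assumes the blockdag, daghash, and fmt imports shown elsewhere in this diff and a *blockdag.BlockDAG already in hand):

// parentsKnown is a hypothetical helper illustrating the intended use of
// HaveBlocks: reject a block whose parents are not all already known to the
// DAG (or its orphan pool).
func parentsKnown(dag *blockdag.BlockDAG, parents []daghash.Hash) error {
	have, err := dag.HaveBlocks(parents)
	if err != nil {
		return err
	}
	if !have {
		return fmt.Errorf("parents %v are not all in the block DAG", parents)
	}
	return nil
}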
@@ -120,8 +120,8 @@ func TestCalcSequenceLock(t *testing.T) {
 
 	blockVersion := int32(0x20000000)
 
-	// Generate enough synthetic blocks to activate CSV.
-	chain := newFakeDag(netParams)
+	// Generate enough synthetic blocks for the rest of the test
+	chain := newFakeDAG(netParams)
 	node := chain.dag.SelectedTip()
 	blockTime := node.Header().Timestamp
 	numBlocksToGenerate := uint32(5)
@@ -448,7 +448,7 @@ func TestLocateInventory(t *testing.T) {
 	// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
 	//                               \-> 16a -> 17a
 	tip := tstTip
-	dag := newFakeDag(&dagconfig.MainNetParams)
+	dag := newFakeDAG(&dagconfig.MainNetParams)
 	branch0Nodes := chainedNodes(setFromSlice(dag.dag.Genesis()), 18)
 	branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 2)
 	for _, node := range branch0Nodes {
@@ -788,7 +788,7 @@ func TestHeightToHashRange(t *testing.T) {
 	// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
 	//                               \-> 16a -> 17a -> 18a (unvalidated)
 	tip := tstTip
-	chain := newFakeDag(&dagconfig.MainNetParams)
+	chain := newFakeDAG(&dagconfig.MainNetParams)
 	branch0Nodes := chainedNodes(setFromSlice(chain.dag.Genesis()), 18)
 	branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 3)
 	for _, node := range branch0Nodes {
@@ -880,7 +880,7 @@ func TestIntervalBlockHashes(t *testing.T) {
 	// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
 	//                               \-> 16a -> 17a -> 18a (unvalidated)
 	tip := tstTip
-	chain := newFakeDag(&dagconfig.MainNetParams)
+	chain := newFakeDAG(&dagconfig.MainNetParams)
 	branch0Nodes := chainedNodes(setFromSlice(chain.dag.Genesis()), 18)
 	branch1Nodes := chainedNodes(setFromSlice(branch0Nodes[14]), 3)
 	for _, node := range branch0Nodes {
@@ -10,39 +10,39 @@ import "encoding/json"
 // the verbose flag is set. When the verbose flag is not set, getblockheader
 // returns a hex-encoded string.
 type GetBlockHeaderVerboseResult struct {
-	Hash          string  `json:"hash"`
-	Confirmations uint64  `json:"confirmations"`
-	Height        int32   `json:"height"`
-	Version       int32   `json:"version"`
-	VersionHex    string  `json:"versionHex"`
-	MerkleRoot    string  `json:"merkleroot"`
-	Time          int64   `json:"time"`
-	Nonce         uint64  `json:"nonce"`
-	Bits          string  `json:"bits"`
-	Difficulty    float64 `json:"difficulty"`
-	PreviousHash  string  `json:"previousblockhash,omitempty"`
-	NextHash      string  `json:"nextblockhash,omitempty"`
+	Hash           string   `json:"hash"`
+	Confirmations  uint64   `json:"confirmations"`
+	Height         int32    `json:"height"`
+	Version        int32    `json:"version"`
+	VersionHex     string   `json:"versionHex"`
+	MerkleRoot     string   `json:"merkleroot"`
+	Time           int64    `json:"time"`
+	Nonce          uint64   `json:"nonce"`
+	Bits           string   `json:"bits"`
+	Difficulty     float64  `json:"difficulty"`
+	PreviousHashes []string `json:"previousblockhashes,omitempty"`
+	NextHash       string   `json:"nextblockhash,omitempty"`
 }
 
 // GetBlockVerboseResult models the data from the getblock command when the
 // verbose flag is set. When the verbose flag is not set, getblock returns a
 // hex-encoded string.
 type GetBlockVerboseResult struct {
-	Hash          string        `json:"hash"`
-	Confirmations uint64        `json:"confirmations"`
-	Size          int32         `json:"size"`
-	Height        int64         `json:"height"`
-	Version       int32         `json:"version"`
-	VersionHex    string        `json:"versionHex"`
-	MerkleRoot    string        `json:"merkleroot"`
-	Tx            []string      `json:"tx,omitempty"`
-	RawTx         []TxRawResult `json:"rawtx,omitempty"`
-	Time          int64         `json:"time"`
-	Nonce         uint32        `json:"nonce"`
-	Bits          string        `json:"bits"`
-	Difficulty    float64       `json:"difficulty"`
-	PreviousHash  string        `json:"previousblockhash"`
-	NextHash      string        `json:"nextblockhash,omitempty"`
+	Hash           string        `json:"hash"`
+	Confirmations  uint64        `json:"confirmations"`
+	Size           int32         `json:"size"`
+	Height         int64         `json:"height"`
+	Version        int32         `json:"version"`
+	VersionHex     string        `json:"versionHex"`
+	MerkleRoot     string        `json:"merkleroot"`
+	Tx             []string      `json:"tx,omitempty"`
+	RawTx          []TxRawResult `json:"rawtx,omitempty"`
+	Time           int64         `json:"time"`
+	Nonce          uint32        `json:"nonce"`
+	Bits           string        `json:"bits"`
+	Difficulty     float64       `json:"difficulty"`
+	PreviousHashes []string      `json:"previousblockhashes"`
+	NextHash       string        `json:"nextblockhash,omitempty"`
 }
 
 // CreateMultiSigResult models the data returned from the createmultisig
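For JSON-RPC consumers, the practical effect of these struct changes is that the single previousblockhash string becomes a previousblockhashes array. A hedged, self-contained sketch of the new wire shape; the btcjson import path is inferred from the other github.com/daglabs/btcd imports in this diff, and every literal value is made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/daglabs/btcd/btcjson"
)

func main() {
	// A DAG block may reference several parents, hence a slice of hashes.
	parents := []string{
		"000000000000000000000000000000000000000000000000000000000000000a",
		"000000000000000000000000000000000000000000000000000000000000000b",
	}

	hdr := btcjson.GetBlockHeaderVerboseResult{
		Hash:           "0000000000000000000000000000000000000000000000000000000000000123",
		Confirmations:  1,
		Height:         2,
		Version:        0x20000000,
		VersionHex:     "20000000",
		MerkleRoot:     "00000000000000000000000000000000000000000000000000000000000000cc",
		Time:           1529900000,
		Nonce:          42,
		Bits:           "1d00ffff",
		Difficulty:     1.0,
		PreviousHashes: parents, // serialized as "previousblockhashes": [...]
	}

	out, _ := json.MarshalIndent(hdr, "", "  ")
	fmt.Println(string(out))
}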
@@ -80,7 +80,7 @@ type GetAddedNodeInfoResult struct {
 type SoftForkDescription struct {
 	ID      string `json:"id"`
 	Version uint32 `json:"version"`
-	Reject struct {
+	Reject struct {
 		Status bool `json:"status"`
 	} `json:"reject"`
 }
@@ -133,18 +133,18 @@ type GetBlockTemplateResultAux struct {
 type GetBlockTemplateResult struct {
 	// Base fields from BIP 0022. CoinbaseAux is optional. One of
 	// CoinbaseTxn or CoinbaseValue must be specified, but not both.
-	Bits          string                     `json:"bits"`
-	CurTime       int64                      `json:"curtime"`
-	Height        int64                      `json:"height"`
-	PreviousHash  string                     `json:"previousblockhash"`
-	SigOpLimit    int64                      `json:"sigoplimit,omitempty"`
-	SizeLimit     int64                      `json:"sizelimit,omitempty"`
-	Transactions  []GetBlockTemplateResultTx `json:"transactions"`
-	Version       int32                      `json:"version"`
-	CoinbaseAux   *GetBlockTemplateResultAux `json:"coinbaseaux,omitempty"`
-	CoinbaseTxn   *GetBlockTemplateResultTx  `json:"coinbasetxn,omitempty"`
-	CoinbaseValue *int64                     `json:"coinbasevalue,omitempty"`
-	WorkID        string                     `json:"workid,omitempty"`
+	Bits           string                     `json:"bits"`
+	CurTime        int64                      `json:"curtime"`
+	Height         int64                      `json:"height"`
+	PreviousHashes []string                   `json:"previousblockhashes"`
+	SigOpLimit     int64                      `json:"sigoplimit,omitempty"`
+	SizeLimit      int64                      `json:"sizelimit,omitempty"`
+	Transactions   []GetBlockTemplateResultTx `json:"transactions"`
+	Version        int32                      `json:"version"`
+	CoinbaseAux    *GetBlockTemplateResultAux `json:"coinbaseaux,omitempty"`
+	CoinbaseTxn    *GetBlockTemplateResultTx  `json:"coinbasetxn,omitempty"`
+	CoinbaseValue  *int64                     `json:"coinbasevalue,omitempty"`
+	WorkID         string                     `json:"workid,omitempty"`
 
 	// Optional long polling from BIP 0022.
 	LongPollID string `json:"longpollid,omitempty"`
@@ -177,32 +177,6 @@ func NewStopNotifySpentCmd(outPoints []OutPoint) *StopNotifySpentCmd {
 	}
 }
 
-// RescanCmd defines the rescan JSON-RPC command.
-//
-// NOTE: Deprecated. Use RescanBlocksCmd instead.
-type RescanCmd struct {
-	BeginBlock string
-	Addresses  []string
-	OutPoints  []OutPoint
-	EndBlock   *string
-}
-
-// NewRescanCmd returns a new instance which can be used to issue a rescan
-// JSON-RPC command.
-//
-// The parameters which are pointers indicate they are optional. Passing nil
-// for optional parameters will use the default value.
-//
-// NOTE: Deprecated. Use NewRescanBlocksCmd instead.
-func NewRescanCmd(beginBlock string, addresses []string, outPoints []OutPoint, endBlock *string) *RescanCmd {
-	return &RescanCmd{
-		BeginBlock: beginBlock,
-		Addresses:  addresses,
-		OutPoints:  outPoints,
-		EndBlock:   endBlock,
-	}
-}
-
 // RescanBlocksCmd defines the rescan JSON-RPC command.
 //
 // NOTE: This is a btcd extension ported from github.com/decred/dcrd/dcrjson
@@ -236,6 +210,5 @@ func init() {
 	MustRegisterCmd("stopnotifynewtransactions", (*StopNotifyNewTransactionsCmd)(nil), flags)
 	MustRegisterCmd("stopnotifyspent", (*StopNotifySpentCmd)(nil), flags)
 	MustRegisterCmd("stopnotifyreceived", (*StopNotifyReceivedCmd)(nil), flags)
-	MustRegisterCmd("rescan", (*RescanCmd)(nil), flags)
 	MustRegisterCmd("rescanblocks", (*RescanBlocksCmd)(nil), flags)
 }
@@ -154,45 +154,6 @@ func TestDAGSvrWsCmds(t *testing.T) {
 				OutPoints: []btcjson.OutPoint{{Hash: "123", Index: 0}},
 			},
 		},
-		{
-			name: "rescan",
-			newCmd: func() (interface{}, error) {
-				return btcjson.NewCmd("rescan", "123", `["1Address"]`, `[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]`)
-			},
-			staticCmd: func() interface{} {
-				addrs := []string{"1Address"}
-				ops := []btcjson.OutPoint{{
-					Hash:  "0000000000000000000000000000000000000000000000000000000000000123",
-					Index: 0,
-				}}
-				return btcjson.NewRescanCmd("123", addrs, ops, nil)
-			},
-			marshalled: `{"jsonrpc":"1.0","method":"rescan","params":["123",["1Address"],[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]],"id":1}`,
-			unmarshalled: &btcjson.RescanCmd{
-				BeginBlock: "123",
-				Addresses:  []string{"1Address"},
-				OutPoints:  []btcjson.OutPoint{{Hash: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}},
-				EndBlock:   nil,
-			},
-		},
-		{
-			name: "rescan optional",
-			newCmd: func() (interface{}, error) {
-				return btcjson.NewCmd("rescan", "123", `["1Address"]`, `[{"hash":"123","index":0}]`, "456")
-			},
-			staticCmd: func() interface{} {
-				addrs := []string{"1Address"}
-				ops := []btcjson.OutPoint{{Hash: "123", Index: 0}}
-				return btcjson.NewRescanCmd("123", addrs, ops, btcjson.String("456"))
-			},
-			marshalled: `{"jsonrpc":"1.0","method":"rescan","params":["123",["1Address"],[{"hash":"123","index":0}],"456"],"id":1}`,
-			unmarshalled: &btcjson.RescanCmd{
-				BeginBlock: "123",
-				Addresses:  []string{"1Address"},
-				OutPoints:  []btcjson.OutPoint{{Hash: "123", Index: 0}},
-				EndBlock:   btcjson.String("456"),
-			},
-		},
 		{
 			name: "loadtxfilter",
 			newCmd: func() (interface{}, error) {
@@ -13,14 +13,11 @@ import (
 
 	"github.com/daglabs/btcd/blockdag"
 	"github.com/daglabs/btcd/blockdag/indexers"
-	"github.com/daglabs/btcd/dagconfig/daghash"
 	"github.com/daglabs/btcd/database"
 	"github.com/daglabs/btcd/wire"
 	"github.com/daglabs/btcutil"
 )
 
-var zeroHash = daghash.Hash{}
-
 // importResults houses the stats and result as an import operation.
 type importResults struct {
 	blocksProcessed int64
@@ -89,7 +86,7 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
 // processBlock potentially imports the block into the database. It first
 // deserializes the raw block while checking for errors. Already known blocks
 // are skipped and orphan blocks are considered errors. Finally, it runs the
-// block through the chain rules to ensure it follows all rules and matches
+// block through the DAG rules to ensure it follows all rules and matches
 // up to the known checkpoint. Returns whether the block was imported along
 // with any potential errors.
 func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
@@ -114,16 +111,16 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
 	}
 
 	// Don't bother trying to process orphans.
-	prevHash := &block.MsgBlock().Header.PrevBlock
-	if !prevHash.IsEqual(&zeroHash) {
-		exists, err := bi.dag.HaveBlock(prevHash)
+	prevBlocks := block.MsgBlock().Header.PrevBlocks
+	if len(prevBlocks) > 0 {
+		exist, err := bi.dag.HaveBlocks(prevBlocks)
 		if err != nil {
 			return false, err
 		}
-		if !exists {
+		if !exist {
 			return false, fmt.Errorf("import file contains block "+
 				"%v which does not link to the available "+
-				"block chain", prevHash)
+				"block DAG", prevBlocks)
 		}
 	}
 
@@ -34,14 +34,13 @@ func loadBlockDB() (database.DB, error) {
 	return db, nil
 }
 
-// findCandidates searches the chain backwards for checkpoint candidates and
+// findCandidates searches the DAG backwards for checkpoint candidates and
 // returns a slice of found candidates, if any. It also stops searching for
-// candidates at the last checkpoint that is already hard coded into btcchain
-// since there is no point in finding candidates before already existing
-// checkpoints.
-func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) {
-	// Start with the latest block of the main chain.
-	block, err := dag.BlockByHash(latestHash)
+// candidates at the last checkpoint that is already hard coded since there
+// is no point in finding candidates before already existing checkpoints.
+func findCandidates(dag *blockdag.BlockDAG, selectedTipHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) {
+	// Start with the selected tip.
+	block, err := dag.BlockByHash(selectedTipHash)
 	if err != nil {
 		return nil, err
 	}
@@ -70,7 +69,7 @@ func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagcon
 	}
 
 	// For the first checkpoint, the required height is any block after the
-	// genesis block, so long as the chain has at least the required number
+	// genesis block, so long as the DAG has at least the required number
 	// of confirmations (which is enforced above).
 	if len(activeNetParams.Checkpoints) == 0 {
 		requiredHeight = 1
@@ -82,7 +81,7 @@ func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagcon
 	fmt.Print("Searching for candidates")
 	defer fmt.Println()
 
-	// Loop backwards through the chain to find checkpoint candidates.
+	// Loop backwards through the DAG to find checkpoint candidates.
 	candidates := make([]*dagconfig.Checkpoint, 0, cfg.NumCandidates)
 	numTested := int32(0)
 	for len(candidates) < cfg.NumCandidates && block.Height() > requiredHeight {
@@ -107,8 +106,9 @@ func findCandidates(dag *blockdag.BlockDAG, latestHash *daghash.Hash) ([]*dagcon
 		candidates = append(candidates, &checkpoint)
 	}
 
-	prevHash := &block.MsgBlock().Header.PrevBlock
-	block, err = dag.BlockByHash(prevHash)
+	prevBlockHashes := block.MsgBlock().Header.PrevBlocks
+	selectedBlockHash := &prevBlockHashes[0]
+	block, err = dag.BlockByHash(selectedBlockHash)
 	if err != nil {
 		return nil, err
 	}
@@ -33,6 +33,15 @@ func (hash Hash) String() string {
 	return hex.EncodeToString(hash[:])
 }
 
+func Strings(hashes []Hash) []string {
+	strings := make([]string, len(hashes))
+	for i, hash := range hashes {
+		strings[i] = hash.String()
+	}
+
+	return strings
+}
+
 // CloneBytes returns a copy of the bytes which represent the hash as a byte
 // slice.
 //
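Strings is a small convenience that the rpcserver hunks below use to turn []daghash.Hash parent and tip sets into the []string form the JSON results expect. A hedged, self-contained sketch (the hash literals are made up):

package main

import (
	"fmt"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

func main() {
	// Two made-up hashes standing in for a block's parents.
	h1, _ := daghash.NewHashFromStr("000000000000000000000000000000000000000000000000000000000000000a")
	h2, _ := daghash.NewHashFromStr("000000000000000000000000000000000000000000000000000000000000000b")

	// Strings converts each element with hash.String(), preserving order.
	fmt.Println(daghash.Strings([]daghash.Hash{*h1, *h2}))
}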
@@ -69,6 +78,27 @@ func (hash *Hash) IsEqual(target *Hash) bool {
 	return *hash == *target
 }
 
+// AreEqual returns true if both slices contain the same hashes.
+// Either slice must not contain duplicates.
+func AreEqual(first []Hash, second []Hash) bool {
+	if len(first) != len(second) {
+		return false
+	}
+
+	hashSet := make(map[Hash]bool)
+	for _, hash := range first {
+		hashSet[hash] = true
+	}
+
+	for _, hash := range second {
+		if !hashSet[hash] {
+			return false
+		}
+	}
+
+	return true
+}
+
 // NewHash returns a new Hash from a byte slice. An error is returned if
 // the number of bytes passed in is not HashSize.
 func NewHash(newHash []byte) (*Hash, error) {
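The "must not contain duplicates" caveat in the AreEqual doc comment matters: the function only compares lengths and then checks set membership, so duplicated entries can make two different slices compare as equal. A small hypothetical illustration of that edge case (not part of the commit; hash literals are made up):

package main

import (
	"fmt"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

func main() {
	a, _ := daghash.NewHashFromStr("000000000000000000000000000000000000000000000000000000000000000a")
	b, _ := daghash.NewHashFromStr("000000000000000000000000000000000000000000000000000000000000000b")

	first := []daghash.Hash{*a, *a, *b}  // duplicates a
	second := []daghash.Hash{*a, *b, *b} // duplicates b

	// Same length, and every element of second is in the set built from
	// first, so AreEqual reports true even though the slices differ.
	fmt.Println(daghash.AreEqual(first, second)) // true
}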
@@ -194,3 +194,55 @@ func TestNewHashFromStr(t *testing.T) {
 		}
 	}
 }
+
+// TestAreEqual executes tests against the AreEqual function.
+func TestAreEqual(t *testing.T) {
+	hash0, _ := NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
+	hash1, _ := NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
+	hash2, _ := NewHashFromStr("2222222222222222222222222222222222222222222222222222222222222222")
+	hash3, _ := NewHashFromStr("3333333333333333333333333333333333333333333333333333333333333333")
+	hashes0To2 := []Hash{*hash0, *hash1, *hash2}
+	hashes0To2Shifted := []Hash{*hash2, *hash0, *hash1}
+	hashes1To3 := []Hash{*hash1, *hash2, *hash3}
+	hashes0To3 := []Hash{*hash0, *hash1, *hash2, *hash3}
+
+	tests := []struct {
+		name     string
+		first    []Hash
+		second   []Hash
+		expected bool
+	}{
+		{
+			name:     "self-equality",
+			first:    hashes0To2,
+			second:   hashes0To2,
+			expected: true,
+		},
+		{
+			name:     "same members, different order",
+			first:    hashes0To2,
+			second:   hashes0To2Shifted,
+			expected: true,
+		},
+		{
+			name:     "same slice length but only some members are equal",
+			first:    hashes0To2,
+			second:   hashes1To3,
+			expected: false,
+		},
+		{
+			name:     "different slice lengths, one slice containing all the other's members",
+			first:    hashes0To3,
+			second:   hashes0To2,
+			expected: false,
+		},
+	}
+
+	for _, test := range tests {
+		result := AreEqual(test.first, test.second)
+		if result != test.expected {
+			t.Errorf("unexpected AreEqual result for"+
+				" test \"%s\". Expected: %t, got: %t.", test.name, test.expected, result)
+		}
+	}
+}
@@ -185,7 +185,7 @@ func CreateBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx,
 	var block wire.MsgBlock
 	block.Header = wire.BlockHeader{
 		Version:    blockVersion,
-		PrevBlock:  *prevHash,
+		PrevBlocks: []daghash.Hash{*prevHash},
 		MerkleRoot: *merkles[len(merkles)-1],
 		Timestamp:  ts,
 		Bits:       net.PowLimitBits,
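The wire.BlockHeader change from a single PrevBlock to a PrevBlocks slice is what lets generated blocks point at every current DAG tip, as the mining hunks below do with dagState.TipHashes. A hedged sketch of building such a header; only fields that appear in this diff are set, the hash literals are made up, and the remaining header fields are assumed to keep their zero values:

package main

import (
	"time"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/wire"
)

func main() {
	// Two made-up parent hashes; a DAG block header may now carry several.
	parent1, _ := daghash.NewHashFromStr("000000000000000000000000000000000000000000000000000000000000000a")
	parent2, _ := daghash.NewHashFromStr("000000000000000000000000000000000000000000000000000000000000000b")

	header := wire.BlockHeader{
		Version:    0x20000000,
		PrevBlocks: []daghash.Hash{*parent1, *parent2},
		Timestamp:  time.Now(),
		Bits:       0x1d00ffff,
	}
	_ = header
}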
@@ -560,7 +560,7 @@ func (m *memWallet) ConfirmedBalance() btcutil.Amount {
 }
 
 // keyToAddr maps the passed private to corresponding p2pkh address.
-func keyToAddr(key *btcec.PrivateKey, net *chaincfg.Params) (btcutil.Address, error) {
+func keyToAddr(key *btcec.PrivateKey, net *dagconfig.Params) (btcutil.Address, error) {
 	serializedKey := key.PubKey().SerializeCompressed()
 	pubKeyAddr, err := btcutil.NewAddressPubKey(serializedKey, net)
 	if err != nil {
@@ -162,9 +162,9 @@ func (m *CPUMiner) submitBlock(block *btcutil.Block) bool {
 	// a new block, but the check only happens periodically, so it is
 	// possible a block was found and submitted in between.
 	msgBlock := block.MsgBlock()
-	if !msgBlock.Header.PrevBlock.IsEqual(&m.g.GetDAGState().SelectedTip.Hash) {
+	if !daghash.AreEqual(msgBlock.Header.PrevBlocks, m.g.GetDAGState().TipHashes) {
 		log.Debugf("Block submitted via CPU miner with previous "+
-			"block %s is stale", msgBlock.Header.PrevBlock)
+			"blocks %s is stale", msgBlock.Header.PrevBlocks)
 		return false
 	}
 
@@ -248,7 +248,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32,
 
 			// The current block is stale if the DAG has changed.
 			dagState := m.g.GetDAGState()
-			if !header.PrevBlock.IsEqual(&dagState.SelectedTip.Hash) {
+			if !daghash.AreEqual(header.PrevBlocks, dagState.TipHashes) {
 				return false
 			}
 
@@ -764,7 +764,7 @@ mempoolLoop:
 	var msgBlock wire.MsgBlock
 	msgBlock.Header = wire.BlockHeader{
 		Version:    nextBlockVersion,
-		PrevBlock:  dagState.SelectedTip.Hash,
+		PrevBlocks: dagState.TipHashes,
 		MerkleRoot: *merkles[len(merkles)-1],
 		Timestamp:  ts,
 		Bits:       reqDifficulty,
@@ -783,7 +783,7 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
 		// add it to the list of headers.
 		node := headerNode{hash: &blockHash}
 		prevNode := prevNodeEl.Value.(*headerNode)
-		if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
+		if prevNode.hash.IsEqual(&blockHeader.PrevBlocks[0]) { // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
 			node.height = prevNode.height + 1
 			e := sm.headerList.PushBack(&node)
 			if sm.startHeader == nil {
@@ -1136,172 +1136,6 @@ func (r FutureRescanResult) Receive() error {
 	return err
 }
 
-// RescanAsync returns an instance of a type that can be used to get the result
-// of the RPC at some future time by invoking the Receive function on the
-// returned instance.
-//
-// See Rescan for the blocking version and more details.
-//
-// NOTE: Rescan requests are not issued on client reconnect and must be
-// performed manually (ideally with a new start height based on the last
-// rescan progress notification). See the OnClientConnected notification
-// callback for a good callsite to reissue rescan requests on connect and
-// reconnect.
-//
-// NOTE: This is a btcd extension and requires a websocket connection.
-//
-// NOTE: Deprecated. Use RescanBlocksAsync instead.
-func (c *Client) RescanAsync(startBlock *daghash.Hash,
-	addresses []btcutil.Address,
-	outpoints []*wire.OutPoint) FutureRescanResult {
-
-	// Not supported in HTTP POST mode.
-	if c.config.HTTPPostMode {
-		return newFutureError(ErrWebsocketsRequired)
-	}
-
-	// Ignore the notification if the client is not interested in
-	// notifications.
-	if c.ntfnHandlers == nil {
-		return newNilFutureResult()
-	}
-
-	// Convert block hashes to strings.
-	var startBlockHashStr string
-	if startBlock != nil {
-		startBlockHashStr = startBlock.String()
-	}
-
-	// Convert addresses to strings.
-	addrs := make([]string, 0, len(addresses))
-	for _, addr := range addresses {
-		addrs = append(addrs, addr.String())
-	}
-
-	// Convert outpoints.
-	ops := make([]btcjson.OutPoint, 0, len(outpoints))
-	for _, op := range outpoints {
-		ops = append(ops, newOutPointFromWire(op))
-	}
-
-	cmd := btcjson.NewRescanCmd(startBlockHashStr, addrs, ops, nil)
-	return c.sendCmd(cmd)
-}
-
-// Rescan rescans the block chain starting from the provided starting block to
-// the end of the longest chain for transactions that pay to the passed
-// addresses and transactions which spend the passed outpoints.
-//
-// The notifications of found transactions are delivered to the notification
-// handlers associated with client and this call will not return until the
-// rescan has completed. Calling this function has no effect if there are no
-// notification handlers and will result in an error if the client is configured
-// to run in HTTP POST mode.
-//
-// The notifications delivered as a result of this call will be via one of
-// OnRedeemingTx (for transactions which spend from the one of the
-// passed outpoints), OnRecvTx (for transactions that receive funds
-// to one of the passed addresses), and OnRescanProgress (for rescan progress
-// updates).
-//
-// See RescanEndBlock to also specify an ending block to finish the rescan
-// without continuing through the best block on the main chain.
-//
-// NOTE: Rescan requests are not issued on client reconnect and must be
-// performed manually (ideally with a new start height based on the last
-// rescan progress notification). See the OnClientConnected notification
-// callback for a good callsite to reissue rescan requests on connect and
-// reconnect.
-//
-// NOTE: This is a btcd extension and requires a websocket connection.
-//
-// NOTE: Deprecated. Use RescanBlocks instead.
-func (c *Client) Rescan(startBlock *daghash.Hash,
-	addresses []btcutil.Address,
-	outpoints []*wire.OutPoint) error {
-
-	return c.RescanAsync(startBlock, addresses, outpoints).Receive()
-}
-
-// RescanEndBlockAsync returns an instance of a type that can be used to get
-// the result of the RPC at some future time by invoking the Receive function on
-// the returned instance.
-//
-// See RescanEndBlock for the blocking version and more details.
-//
-// NOTE: This is a btcd extension and requires a websocket connection.
-//
-// NOTE: Deprecated. Use RescanBlocksAsync instead.
-func (c *Client) RescanEndBlockAsync(startBlock *daghash.Hash,
-	addresses []btcutil.Address, outpoints []*wire.OutPoint,
-	endBlock *daghash.Hash) FutureRescanResult {
-
-	// Not supported in HTTP POST mode.
-	if c.config.HTTPPostMode {
-		return newFutureError(ErrWebsocketsRequired)
-	}
-
-	// Ignore the notification if the client is not interested in
-	// notifications.
-	if c.ntfnHandlers == nil {
-		return newNilFutureResult()
-	}
-
-	// Convert block hashes to strings.
-	var startBlockHashStr, endBlockHashStr string
-	if startBlock != nil {
-		startBlockHashStr = startBlock.String()
-	}
-	if endBlock != nil {
-		endBlockHashStr = endBlock.String()
-	}
-
-	// Convert addresses to strings.
-	addrs := make([]string, 0, len(addresses))
-	for _, addr := range addresses {
-		addrs = append(addrs, addr.String())
-	}
-
-	// Convert outpoints.
-	ops := make([]btcjson.OutPoint, 0, len(outpoints))
-	for _, op := range outpoints {
-		ops = append(ops, newOutPointFromWire(op))
-	}
-
-	cmd := btcjson.NewRescanCmd(startBlockHashStr, addrs, ops,
-		&endBlockHashStr)
-	return c.sendCmd(cmd)
-}
-
-// RescanEndHeight rescans the block chain starting from the provided starting
-// block up to the provided ending block for transactions that pay to the
-// passed addresses and transactions which spend the passed outpoints.
-//
-// The notifications of found transactions are delivered to the notification
-// handlers associated with client and this call will not return until the
-// rescan has completed. Calling this function has no effect if there are no
-// notification handlers and will result in an error if the client is configured
-// to run in HTTP POST mode.
-//
-// The notifications delivered as a result of this call will be via one of
-// OnRedeemingTx (for transactions which spend from the one of the
-// passed outpoints), OnRecvTx (for transactions that receive funds
-// to one of the passed addresses), and OnRescanProgress (for rescan progress
-// updates).
-//
-// See Rescan to also perform a rescan through current end of the longest chain.
-//
-// NOTE: This is a btcd extension and requires a websocket connection.
-//
-// NOTE: Deprecated. Use RescanBlocks instead.
-func (c *Client) RescanEndHeight(startBlock *daghash.Hash,
-	addresses []btcutil.Address, outpoints []*wire.OutPoint,
-	endBlock *daghash.Hash) error {
-
-	return c.RescanEndBlockAsync(startBlock, addresses, outpoints,
-		endBlock).Receive()
-}
-
 // FutureLoadTxFilterResult is a future promise to deliver the result
 // of a LoadTxFilterAsync RPC invocation (or an applicable error).
 //
rpcserver.go (132 changed lines)
@@ -138,7 +138,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
 	"getbestblock":      handleGetBestBlock,
 	"getbestblockhash":  handleGetBestBlockHash,
 	"getblock":          handleGetBlock,
-	"getblockchaininfo": handleGetBlockChainInfo,
+	"getblockdaginfo":   handleGetBlockDAGInfo,
 	"getblockcount":     handleGetBlockCount,
 	"getblockhash":      handleGetBlockHash,
 	"getblockheader":    handleGetBlockHeader,
@@ -170,7 +170,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
 	"submitblock":     handleSubmitBlock,
 	"uptime":          handleUptime,
 	"validateaddress": handleValidateAddress,
-	"verifychain":     handleVerifyChain,
+	"verifydag":       handleVerifyDAG,
 	"verifymessage":   handleVerifyMessage,
 	"version":         handleVersion,
 }
@@ -1096,19 +1096,19 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
 	params := s.cfg.ChainParams
 	blockHeader := &blk.MsgBlock().Header
 	blockReply := btcjson.GetBlockVerboseResult{
-		Hash:          c.Hash,
-		Version:       blockHeader.Version,
-		VersionHex:    fmt.Sprintf("%08x", blockHeader.Version),
-		MerkleRoot:    blockHeader.MerkleRoot.String(),
-		PreviousHash:  blockHeader.PrevBlock.String(),
-		Nonce:         blockHeader.Nonce,
-		Time:          blockHeader.Timestamp.Unix(),
-		Confirmations: uint64(1 + dagState.SelectedTip.Height - blockHeight),
-		Height:        int64(blockHeight),
-		Size:          int32(len(blkBytes)),
-		Bits:          strconv.FormatInt(int64(blockHeader.Bits), 16),
-		Difficulty:    getDifficultyRatio(blockHeader.Bits, params),
-		NextHash:      nextHashString,
+		Hash:           c.Hash,
+		Version:        blockHeader.Version,
+		VersionHex:     fmt.Sprintf("%08x", blockHeader.Version),
+		MerkleRoot:     blockHeader.MerkleRoot.String(),
+		PreviousHashes: daghash.Strings(blockHeader.PrevBlocks),
+		Nonce:          blockHeader.Nonce,
+		Time:           blockHeader.Timestamp.Unix(),
+		Confirmations:  uint64(1 + dagState.SelectedTip.Height - blockHeight),
+		Height:         int64(blockHeight),
+		Size:           int32(len(blkBytes)),
+		Bits:           strconv.FormatInt(int64(blockHeader.Bits), 16),
+		Difficulty:     getDifficultyRatio(blockHeader.Bits, params),
+		NextHash:       nextHashString,
 	}
 
 	if c.VerboseTx == nil || !*c.VerboseTx {
@@ -1156,19 +1156,19 @@ func softForkStatus(state blockdag.ThresholdState) (string, error) {
 	}
 }
 
-// handleGetBlockChainInfo implements the getblockchaininfo command.
-func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
-	// Obtain a snapshot of the current best known blockchain state. We'll
+// handleGetBlockDAGInfo implements the getblockdaginfo command.
+func handleGetBlockDAGInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
+	// Obtain a snapshot of the current best known DAG state. We'll
 	// populate the response to this call primarily from this snapshot.
 	params := s.cfg.ChainParams
-	chain := s.cfg.DAG
-	dagState := chain.GetDAGState()
+	dag := s.cfg.DAG
+	dagState := dag.GetDAGState()
 
-	chainInfo := &btcjson.GetBlockChainInfoResult{
-		Chain:         params.Name,
+	chainInfo := &btcjson.GetBlockDAGInfoResult{
+		DAG:           params.Name,
 		Blocks:        dagState.SelectedTip.Height,
 		Headers:       dagState.SelectedTip.Height,
-		BestBlockHash: dagState.SelectedTip.Hash.String(),
+		TipHashes:     daghash.Strings(dagState.TipHashes),
 		Difficulty:    getDifficultyRatio(dagState.SelectedTip.Bits, params),
 		MedianTime:    dagState.SelectedTip.MedianTime.Unix(),
 		Pruned:        false,
@@ -1218,9 +1218,9 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str
 			}
 		}
 
-		// Query the chain for the current status of the deployment as
+		// Query the dag for the current status of the deployment as
 		// identified by its deployment ID.
-		deploymentStatus, err := chain.ThresholdState(uint32(deployment))
+		deploymentStatus, err := dag.ThresholdState(uint32(deployment))
 		if err != nil {
 			context := "Failed to obtain deployment status"
 			return nil, internalRPCError(err.Error(), context)
@@ -1323,18 +1323,18 @@ func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct
 
 	params := s.cfg.ChainParams
 	blockHeaderReply := btcjson.GetBlockHeaderVerboseResult{
-		Hash:          c.Hash,
-		Confirmations: uint64(1 + dagState.SelectedTip.Height - blockHeight),
-		Height:        blockHeight,
-		Version:       blockHeader.Version,
-		VersionHex:    fmt.Sprintf("%08x", blockHeader.Version),
-		MerkleRoot:    blockHeader.MerkleRoot.String(),
-		NextHash:      nextHashString,
-		PreviousHash:  blockHeader.PrevBlock.String(),
-		Nonce:         uint64(blockHeader.Nonce),
-		Time:          blockHeader.Timestamp.Unix(),
-		Bits:          strconv.FormatInt(int64(blockHeader.Bits), 16),
-		Difficulty:    getDifficultyRatio(blockHeader.Bits, params),
+		Hash:           c.Hash,
+		Confirmations:  uint64(1 + dagState.SelectedTip.Height - blockHeight),
+		Height:         blockHeight,
+		Version:        blockHeader.Version,
+		VersionHex:     fmt.Sprintf("%08x", blockHeader.Version),
+		MerkleRoot:     blockHeader.MerkleRoot.String(),
+		NextHash:       nextHashString,
+		PreviousHashes: daghash.Strings(blockHeader.PrevBlocks),
+		Nonce:          uint64(blockHeader.Nonce),
+		Time:           blockHeader.Timestamp.Unix(),
+		Bits:           strconv.FormatInt(int64(blockHeader.Bits), 16),
+		Difficulty:     getDifficultyRatio(blockHeader.Bits, params),
 	}
 	return blockHeaderReply, nil
 }
@@ -1695,22 +1695,22 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld
 	targetDifficulty := fmt.Sprintf("%064x", blockdag.CompactToBig(header.Bits))
 	templateID := encodeTemplateID(state.prevHash, state.lastGenerated)
 	reply := btcjson.GetBlockTemplateResult{
-		Bits:         strconv.FormatInt(int64(header.Bits), 16),
-		CurTime:      header.Timestamp.Unix(),
-		Height:       int64(template.Height),
-		PreviousHash: header.PrevBlock.String(),
-		SigOpLimit:   blockdag.MaxSigOpsPerBlock,
-		SizeLimit:    wire.MaxBlockPayload,
-		Transactions: transactions,
-		Version:      header.Version,
-		LongPollID:   templateID,
-		SubmitOld:    submitOld,
-		Target:       targetDifficulty,
-		MinTime:      state.minTimestamp.Unix(),
-		MaxTime:      maxTime.Unix(),
-		Mutable:      gbtMutableFields,
-		NonceRange:   gbtNonceRange,
-		Capabilities: gbtCapabilities,
+		Bits:           strconv.FormatInt(int64(header.Bits), 16),
+		CurTime:        header.Timestamp.Unix(),
+		Height:         int64(template.Height),
+		PreviousHashes: daghash.Strings(header.PrevBlocks),
+		SigOpLimit:     blockdag.MaxSigOpsPerBlock,
+		SizeLimit:      wire.MaxBlockPayload,
+		Transactions:   transactions,
+		Version:        header.Version,
+		LongPollID:     templateID,
+		SubmitOld:      submitOld,
+		Target:         targetDifficulty,
+		MinTime:        state.minTimestamp.Unix(),
+		MaxTime:        maxTime.Unix(),
+		Mutable:        gbtMutableFields,
+		NonceRange:     gbtNonceRange,
+		Capabilities:   gbtCapabilities,
 	}
 
 	if useCoinbaseValue {
@@ -1790,7 +1790,7 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase
 	// Return the block template now if the specific block template
 	// identified by the long poll ID no longer matches the current block
 	// template as this means the provided template is stale.
-	prevTemplateHash := &state.template.Block.Header.PrevBlock
+	prevTemplateHash := &state.template.Block.Header.PrevBlocks[0] // TODO: (Stas) This is probably wrong. Modified only to satisfy compilation
 	if !prevHash.IsEqual(prevTemplateHash) ||
 		lastGenerated != state.lastGenerated.Unix() {
 
@@ -1838,7 +1838,7 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase
 	// Include whether or not it is valid to submit work against the old
 	// block template depending on whether or not a solution has already
 	// been found and added to the block chain.
-	submitOld := prevHash.IsEqual(&state.template.Block.Header.PrevBlock)
+	submitOld := prevHash.IsEqual(&state.template.Block.Header.PrevBlocks[0]) // TODO: (Stas) This is probably wrong. Modified only to satisfy compilation
 	result, err := state.blockTemplateResult(useCoinbaseValue, &submitOld)
 	if err != nil {
 		return nil, err
@@ -2064,10 +2064,10 @@ func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateReque
 	}
 	block := btcutil.NewBlock(&msgBlock)
 
-	// Ensure the block is building from the expected previous block.
-	expectedPrevHash := s.cfg.DAG.GetDAGState().SelectedTip.Hash
-	prevHash := &block.MsgBlock().Header.PrevBlock
-	if !expectedPrevHash.IsEqual(prevHash) {
+	// Ensure the block is building from the expected previous blocks.
+	expectedPrevHashes := s.cfg.DAG.GetDAGState().TipHashes
+	prevHashes := block.MsgBlock().Header.PrevBlocks
+	if !daghash.AreEqual(expectedPrevHashes, prevHashes) {
 		return "bad-prevblk", nil
 	}
 
@@ -2246,7 +2246,7 @@ func handleGetHeaders(s *rpcServer, cmd interface{}, closeChan <-chan struct{})
 // that are not related to wallet functionality.
 func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 	dagState := s.cfg.DAG.GetDAGState()
-	ret := &btcjson.InfoChainResult{
+	ret := &btcjson.InfoDAGResult{
 		Version:         int32(1000000*appMajor + 10000*appMinor + 100*appPatch),
 		ProtocolVersion: int32(maxProtocolVersion),
 		Blocks:          dagState.SelectedTip.Height,
@@ -3399,7 +3399,7 @@ func handleUptime(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (int
 func handleValidateAddress(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 	c := cmd.(*btcjson.ValidateAddressCmd)
 
-	result := btcjson.ValidateAddressChainResult{}
+	result := btcjson.ValidateAddressResult{}
 	addr, err := btcutil.DecodeAddress(c.Address, s.cfg.ChainParams)
 	if err != nil {
 		// Return the default value (false) for IsValid.
@@ -3412,7 +3412,7 @@ func handleValidateAddress(s *rpcServer, cmd interface{}, closeChan <-chan struc
 	return result, nil
 }
 
-func verifyChain(s *rpcServer, level, depth int32) error {
+func verifyDAG(s *rpcServer, level, depth int32) error {
 	dagState := s.cfg.DAG.GetDAGState()
 	finishHeight := dagState.SelectedTip.Height - depth
 	if finishHeight < 0 {
@@ -3447,9 +3447,9 @@ func verifyChain(s *rpcServer, level, depth int32) error {
 	return nil
 }
 
-// handleVerifyChain implements the verifychain command.
-func handleVerifyChain(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
-	c := cmd.(*btcjson.VerifyChainCmd)
+// handleVerifyDAG implements the verifydag command.
+func handleVerifyDAG(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
+	c := cmd.(*btcjson.VerifyDAGCmd)
 
 	var checkLevel, checkDepth int32
 	if c.CheckLevel != nil {
@@ -3459,7 +3459,7 @@ func handleVerifyChain(s *rpcServer, cmd interface{}, closeChan <-chan struct{})
 		checkDepth = *c.CheckDepth
 	}
 
-	err := verifyChain(s, checkLevel, checkDepth)
+	err := verifyDAG(s, checkLevel, checkDepth)
 	return err == nil, nil
 }
 
rpcserverhelp.go (176 changed lines)
@@ -167,24 +167,24 @@ var helpDescsEnUS = map[string]string{
 	"getblock--result0": "Hex-encoded bytes of the serialized block",
 
 	// GetBlockChainInfoCmd help.
-	"getblockchaininfo--synopsis": "Returns information about the current blockchain state and the status of any active soft-fork deployments.",
+	"getblockdaginfo--synopsis": "Returns information about the current blockDAG state and the status of any active soft-fork deployments.",
 
-	// GetBlockChainInfoResult help.
-	"getblockchaininforesult-chain": "The name of the chain the daemon is on (testnet, mainnet, etc)",
-	"getblockchaininforesult-blocks": "The number of blocks in the best known chain",
-	"getblockchaininforesult-headers": "The number of headers that we've gathered for in the best known chain",
-	"getblockchaininforesult-bestblockhash": "The block hash for the latest block in the main chain",
-	"getblockchaininforesult-difficulty": "The current chain difficulty",
-	"getblockchaininforesult-mediantime": "The median time from the PoV of the best block in the chain",
-	"getblockchaininforesult-verificationprogress": "An estimate for how much of the best chain we've verified",
-	"getblockchaininforesult-pruned": "A bool that indicates if the node is pruned or not",
-	"getblockchaininforesult-pruneheight": "The lowest block retained in the current pruned chain",
-	"getblockchaininforesult-chainwork": "The total cumulative work in the best chain",
-	"getblockchaininforesult-softforks": "The status of the super-majority soft-forks",
-	"getblockchaininforesult-bip9_softforks": "JSON object describing active BIP0009 deployments",
-	"getblockchaininforesult-bip9_softforks--key": "bip9_softforks",
-	"getblockchaininforesult-bip9_softforks--value": "An object describing a particular BIP009 deployment",
-	"getblockchaininforesult-bip9_softforks--desc": "The status of any defined BIP0009 soft-fork deployments",
+	// GetBlockDAGInfoResult help.
+	"getblockdaginforesult-dag": "The name of the DAG the daemon is on (testnet, mainnet, etc)",
+	"getblockdaginforesult-blocks": "The number of blocks in the best known chain",
+	"getblockdaginforesult-headers": "The number of headers that we've gathered for in the best known chain",
+	"getblockdaginforesult-tiphashes": "The block hashes for the tips in the DAG",
+	"getblockdaginforesult-difficulty": "The current chain difficulty",
+	"getblockdaginforesult-mediantime": "The median time from the PoV of the best block in the chain",
+	"getblockdaginforesult-verificationprogress": "An estimate for how much of the best chain we've verified",
+	"getblockdaginforesult-pruned": "A bool that indicates if the node is pruned or not",
+	"getblockdaginforesult-pruneheight": "The lowest block retained in the current pruned chain",
+	"getblockdaginforesult-dagwork": "The total cumulative work in the DAG",
+	"getblockdaginforesult-softforks": "The status of the super-majority soft-forks",
+	"getblockdaginforesult-bip9_softforks": "JSON object describing active BIP0009 deployments",
+	"getblockdaginforesult-bip9_softforks--key": "bip9_softforks",
+	"getblockdaginforesult-bip9_softforks--value": "An object describing a particular BIP009 deployment",
+	"getblockdaginforesult-bip9_softforks--desc": "The status of any defined BIP0009 soft-fork deployments",
 
 	// SoftForkDescription help.
 	"softforkdescription-reject": "The current activation status of the softfork",
@@ -221,21 +221,21 @@ var helpDescsEnUS = map[string]string{
 	"searchrawtransactionsresult-size": "The size of the transaction in bytes",
 
 	// GetBlockVerboseResult help.
-	"getblockverboseresult-hash": "The hash of the block (same as provided)",
-	"getblockverboseresult-confirmations": "The number of confirmations",
-	"getblockverboseresult-size": "The size of the block",
-	"getblockverboseresult-height": "The height of the block in the block chain",
-	"getblockverboseresult-version": "The block version",
-	"getblockverboseresult-versionHex": "The block version in hexadecimal",
-	"getblockverboseresult-merkleroot": "Root hash of the merkle tree",
-	"getblockverboseresult-tx": "The transaction hashes (only when verbosetx=false)",
-	"getblockverboseresult-rawtx": "The transactions as JSON objects (only when verbosetx=true)",
-	"getblockverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
-	"getblockverboseresult-nonce": "The block nonce",
-	"getblockverboseresult-bits": "The bits which represent the block difficulty",
-	"getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
-	"getblockverboseresult-previousblockhash": "The hash of the previous block",
-	"getblockverboseresult-nextblockhash": "The hash of the next block (only if there is one)",
+	"getblockverboseresult-hash": "The hash of the block (same as provided)",
+	"getblockverboseresult-confirmations": "The number of confirmations",
+	"getblockverboseresult-size": "The size of the block",
+	"getblockverboseresult-height": "The height of the block in the block chain",
+	"getblockverboseresult-version": "The block version",
+	"getblockverboseresult-versionHex": "The block version in hexadecimal",
+	"getblockverboseresult-merkleroot": "Root hash of the merkle tree",
+	"getblockverboseresult-tx": "The transaction hashes (only when verbosetx=false)",
+	"getblockverboseresult-rawtx": "The transactions as JSON objects (only when verbosetx=true)",
+	"getblockverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
+	"getblockverboseresult-nonce": "The block nonce",
+	"getblockverboseresult-bits": "The bits which represent the block difficulty",
+	"getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
+	"getblockverboseresult-previousblockhashes": "The hashes of the previous blocks",
+	"getblockverboseresult-nextblockhash": "The hash of the next block (only if there is one)",
 
 	// GetBlockCountCmd help.
 	"getblockcount--synopsis": "Returns the number of blocks in the longest block chain.",
@@ -255,18 +255,18 @@ var helpDescsEnUS = map[string]string{
 	"getblockheader--result0": "The block header hash",
 
 	// GetBlockHeaderVerboseResult help.
-	"getblockheaderverboseresult-hash": "The hash of the block (same as provided)",
-	"getblockheaderverboseresult-confirmations": "The number of confirmations",
-	"getblockheaderverboseresult-height": "The height of the block in the block chain",
-	"getblockheaderverboseresult-version": "The block version",
-	"getblockheaderverboseresult-versionHex": "The block version in hexadecimal",
-	"getblockheaderverboseresult-merkleroot": "Root hash of the merkle tree",
-	"getblockheaderverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
-	"getblockheaderverboseresult-nonce": "The block nonce",
-	"getblockheaderverboseresult-bits": "The bits which represent the block difficulty",
-	"getblockheaderverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
-	"getblockheaderverboseresult-previousblockhash": "The hash of the previous block",
-	"getblockheaderverboseresult-nextblockhash": "The hash of the next block (only if there is one)",
+	"getblockheaderverboseresult-hash": "The hash of the block (same as provided)",
+	"getblockheaderverboseresult-confirmations": "The number of confirmations",
+	"getblockheaderverboseresult-height": "The height of the block in the block chain",
+	"getblockheaderverboseresult-version": "The block version",
+	"getblockheaderverboseresult-versionHex": "The block version in hexadecimal",
+	"getblockheaderverboseresult-merkleroot": "Root hash of the merkle tree",
+	"getblockheaderverboseresult-time": "The block time in seconds since 1 Jan 1970 GMT",
+	"getblockheaderverboseresult-nonce": "The block nonce",
+	"getblockheaderverboseresult-bits": "The bits which represent the block difficulty",
+	"getblockheaderverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
+	"getblockheaderverboseresult-previousblockhashes": "The hashes of the previous blocks",
+	"getblockheaderverboseresult-nextblockhash": "The hash of the next block (only if there is one)",
 
 	// TemplateRequest help.
 	"templaterequest-mode": "This is 'template', 'proposal', or omitted",
@@ -290,29 +290,29 @@ var helpDescsEnUS = map[string]string{
 	"getblocktemplateresultaux-flags": "Hex-encoded byte-for-byte data to include in the coinbase signature script",
 
 	// GetBlockTemplateResult help.
-	"getblocktemplateresult-bits": "Hex-encoded compressed difficulty",
-	"getblocktemplateresult-curtime": "Current time as seen by the server (recommended for block time); must fall within mintime/maxtime rules",
-	"getblocktemplateresult-height": "Height of the block to be solved",
-	"getblocktemplateresult-previousblockhash": "Hex-encoded big-endian hash of the previous block",
-	"getblocktemplateresult-sigoplimit": "Number of sigops allowed in blocks ",
-	"getblocktemplateresult-sizelimit": "Number of bytes allowed in blocks",
-	"getblocktemplateresult-transactions": "Array of transactions as JSON objects",
-	"getblocktemplateresult-version": "The block version",
-	"getblocktemplateresult-coinbaseaux": "Data that should be included in the coinbase signature script",
-	"getblocktemplateresult-coinbasetxn": "Information about the coinbase transaction",
-	"getblocktemplateresult-coinbasevalue": "Total amount available for the coinbase in Satoshi",
-	"getblocktemplateresult-workid": "This value must be returned with result if provided (not provided)",
-	"getblocktemplateresult-longpollid": "Identifier for long poll request which allows monitoring for expiration",
-	"getblocktemplateresult-longpolluri": "An alternate URI to use for long poll requests if provided (not provided)",
-	"getblocktemplateresult-submitold": "Not applicable",
-	"getblocktemplateresult-target": "Hex-encoded big-endian number which valid results must be less than",
-	"getblocktemplateresult-expires": "Maximum number of seconds (starting from when the server sent the response) this work is valid for",
-	"getblocktemplateresult-maxtime": "Maximum allowed time",
-	"getblocktemplateresult-mintime": "Minimum allowed time",
-	"getblocktemplateresult-mutable": "List of mutations the server explicitly allows",
-	"getblocktemplateresult-noncerange": "Two concatenated hex-encoded big-endian 32-bit integers which represent the valid ranges of nonces the miner may scan",
-	"getblocktemplateresult-capabilities": "List of server capabilities including 'proposal' to indicate support for block proposals",
-	"getblocktemplateresult-reject-reason": "Reason the proposal was invalid as-is (only applies to proposal responses)",
+	"getblocktemplateresult-bits": "Hex-encoded compressed difficulty",
+	"getblocktemplateresult-curtime": "Current time as seen by the server (recommended for block time); must fall within mintime/maxtime rules",
+	"getblocktemplateresult-height": "Height of the block to be solved",
+	"getblocktemplateresult-previousblockhashes": "Hex-encoded big-endian hashes of the previous blocks",
+	"getblocktemplateresult-sigoplimit": "Number of sigops allowed in blocks ",
+	"getblocktemplateresult-sizelimit": "Number of bytes allowed in blocks",
+	"getblocktemplateresult-transactions": "Array of transactions as JSON objects",
+	"getblocktemplateresult-version": "The block version",
+	"getblocktemplateresult-coinbaseaux": "Data that should be included in the coinbase signature script",
+	"getblocktemplateresult-coinbasetxn": "Information about the coinbase transaction",
+	"getblocktemplateresult-coinbasevalue": "Total amount available for the coinbase in Satoshi",
+	"getblocktemplateresult-workid": "This value must be returned with result if provided (not provided)",
+	"getblocktemplateresult-longpollid": "Identifier for long poll request which allows monitoring for expiration",
+	"getblocktemplateresult-longpolluri": "An alternate URI to use for long poll requests if provided (not provided)",
+	"getblocktemplateresult-submitold": "Not applicable",
+	"getblocktemplateresult-target": "Hex-encoded big-endian number which valid results must be less than",
+	"getblocktemplateresult-expires": "Maximum number of seconds (starting from when the server sent the response) this work is valid for",
+	"getblocktemplateresult-maxtime": "Maximum allowed time",
+	"getblocktemplateresult-mintime": "Minimum allowed time",
+	"getblocktemplateresult-mutable": "List of mutations the server explicitly allows",
+	"getblocktemplateresult-noncerange": "Two concatenated hex-encoded big-endian 32-bit integers which represent the valid ranges of nonces the miner may scan",
+	"getblocktemplateresult-capabilities": "List of server capabilities including 'proposal' to indicate support for block proposals",
+	"getblocktemplateresult-reject-reason": "Reason the proposal was invalid as-is (only applies to proposal responses)",
 
 	// GetBlockTemplateCmd help.
 	"getblocktemplate--synopsis": "Returns a JSON object with information necessary to construct a block to mine or accepts a proposal to validate.\n" +
@@ -355,17 +355,17 @@ var helpDescsEnUS = map[string]string{
 	"gethashespersec--synopsis": "Returns a recent hashes per second performance measurement while generating coins (mining).",
 	"gethashespersec--result0": "The number of hashes per second",
 
-	// InfoChainResult help.
-	"infochainresult-version": "The version of the server",
-	"infochainresult-protocolversion": "The latest supported protocol version",
-	"infochainresult-blocks": "The number of blocks processed",
-	"infochainresult-timeoffset": "The time offset",
-	"infochainresult-connections": "The number of connected peers",
-	"infochainresult-proxy": "The proxy used by the server",
-	"infochainresult-difficulty": "The current target difficulty",
-	"infochainresult-testnet": "Whether or not server is using testnet",
-	"infochainresult-relayfee": "The minimum relay fee for non-free transactions in BTC/KB",
-	"infochainresult-errors": "Any current errors",
+	// InfoDAGResult help.
+	"infodagresult-version": "The version of the server",
+	"infodagresult-protocolversion": "The latest supported protocol version",
+	"infodagresult-blocks": "The number of blocks processed",
+	"infodagresult-timeoffset": "The time offset",
+	"infodagresult-connections": "The number of connected peers",
+	"infodagresult-proxy": "The proxy used by the server",
+	"infodagresult-difficulty": "The current target difficulty",
+	"infodagresult-testnet": "Whether or not server is using testnet",
+	"infodagresult-relayfee": "The minimum relay fee for non-free transactions in BTC/KB",
+	"infodagresult-errors": "Any current errors",
 
 	// InfoWalletResult help.
 	"infowalletresult-version": "The version of the server",
@ -551,22 +551,22 @@ var helpDescsEnUS = map[string]string{
|
||||
"submitblock--result1": "The reason the block was rejected",
|
||||
|
||||
// ValidateAddressResult help.
|
||||
"validateaddresschainresult-isvalid": "Whether or not the address is valid",
|
||||
"validateaddresschainresult-address": "The bitcoin address (only when isvalid is true)",
|
||||
"validateaddressresult-isvalid": "Whether or not the address is valid",
|
||||
"validateaddressresult-address": "The bitcoin address (only when isvalid is true)",
|
||||
|
||||
// ValidateAddressCmd help.
|
||||
"validateaddress--synopsis": "Verify an address is valid.",
|
||||
"validateaddress-address": "Bitcoin address to validate",
|
||||
|
||||
// VerifyChainCmd help.
|
||||
"verifychain--synopsis": "Verifies the block chain database.\n" +
|
||||
"verifydag--synopsis": "Verifies the block DAG database.\n" +
|
||||
"The actual checks performed by the checklevel parameter are implementation specific.\n" +
|
||||
"For btcd this is:\n" +
|
||||
"checklevel=0 - Look up each block and ensure it can be loaded from the database.\n" +
|
||||
"checklevel=1 - Perform basic context-free sanity checks on each block.",
|
||||
"verifychain-checklevel": "How thorough the block verification is",
|
||||
"verifychain-checkdepth": "The number of blocks to check",
|
||||
"verifychain--result0": "Whether or not the chain verified",
|
||||
"verifydag-checklevel": "How thorough the block verification is",
|
||||
"verifydag-checkdepth": "The number of blocks to check",
|
||||
"verifydag--result0": "Whether or not the DAG verified",
|
||||
|
||||
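For orientation, the verifydag help entries above describe a method that takes a checklevel and checkdepth and returns a boolean. A minimal standalone sketch of invoking it over HTTP JSON-RPC follows; the listen address, credentials, and use of plain HTTP (rather than the TLS the node may require) are all placeholder assumptions, not part of this commit:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Build a raw JSON-RPC request for verifydag with checklevel=1 and
	// checkdepth=100. Address and credentials below are placeholders.
	payload, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "1.0",
		"id":      1,
		"method":  "verifydag",
		"params":  []interface{}{1, 100},
	})
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", "http://127.0.0.1:8334", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("rpcuser", "rpcpass")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The result is the boolean described by "verifydag--result0".
	var reply struct {
		Result bool            `json:"result"`
		Error  json.RawMessage `json:"error"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		panic(err)
	}
	fmt.Printf("DAG verified: %v (error: %s)\n", reply.Result, reply.Error)
}
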
// VerifyMessageCmd help.
"verifymessage--synopsis": "Verify a signed message.",
@ -678,7 +678,7 @@ var rpcResultTypes = map[string][]interface{}{
"getblockhash": {(*string)(nil)},
"getblockheader": {(*string)(nil), (*btcjson.GetBlockHeaderVerboseResult)(nil)},
"getblocktemplate": {(*btcjson.GetBlockTemplateResult)(nil), (*string)(nil), nil},
"getblockchaininfo": {(*btcjson.GetBlockChainInfoResult)(nil)},
"getblockdaginfo": {(*btcjson.GetBlockDAGInfoResult)(nil)},
"getcfilter": {(*string)(nil)},
"getcfilterheader": {(*string)(nil)},
"getconnectioncount": {(*int32)(nil)},
@ -687,7 +687,7 @@ var rpcResultTypes = map[string][]interface{}{
"getgenerate": {(*bool)(nil)},
"gethashespersec": {(*float64)(nil)},
"getheaders": {(*[]string)(nil)},
"getinfo": {(*btcjson.InfoChainResult)(nil)},
"getinfo": {(*btcjson.InfoDAGResult)(nil)},
"getmempoolinfo": {(*btcjson.GetMempoolInfoResult)(nil)},
"getmininginfo": {(*btcjson.GetMiningInfoResult)(nil)},
"getnettotals": {(*btcjson.GetNetTotalsResult)(nil)},
@ -705,8 +705,8 @@ var rpcResultTypes = map[string][]interface{}{
"stop": {(*string)(nil)},
"submitblock": {nil, (*string)(nil)},
"uptime": {(*int64)(nil)},
"validateaddress": {(*btcjson.ValidateAddressChainResult)(nil)},
"verifychain": {(*bool)(nil)},
"validateaddress": {(*btcjson.ValidateAddressResult)(nil)},
"verifydag": {(*bool)(nil)},
"verifymessage": {(*bool)(nil)},
"version": {(*map[string]btcjson.VersionResult)(nil)},

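A note on the rpcResultTypes entries above: each element is a nil pointer cast to a concrete type, such as (*bool)(nil). The pointer carries its declared type even though it points at nothing, which is presumably all the help machinery needs to describe a method's possible results. A small self-contained illustration of that idiom (generic example, not code from this commit):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Typed nil pointers, mirroring the shape of an rpcResultTypes entry
	// such as {(*string)(nil), (*bool)(nil), nil}.
	resultTypes := []interface{}{(*string)(nil), (*bool)(nil), nil}

	for _, rt := range resultTypes {
		if rt == nil {
			// A bare nil means the method may return no result at all.
			fmt.Println("no result")
			continue
		}
		// Even though the pointer is nil, reflection still recovers the
		// element type it was declared with.
		fmt.Println("possible result type:", reflect.TypeOf(rt).Elem())
	}
}
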
536
rpcwebsocket.go
@ -16,7 +16,6 @@ import (
"errors"
"fmt"
"io"
"math"
"sync"
"time"

@ -26,7 +25,6 @@ import (
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/wire"
"github.com/daglabs/btcutil"
@ -75,7 +73,6 @@ var wsHandlersBeforeInit = map[string]wsCommandHandler{
"stopnotifynewtransactions": handleStopNotifyNewTransactions,
"stopnotifyspent": handleStopNotifySpent,
"stopnotifyreceived": handleStopNotifyReceived,
"rescan": handleRescan,
"rescanblocks": handleRescanBlocks,
}

@ -1977,168 +1974,6 @@ func deserializeOutpoints(serializedOuts []btcjson.OutPoint) ([]*wire.OutPoint,
return outpoints, nil
}

type rescanKeys struct {
fallbacks map[string]struct{}
pubKeyHashes map[[ripemd160.Size]byte]struct{}
scriptHashes map[[ripemd160.Size]byte]struct{}
compressedPubKeys map[[33]byte]struct{}
uncompressedPubKeys map[[65]byte]struct{}
unspent map[wire.OutPoint]struct{}
}

// unspentSlice returns a slice of currently-unspent outpoints for the rescan
// lookup keys. This is primarily intended to be used to register outpoints
// for continuous notifications after a rescan has completed.
func (r *rescanKeys) unspentSlice() []*wire.OutPoint {
ops := make([]*wire.OutPoint, 0, len(r.unspent))
for op := range r.unspent {
opCopy := op
ops = append(ops, &opCopy)
}
return ops
}

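The opCopy line in unspentSlice above matters: before Go 1.22, the range variable is reused on every iteration, so appending &op directly would leave every slice element pointing at the same variable. A standalone illustration of the difference (generic example, not taken from this commit), assuming pre-1.22 loop semantics:

package main

import "fmt"

func main() {
	values := map[string]int{"a": 1, "b": 2, "c": 3}

	// Buggy under pre-Go 1.22 semantics: every appended pointer refers
	// to the single reused loop variable, so all entries end up showing
	// whichever key was iterated last.
	var wrong []*string
	for k := range values {
		wrong = append(wrong, &k)
	}

	// Correct: copy the key into a fresh variable each iteration, the
	// same pattern unspentSlice uses with opCopy.
	var right []*string
	for k := range values {
		kCopy := k
		right = append(right, &kCopy)
	}

	fmt.Println("wrong:", *wrong[0], *wrong[1], *wrong[2])
	fmt.Println("right:", *right[0], *right[1], *right[2])
}
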
// ErrRescanReorg defines the error that is returned when an unrecoverable
// reorganize is detected during a rescan.
var ErrRescanReorg = btcjson.RPCError{
Code: btcjson.ErrRPCDatabase,
Message: "Reorganize",
}

// rescanBlock rescans all transactions in a single block. This is a helper
// function for handleRescan.
func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) {
for _, tx := range blk.Transactions() {
// Hexadecimal representation of this tx. Only created if
// needed, and reused for later notifications if already made.
var txHex string

// All inputs and outputs must be iterated through to correctly
// modify the unspent map, however, just a single notification
// for any matching transaction inputs or outputs should be
// created and sent.
spentNotified := false
recvNotified := false

for _, txin := range tx.MsgTx().TxIn {
if _, ok := lookups.unspent[txin.PreviousOutPoint]; ok {
delete(lookups.unspent, txin.PreviousOutPoint)

if spentNotified {
continue
}

if txHex == "" {
txHex = txHexString(tx.MsgTx())
}
marshalledJSON, err := newRedeemingTxNotification(txHex, tx.Index(), blk)
if err != nil {
rpcsLog.Errorf("Failed to marshal redeemingtx notification: %v", err)
continue
}

err = wsc.QueueNotification(marshalledJSON)
// Stop the rescan early if the websocket client
// disconnected.
if err == ErrClientQuit {
return
}
spentNotified = true
}
}

for txOutIdx, txout := range tx.MsgTx().TxOut {
_, addrs, _, _ := txscript.ExtractPkScriptAddrs(
txout.PkScript, wsc.server.cfg.ChainParams)

for _, addr := range addrs {
switch a := addr.(type) {
case *btcutil.AddressPubKeyHash:
if _, ok := lookups.pubKeyHashes[*a.Hash160()]; !ok {
continue
}

case *btcutil.AddressScriptHash:
if _, ok := lookups.scriptHashes[*a.Hash160()]; !ok {
continue
}

case *btcutil.AddressPubKey:
found := false
switch sa := a.ScriptAddress(); len(sa) {
case 33: // Compressed
var key [33]byte
copy(key[:], sa)
if _, ok := lookups.compressedPubKeys[key]; ok {
found = true
}

case 65: // Uncompressed
var key [65]byte
copy(key[:], sa)
if _, ok := lookups.uncompressedPubKeys[key]; ok {
found = true
}

default:
rpcsLog.Warnf("Skipping rescanned pubkey of unknown "+
"serialized length %d", len(sa))
continue
}

// If the transaction output pays to the pubkey of
// a rescanned P2PKH address, include it as well.
if !found {
pkh := a.AddressPubKeyHash()
if _, ok := lookups.pubKeyHashes[*pkh.Hash160()]; !ok {
continue
}
}

default:
// A new address type must have been added. Encode as a
// payment address string and check the fallback map.
addrStr := addr.EncodeAddress()
_, ok := lookups.fallbacks[addrStr]
if !ok {
continue
}
}

outpoint := wire.OutPoint{
Hash: *tx.Hash(),
Index: uint32(txOutIdx),
}
lookups.unspent[outpoint] = struct{}{}

if recvNotified {
continue
}

if txHex == "" {
txHex = txHexString(tx.MsgTx())
}
ntfn := btcjson.NewRecvTxNtfn(txHex,
blockDetails(blk, tx.Index()))

marshalledJSON, err := btcjson.MarshalCmd(nil, ntfn)
if err != nil {
rpcsLog.Errorf("Failed to marshal recvtx notification: %v", err)
return
}

err = wsc.QueueNotification(marshalledJSON)
// Stop the rescan early if the websocket client
// disconnected.
if err == ErrClientQuit {
return
}
recvNotified = true
}
}
}
}

// rescanBlockFilter rescans a block for any relevant transactions for the
// passed lookup keys. Any discovered transactions are returned hex encoded as
// a string slice.
@ -2248,7 +2083,7 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
Message: "Failed to fetch block: " + err.Error(),
}
}
if lastBlockHash != nil && block.MsgBlock().Header.PrevBlock != *lastBlockHash {
if lastBlockHash != nil && block.MsgBlock().Header.PrevBlocks[0] != *lastBlockHash { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCInvalidParameter,
Message: fmt.Sprintf("Block %v is not a child of %v",
@ -2269,375 +2104,6 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
return &discoveredData, nil
}

// recoverFromReorg attempts to recover from a detected reorganize during a
// rescan. It fetches a new range of block shas from the database and
// verifies that the new range of blocks is on the same fork as a previous
// range of blocks. If this condition does not hold true, the JSON-RPC error
// for an unrecoverable reorganize is returned.
func recoverFromReorg(dag *blockdag.BlockDAG, minBlock, maxBlock int32,
lastBlock *daghash.Hash) ([]daghash.Hash, error) {

hashList, err := dag.HeightRange(minBlock, maxBlock)
if err != nil {
rpcsLog.Errorf("Error looking up block range: %v", err)
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCDatabase,
Message: "Database error: " + err.Error(),
}
}
if lastBlock == nil || len(hashList) == 0 {
return hashList, nil
}

blk, err := dag.BlockByHash(&hashList[0])
if err != nil {
rpcsLog.Errorf("Error looking up possibly reorged block: %v",
err)
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCDatabase,
Message: "Database error: " + err.Error(),
}
}
jsonErr := descendantBlock(lastBlock, blk)
if jsonErr != nil {
return nil, jsonErr
}
return hashList, nil
}

// descendantBlock returns the appropriate JSON-RPC error if a current block
// fetched during a reorganize is not a direct child of the parent block hash.
func descendantBlock(prevHash *daghash.Hash, curBlock *btcutil.Block) error {
curHash := &curBlock.MsgBlock().Header.PrevBlock
if !prevHash.IsEqual(curHash) {
rpcsLog.Errorf("Stopping rescan for reorged block %v "+
"(replaced by block %v)", prevHash, curHash)
return &ErrRescanReorg
}
return nil
}

// handleRescan implements the rescan command extension for websocket
// connections.
//
// NOTE: This does not smartly handle reorgs, and fixing requires database
// changes (for safe, concurrent access to full block ranges, and support
// for other chains than the best chain). It will, however, detect whether
// a reorg removed a block that was previously processed, and result in the
// handler erroring. Clients must handle this by finding a block still in
// the chain (perhaps from a rescanprogress notification) to resume their
// rescan.
func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) {
cmd, ok := icmd.(*btcjson.RescanCmd)
if !ok {
return nil, btcjson.ErrRPCInternal
}

outpoints := make([]*wire.OutPoint, 0, len(cmd.OutPoints))
for i := range cmd.OutPoints {
cmdOutpoint := &cmd.OutPoints[i]
blockHash, err := daghash.NewHashFromStr(cmdOutpoint.Hash)
if err != nil {
return nil, rpcDecodeHexError(cmdOutpoint.Hash)
}
outpoint := wire.NewOutPoint(blockHash, cmdOutpoint.Index)
outpoints = append(outpoints, outpoint)
}

numAddrs := len(cmd.Addresses)
if numAddrs == 1 {
rpcsLog.Info("Beginning rescan for 1 address")
} else {
rpcsLog.Infof("Beginning rescan for %d addresses", numAddrs)
}

// Build lookup maps.
lookups := rescanKeys{
fallbacks: map[string]struct{}{},
pubKeyHashes: map[[ripemd160.Size]byte]struct{}{},
scriptHashes: map[[ripemd160.Size]byte]struct{}{},
compressedPubKeys: map[[33]byte]struct{}{},
uncompressedPubKeys: map[[65]byte]struct{}{},
unspent: map[wire.OutPoint]struct{}{},
}
var compressedPubkey [33]byte
var uncompressedPubkey [65]byte
params := wsc.server.cfg.ChainParams
for _, addrStr := range cmd.Addresses {
addr, err := btcutil.DecodeAddress(addrStr, params)
if err != nil {
jsonErr := btcjson.RPCError{
Code: btcjson.ErrRPCInvalidAddressOrKey,
Message: "Rescan address " + addrStr + ": " +
err.Error(),
}
return nil, &jsonErr
}
switch a := addr.(type) {
case *btcutil.AddressPubKeyHash:
lookups.pubKeyHashes[*a.Hash160()] = struct{}{}

case *btcutil.AddressScriptHash:
lookups.scriptHashes[*a.Hash160()] = struct{}{}

case *btcutil.AddressPubKey:
pubkeyBytes := a.ScriptAddress()
switch len(pubkeyBytes) {
case 33: // Compressed
copy(compressedPubkey[:], pubkeyBytes)
lookups.compressedPubKeys[compressedPubkey] = struct{}{}

case 65: // Uncompressed
copy(uncompressedPubkey[:], pubkeyBytes)
lookups.uncompressedPubKeys[uncompressedPubkey] = struct{}{}

default:
jsonErr := btcjson.RPCError{
Code: btcjson.ErrRPCInvalidAddressOrKey,
Message: "Pubkey " + addrStr + " is of unknown length",
}
return nil, &jsonErr
}

default:
// A new address type must have been added. Use encoded
// payment address string as a fallback until a fast path
// is added.
lookups.fallbacks[addrStr] = struct{}{}
}
}
for _, outpoint := range outpoints {
lookups.unspent[*outpoint] = struct{}{}
}

dag := wsc.server.cfg.DAG

minBlockHash, err := daghash.NewHashFromStr(cmd.BeginBlock)
if err != nil {
return nil, rpcDecodeHexError(cmd.BeginBlock)
}
minBlock, err := dag.BlockHeightByHash(minBlockHash)
if err != nil {
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCBlockNotFound,
Message: "Error getting block: " + err.Error(),
}
}

maxBlock := int32(math.MaxInt32)
if cmd.EndBlock != nil {
maxBlockHash, err := daghash.NewHashFromStr(*cmd.EndBlock)
if err != nil {
return nil, rpcDecodeHexError(*cmd.EndBlock)
}
maxBlock, err = dag.BlockHeightByHash(maxBlockHash)
if err != nil {
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCBlockNotFound,
Message: "Error getting block: " + err.Error(),
}
}
}

// lastBlock and lastBlockHash track the previously-rescanned block.
// They equal nil when no previous blocks have been rescanned.
var lastBlock *btcutil.Block
var lastBlockHash *daghash.Hash

// A ticker is created to wait at least 10 seconds before notifying the
// websocket client of the current progress completed by the rescan.
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()

// Instead of fetching all block shas at once, fetch in smaller chunks
// to ensure large rescans consume a limited amount of memory.
fetchRange:
for minBlock < maxBlock {
// Limit the max number of hashes to fetch at once to the
// maximum number of items allowed in a single inventory.
// This value could be higher since it's not creating inventory
// messages, but this mirrors the limiting logic used in the
// peer-to-peer protocol.
maxLoopBlock := maxBlock
if maxLoopBlock-minBlock > wire.MaxInvPerMsg {
maxLoopBlock = minBlock + wire.MaxInvPerMsg
}
hashList, err := dag.HeightRange(minBlock, maxLoopBlock)
if err != nil {
rpcsLog.Errorf("Error looking up block range: %v", err)
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCDatabase,
Message: "Database error: " + err.Error(),
}
}
if len(hashList) == 0 {
// The rescan is finished if no blocks hashes for this
// range were successfully fetched and a stop block
// was provided.
if maxBlock != math.MaxInt32 {
break
}

// If the rescan is through the current block, set up
// the client to continue to receive notifications
// regarding all rescanned addresses and the current set
// of unspent outputs.
//
// This is done safely by temporarily grabbing exclusive
// access of the block manager. If no more blocks have
// been attached between this pause and the fetch above,
// then it is safe to register the websocket client for
// continuous notifications if necessary. Otherwise,
// continue the fetch loop again to rescan the new
// blocks (or error due to an irrecoverable reorganize).
pauseGuard := wsc.server.cfg.SyncMgr.Pause()
dagState := wsc.server.cfg.DAG.GetDAGState()
curHash := &dagState.SelectedTip.Hash
again := true
if lastBlockHash == nil || *lastBlockHash == *curHash {
again = false
n := wsc.server.ntfnMgr
n.RegisterSpentRequests(wsc, lookups.unspentSlice())
n.RegisterTxOutAddressRequests(wsc, cmd.Addresses)
}
close(pauseGuard)
if err != nil {
rpcsLog.Errorf("Error fetching best block "+
"hash: %v", err)
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCDatabase,
Message: "Database error: " +
err.Error(),
}
}
if again {
continue
}
break
}

loopHashList:
for i := range hashList {
blk, err := dag.BlockByHash(&hashList[i])
if err != nil {
// Only handle reorgs if a block could not be
// found for the hash.
if dbErr, ok := err.(database.Error); !ok ||
dbErr.ErrorCode != database.ErrBlockNotFound {

rpcsLog.Errorf("Error looking up "+
"block: %v", err)
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCDatabase,
Message: "Database error: " +
err.Error(),
}
}

// If an absolute max block was specified, don't
// attempt to handle the reorg.
if maxBlock != math.MaxInt32 {
rpcsLog.Errorf("Stopping rescan for "+
"reorged block %v",
cmd.EndBlock)
return nil, &ErrRescanReorg
}

// If the lookup for the previously valid block
// hash failed, there may have been a reorg.
// Fetch a new range of block hashes and verify
// that the previously processed block (if there
// was any) still exists in the database. If it
// doesn't, we error.
//
// A goto is used to branch execution back to
// before the range was evaluated, as it must be
// reevaluated for the new hashList.
minBlock += int32(i)
hashList, err = recoverFromReorg(dag,
minBlock, maxBlock, lastBlockHash)
if err != nil {
return nil, err
}
if len(hashList) == 0 {
break fetchRange
}
goto loopHashList
}
if i == 0 && lastBlockHash != nil {
// Ensure the new hashList is on the same fork
// as the last block from the old hashList.
jsonErr := descendantBlock(lastBlockHash, blk)
if jsonErr != nil {
return nil, jsonErr
}
}

// A select statement is used to stop rescans if the
// client requesting the rescan has disconnected.
select {
case <-wsc.quit:
rpcsLog.Debugf("Stopped rescan at height %v "+
"for disconnected client", blk.Height())
return nil, nil
default:
rescanBlock(wsc, &lookups, blk)
lastBlock = blk
lastBlockHash = blk.Hash()
}

// Periodically notify the client of the progress
// completed. Continue with next block if no progress
// notification is needed yet.
select {
case <-ticker.C: // fallthrough
default:
continue
}

n := btcjson.NewRescanProgressNtfn(hashList[i].String(),
blk.Height(), blk.MsgBlock().Header.Timestamp.Unix())
mn, err := btcjson.MarshalCmd(nil, n)
if err != nil {
rpcsLog.Errorf("Failed to marshal rescan "+
"progress notification: %v", err)
continue
}

if err = wsc.QueueNotification(mn); err == ErrClientQuit {
// Finished if the client disconnected.
rpcsLog.Debugf("Stopped rescan at height %v "+
"for disconnected client", blk.Height())
return nil, nil
}
}

minBlock += int32(len(hashList))
}

// Notify websocket client of the finished rescan. Due to how btcd
// asynchronously queues notifications to not block calling code,
// there is no guarantee that any of the notifications created during
// rescan (such as rescanprogress, recvtx and redeemingtx) will be
// received before the rescan RPC returns. Therefore, another method
// is needed to safely inform clients that all rescan notifications have
// been sent.
n := btcjson.NewRescanFinishedNtfn(lastBlockHash.String(),
lastBlock.Height(),
lastBlock.MsgBlock().Header.Timestamp.Unix())
if mn, err := btcjson.MarshalCmd(nil, n); err != nil {
rpcsLog.Errorf("Failed to marshal rescan finished "+
"notification: %v", err)
} else {
// The rescan is finished, so we don't care whether the client
// has disconnected at this point, so discard error.
_ = wsc.QueueNotification(mn)
}

rpcsLog.Info("Finished rescan")
return nil, nil
}

func init() {
wsHandlers = wsHandlersBeforeInit
}