[NOD-592] Remove TODOs and XXXs from the codebase (#890)

* [NOD-592] Remove TODOs related to fake nonces.

* [NOD-592] Remove irrelevant TODOs from handleRescanBlocks and parseTxAcceptedVerboseNtfnParams.

* [NOD-592] Fix TODO in handleGetTxOut.

* [NOD-592] Remove irrelevant TODO from updateAddress.

* [NOD-592] Move StandardVerifyFlags to a separate file.

* [NOD-592] Remove TODOs in sign.go.

* [NOD-592] Remove TODO in scriptval_test.go.

* [NOD-592] Remove TODO in reachabilitystore.go.

* [NOD-592] Remove XXXs.

* [NOD-592] Fix a comment.

* [NOD-557] Move AddAddressByIP out of AddressManager since it's only used for tests.

* [NOD-557] Remove rescan blocks.

* [NOD-592] Fix handleGetTxOut.
stasatdaglabs 2020-08-23 17:17:06 +03:00 committed by GitHub
parent 667b2d46e9
commit 15b545ee2b
26 changed files with 62 additions and 366 deletions


@ -352,7 +352,7 @@ func BenchmarkReadBlockHeader(b *testing.B) {
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x00, // TxnCount Varint
}
r := bytes.NewReader(buf)


@ -92,7 +92,7 @@ func TestBlockHeaderEncoding(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
@ -208,7 +208,7 @@ func TestBlockHeaderSerialize(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {


@ -431,7 +431,7 @@ func TestBlockOverflowErrors(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // TxnCount
}, pver, &MessageError{},
@ -572,7 +572,7 @@ var blockOneBytes = []byte{
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x01, // TxnCount
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of transaction inputs


@ -10,12 +10,11 @@ import (
"strings"
)
// XXX pedro: we will probably need to bump this.
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
// defaultServices describes the default services that are supported by
// DefaultServices describes the default services that are supported by
// the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
)


@ -108,7 +108,6 @@ func (store *reachabilityStore) clearDirtyEntries() {
}
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
// TODO: (Stas) This is a quick and dirty hack.
// We iterate over the entire bucket twice:
// * First, populate the loaded set with all entries
// * Second, connect the parent/children pointers in each entry
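
As an illustration of this two-pass pattern, here is a minimal sketch. The types, keys, and bucket shape are simplified assumptions, not the actual reachabilityStore/dbaccess code:

type serializedEntry struct {
	key       string
	parentKey string
	childKeys []string
}

type entry struct {
	parent   *entry
	children []*entry
}

// loadEntries mirrors the approach described above: pass 1 creates every
// entry, pass 2 wires the parent/children pointers once all entries exist.
func loadEntries(bucket []serializedEntry) map[string]*entry {
	loaded := make(map[string]*entry)

	// Pass 1: populate the loaded set with all entries.
	for _, serialized := range bucket {
		loaded[serialized.key] = &entry{}
	}

	// Pass 2: connect the parent/children pointers in each entry. This is
	// only safe because pass 1 has already created every referenced entry.
	for _, serialized := range bucket {
		e := loaded[serialized.key]
		e.parent = loaded[serialized.parentKey]
		for _, childKey := range serialized.childKeys {
			e.children = append(e.children, loaded[childKey])
		}
	}
	return loaded
}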


@ -16,7 +16,7 @@ import (
// TestCheckBlockScripts ensures that validating the all of the scripts in a
// known-good block doesn't return an error.
func TestCheckBlockScripts(t *testing.T) {
t.Skip() // TODO: Reactivate this test once we have blocks from testnet.
t.Skip()
runtime.GOMAXPROCS(runtime.NumCPU())
testBlockNum := 277647

domain/txscript/policy.go (new file)

@ -0,0 +1,12 @@
package txscript
const (
// StandardVerifyFlags are the script flags which are used when
// executing transaction scripts to enforce additional checks which
// are required for the script to be considered standard. These checks
// help reduce issues related to transaction malleability as well as
// allow pay-to-script hash transactions. Note these flags are
// different than what is required for the consensus rules in that they
// are more strict.
StandardVerifyFlags = ScriptDiscourageUpgradableNops
)
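
For context, StandardVerifyFlags is intended to be OR'ed into the script flags used for policy-level (standardness) validation, on top of whatever consensus requires. A minimal sketch, assuming the package's ScriptFlags type and a hypothetical mempool-side helper:

package mempool // hypothetical caller package

import "github.com/kaspanet/kaspad/domain/txscript"

// scriptFlagsFor adds StandardVerifyFlags on top of a base flag set when
// policy (standardness) checks are requested.
func scriptFlagsFor(base txscript.ScriptFlags, policyChecks bool) txscript.ScriptFlags {
	if policyChecks {
		return base | txscript.StandardVerifyFlags
	}
	return base
}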


@ -108,10 +108,6 @@ func sign(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
func mergeScripts(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
class ScriptClass, sigScript, prevScript []byte) ([]byte, error) {
// TODO: the scripthash and multisig paths here are overly
// inefficient in that they will recompute already known data.
// some internal refactoring could probably make this avoid needless
// extra calculations.
switch class {
case ScriptHashTy:
// Remove the last push in the script and then recurse.
@ -210,7 +206,6 @@ func SignTxOutput(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
}
if class == ScriptHashTy {
// TODO keep the sub addressed and pass down to merge.
realSigScript, _, _, err := sign(dagParams, tx, idx,
sigScript, hashType, kdb, sdb)
if err != nil {
@ -223,7 +218,6 @@ func SignTxOutput(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
builder.AddData(sigScript)
sigScript, _ = builder.Script()
// TODO keep a copy of the script for merging.
}
// Merge scripts. with any previous data, if any.


@ -12,20 +12,6 @@ import (
"github.com/kaspanet/kaspad/util"
)
const (
// StandardVerifyFlags are the script flags which are used when
// executing transaction scripts to enforce additional checks which
// are required for the script to be considered standard. These checks
// help reduce issues related to transaction malleability as well as
// allow pay-to-script hash transactions. Note these flags are
// different than what is required for the consensus rules in that they
// are more strict.
//
// TODO: This definition does not belong here. It belongs in a policy
// package.
StandardVerifyFlags = ScriptDiscourageUpgradableNops
)
// ScriptClass is an enumeration for the list of standard types of script.
type ScriptClass byte


@ -224,7 +224,6 @@ func (am *AddressManager) updateAddress(netAddress, sourceAddress *appmessage.Ne
addressKey := NetAddressKey(netAddress)
knownAddress := am.knownAddress(netAddress)
if knownAddress != nil {
// TODO: only update addresses periodically.
// Update the last seen time and services.
// note that to prevent causing excess garbage on getaddr
// messages the netaddresses in addrmaanger are *immutable*,
@ -783,28 +782,6 @@ func (am *AddressManager) AddAddress(address, sourceAddress *appmessage.NetAddre
am.updateAddress(address, sourceAddress, subnetworkID)
}
// AddAddressByIP adds an address where we are given an ip:port and not a
// appmessage.NetAddress.
func (am *AddressManager) AddAddressByIP(addressIP string, subnetworkID *subnetworkid.SubnetworkID) error {
// Split IP and port
ipString, portString, err := net.SplitHostPort(addressIP)
if err != nil {
return err
}
// Put it in appmessage.Netaddress
ip := net.ParseIP(ipString)
if ip == nil {
return errors.Errorf("invalid ip %s", ipString)
}
port, err := strconv.ParseUint(portString, 10, 0)
if err != nil {
return errors.Errorf("invalid port %s: %s", portString, err)
}
netAddress := appmessage.NewNetAddressIPPort(ip, uint16(port), 0)
am.AddAddress(netAddress, netAddress, subnetworkID) // XXX use correct src address
return nil
}
// numAddresses returns the number of addresses that belongs to a specific subnetwork id
// which are known to the address manager.
func (am *AddressManager) numAddresses(subnetworkID *subnetworkid.SubnetworkID) int {


@ -177,7 +177,7 @@ func TestAddAddressByIP(t *testing.T) {
amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
defer teardown()
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
err := AddAddressByIP(amgr, test.addrIP, nil)
if test.err != nil && err == nil {
t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
continue
@ -253,7 +253,7 @@ func TestAttempt(t *testing.T) {
defer teardown()
// Add a new address and get it
err := amgr.AddAddressByIP(someIP+":8333", nil)
err := AddAddressByIP(amgr, someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@ -276,7 +276,7 @@ func TestConnected(t *testing.T) {
defer teardown()
// Add a new address and get it
err := amgr.AddAddressByIP(someIP+":8333", nil)
err := AddAddressByIP(amgr, someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@ -454,7 +454,7 @@ func TestGetAddress(t *testing.T) {
}
// Add a new address and get it
err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID)
err := AddAddressByIP(amgr, someIP+":8332", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@ -481,7 +481,7 @@ func TestGetAddress(t *testing.T) {
// Now we repeat the same process, but now the address has the expected subnetwork ID.
// Add a new address and get it
err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID)
err = AddAddressByIP(amgr, someIP+":8333", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@ -555,15 +555,6 @@ func TestGetBestLocalAddress(t *testing.T) {
appmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
appmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
},
/* XXX
{
// Remote connection from Tor
appmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
appmessage.NetAddress{IP: net.IPv4zero},
appmessage.NetAddress{IP: net.ParseIP("204.124.8.100")},
appmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
*/
}
amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil)
@ -633,5 +624,4 @@ func TestNetAddressKey(t *testing.T) {
continue
}
}
}


@ -0,0 +1,31 @@
package addressmanager
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"net"
"strconv"
)
// AddAddressByIP adds an address where we are given an ip:port and not a
// appmessage.NetAddress.
func AddAddressByIP(am *AddressManager, addressIP string, subnetworkID *subnetworkid.SubnetworkID) error {
// Split IP and port
ipString, portString, err := net.SplitHostPort(addressIP)
if err != nil {
return err
}
// Put it in appmessage.Netaddress
ip := net.ParseIP(ipString)
if ip == nil {
return errors.Errorf("invalid ip %s", ipString)
}
port, err := strconv.ParseUint(portString, 10, 0)
if err != nil {
return errors.Errorf("invalid port %s: %s", portString, err)
}
netAddress := appmessage.NewNetAddressIPPort(ip, uint16(port), 0)
am.AddAddress(netAddress, netAddress, subnetworkID)
return nil
}
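
Call sites (all of them in tests) now pass the manager explicitly, for example (inside a test; the address value is illustrative):

am, teardown := newAddrManagerForTest(t, "ExampleAddAddressByIP", nil)
defer teardown()

if err := AddAddressByIP(am, "1.2.3.4:8333", nil); err != nil {
	t.Fatalf("Adding address failed: %v", err)
}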


@ -703,46 +703,3 @@ func (c *Client) GetTxOutAsync(txHash *daghash.Hash, index uint32, mempool bool)
func (c *Client) GetTxOut(txHash *daghash.Hash, index uint32, mempool bool) (*model.GetTxOutResult, error) {
return c.GetTxOutAsync(txHash, index, mempool).Receive()
}
// FutureRescanBlocksResult is a future promise to deliver the result of a
// RescanBlocksAsync RPC invocation (or an applicable error).
type FutureRescanBlocksResult chan *response
// Receive waits for the response promised by the future and returns the
// discovered rescanblocks data.
func (r FutureRescanBlocksResult) Receive() ([]model.RescannedBlock, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
var rescanBlocksResult []model.RescannedBlock
err = json.Unmarshal(res, &rescanBlocksResult)
if err != nil {
return nil, errors.Wrap(err, "couldn't decode rescanBlocks response")
}
return rescanBlocksResult, nil
}
// RescanBlocksAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See RescanBlocks for the blocking version and more details.
func (c *Client) RescanBlocksAsync(blockHashes []*daghash.Hash) FutureRescanBlocksResult {
strBlockHashes := make([]string, len(blockHashes))
for i := range blockHashes {
strBlockHashes[i] = blockHashes[i].String()
}
cmd := model.NewRescanBlocksCmd(strBlockHashes)
return c.sendCmd(cmd)
}
// RescanBlocks rescans the blocks identified by blockHashes, in order, using
// the client's loaded transaction filter. The blocks do not need to be on the
// main dag, but they do need to be adjacent to each other.
func (c *Client) RescanBlocks(blockHashes []*daghash.Hash) ([]model.RescannedBlock, error) {
return c.RescanBlocksAsync(blockHashes).Receive()
}


@ -547,9 +547,7 @@ func (c *Client) reregisterNtfns() error {
// ignoreResends is a set of all methods for requests that are "long running"
// are not be reissued by the client on reconnect.
var ignoreResends = map[string]struct{}{
"rescan": {},
}
var ignoreResends = map[string]struct{}{}
func (c *Client) collectResendRequests() []*jsonRequest {
c.requestLock.Lock()


@ -439,9 +439,6 @@ func parseTxAcceptedVerboseNtfnParams(params []json.RawMessage) (*model.TxRawRes
return nil, err
}
// TODO: change txacceptedverbose notification callbacks to use nicer
// types for all details about the transaction (i.e. decoding hashes
// from their string encoding).
return &rawTx, nil
}
@ -613,7 +610,7 @@ func (c *Client) LoadTxFilterAsync(reload bool, addresses []util.Address,
// LoadTxFilter loads, reloads, or adds data to a websocket client's transaction
// filter. The filter is consistently updated based on inspected transactions
// during mempool acceptance, block acceptance, and for all rescanned blocks.
// during mempool acceptance, and for block acceptance.
func (c *Client) LoadTxFilter(reload bool, addresses []util.Address, outpoints []appmessage.Outpoint) error {
return c.LoadTxFilterAsync(reload, addresses, outpoints).Receive()
}
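
For reference, a minimal client-side call matching this signature could look like the following sketch (client, addr, and txID are assumed to be set up elsewhere):

// Reload the filter with a single address and a single outpoint.
outpoints := []appmessage.Outpoint{{TxID: *txID, Index: 0}}
if err := client.LoadTxFilter(true, []util.Address{addr}, outpoints); err != nil {
	// handle the error
}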


@ -34,14 +34,9 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
if c.IncludeMempool != nil {
includeMempool = *c.IncludeMempool
}
// TODO: This is racy. It should attempt to fetch it directly and check
// the error.
if includeMempool && s.txMempool.HaveTransaction(txID) {
tx, ok := s.txMempool.FetchTransaction(txID)
if !ok {
return nil, rpcNoTxInfoError(txID)
}
tx, ok := s.txMempool.FetchTransaction(txID)
if includeMempool && ok {
mtx := tx.MsgTx()
if c.Vout > uint32(len(mtx.TxOut)-1) {
return nil, &model.RPCError{
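
The reasoning behind this change: the removed code asked the mempool whether it has the transaction and only then fetched it, which can race if the transaction is evicted between the two calls; the new code fetches once and checks the result. Schematically (use is a hypothetical placeholder):

// Racy (removed): the mempool can change between the two calls.
if s.txMempool.HaveTransaction(txID) {
	tx, _ := s.txMempool.FetchTransaction(txID) // may already be gone
	use(tx)
}

// Fixed (this commit): fetch once and check the ok result.
tx, ok := s.txMempool.FetchTransaction(txID)
if ok {
	use(tx)
}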


@ -1,71 +0,0 @@
package rpc
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/util"
)
// rescanBlockFilter rescans a block for any relevant transactions for the
// passed lookup keys. Any discovered transactions are returned hex encoded as
// a string slice.
//
// NOTE: This extension is ported from github.com/decred/dcrd
func rescanBlockFilter(filter *wsClientFilter, block *util.Block, params *dagconfig.Params) []string {
var transactions []string
filter.mu.Lock()
defer filter.mu.Unlock()
for _, tx := range block.Transactions() {
msgTx := tx.MsgTx()
// Keep track of whether the transaction has already been added
// to the result. It shouldn't be added twice.
added := false
// Scan inputs if not a coinbase transaction.
if !msgTx.IsCoinBase() {
for _, input := range msgTx.TxIn {
if !filter.existsUnspentOutpointNoLock(&input.PreviousOutpoint) {
continue
}
if !added {
transactions = append(
transactions,
txHexString(msgTx))
added = true
}
}
}
// Scan outputs.
for i, output := range msgTx.TxOut {
_, addr, err := txscript.ExtractScriptPubKeyAddress(
output.ScriptPubKey, params)
if err != nil {
continue
}
if addr != nil {
if !filter.existsAddress(addr) {
continue
}
op := appmessage.Outpoint{
TxID: *tx.ID(),
Index: uint32(i),
}
filter.addUnspentOutpoint(&op)
if !added {
transactions = append(
transactions,
txHexString(msgTx))
added = true
}
}
}
}
return transactions
}


@ -1,72 +0,0 @@
package rpc
import (
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/rpc/model"
"github.com/kaspanet/kaspad/util/daghash"
)
// handleRescanBlocks implements the rescanBlocks command extension for
// websocket connections.
//
// NOTE: This extension is ported from github.com/decred/dcrd
func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
cmd, ok := icmd.(*model.RescanBlocksCmd)
if !ok {
return nil, model.ErrRPCInternal
}
// Load client's transaction filter. Must exist in order to continue.
filter := wsc.FilterData()
if filter == nil {
return nil, &model.RPCError{
Code: model.ErrRPCMisc,
Message: "Transaction filter must be loaded before rescanning",
}
}
blockHashes := make([]*daghash.Hash, len(cmd.BlockHashes))
for i := range cmd.BlockHashes {
hash, err := daghash.NewHashFromStr(cmd.BlockHashes[i])
if err != nil {
return nil, err
}
blockHashes[i] = hash
}
discoveredData := make([]model.RescannedBlock, 0, len(blockHashes))
// Iterate over each block in the request and rescan. When a block
// contains relevant transactions, add it to the response.
bc := wsc.server.dag
params := wsc.server.dag.Params
var lastBlockHash *daghash.Hash
for i := range blockHashes {
block, err := bc.BlockByHash(blockHashes[i])
if err != nil {
return nil, &model.RPCError{
Code: model.ErrRPCBlockNotFound,
Message: "Failed to fetch block: " + err.Error(),
}
}
if lastBlockHash != nil && !block.MsgBlock().Header.ParentHashes[0].IsEqual(lastBlockHash) { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
return nil, &model.RPCError{
Code: model.ErrRPCInvalidParameter,
Message: fmt.Sprintf("Block %s is not a child of %s",
blockHashes[i], lastBlockHash),
}
}
lastBlockHash = blockHashes[i]
transactions := rescanBlockFilter(filter, block, params)
if len(transactions) != 0 {
discoveredData = append(discoveredData, model.RescannedBlock{
Hash: cmd.BlockHashes[i],
Transactions: transactions,
})
}
}
return &discoveredData, nil
}


@ -123,18 +123,6 @@ func NewLoadTxFilterCmd(reload bool, addresses []string, outpoints []Outpoint) *
}
}
// RescanBlocksCmd defines the rescan JSON-RPC command.
type RescanBlocksCmd struct {
// Block hashes as a string array.
BlockHashes []string
}
// NewRescanBlocksCmd returns a new instance which can be used to issue a rescan
// JSON-RPC command.
func NewRescanBlocksCmd(blockHashes []string) *RescanBlocksCmd {
return &RescanBlocksCmd{BlockHashes: blockHashes}
}
func init() {
// The commands in this file are only usable by websockets.
flags := UFWebsocketOnly
@ -148,5 +136,4 @@ func init() {
MustRegisterCommand("stopNotifyBlocks", (*StopNotifyBlocksCmd)(nil), flags)
MustRegisterCommand("stopNotifyChainChanges", (*StopNotifyChainChangesCmd)(nil), flags)
MustRegisterCommand("stopNotifyNewTransactions", (*StopNotifyNewTransactionsCmd)(nil), flags)
MustRegisterCommand("rescanBlocks", (*RescanBlocksCmd)(nil), flags)
}


@ -157,20 +157,6 @@ func TestRPCServerWebsocketCommands(t *testing.T) {
Outpoints: []model.Outpoint{{TxID: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}},
},
},
{
name: "rescanBlocks",
newCmd: func() (interface{}, error) {
return model.NewCommand("rescanBlocks", `["0000000000000000000000000000000000000000000000000000000000000123"]`)
},
staticCmd: func() interface{} {
blockhashes := []string{"0000000000000000000000000000000000000000000000000000000000000123"}
return model.NewRescanBlocksCmd(blockhashes)
},
marshalled: `{"jsonrpc":"1.0","method":"rescanBlocks","params":[["0000000000000000000000000000000000000000000000000000000000000123"]],"id":1}`,
unmarshalled: &model.RescanBlocksCmd{
BlockHashes: []string{"0000000000000000000000000000000000000000000000000000000000000123"},
},
},
}
t.Logf("Running %d tests", len(tests))


@ -9,10 +9,3 @@ package model
type SessionResult struct {
SessionID uint64 `json:"sessionId"`
}
// RescannedBlock contains the hash and all discovered transactions of a single
// rescanned block.
type RescannedBlock struct {
Hash string `json:"hash"`
Transactions []string `json:"transactions"`
}


@ -1,50 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package model_test
import (
"encoding/json"
"testing"
"github.com/kaspanet/kaspad/infrastructure/network/rpc/model"
)
// TestRPCServerWebsocketResults ensures any results that have custom marshalling
// work as intended.
func TestRPCServerWebsocketResults(t *testing.T) {
t.Parallel()
tests := []struct {
name string
result interface{}
expected string
}{
{
name: "RescannedBlock",
result: &model.RescannedBlock{
Hash: "blockhash",
Transactions: []string{"serializedtx"},
},
expected: `{"hash":"blockhash","transactions":["serializedtx"]}`,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
marshalled, err := json.Marshal(test.result)
if err != nil {
t.Errorf("Test #%d (%s) unexpected error: %v", i,
test.name, err)
continue
}
if string(marshalled) != test.expected {
t.Errorf("Test #%d (%s) unexpected marhsalled data - "+
"got %s, want %s", i, test.name, marshalled,
test.expected)
continue
}
}
}


@ -106,8 +106,6 @@ var rpcLimited = map[string]struct{}{
"notifyNewTransactions": {},
"notifyReceived": {},
"notifySpent": {},
"rescan": {},
"rescanBlocks": {},
"session": {},
// Websockets AND HTTP/S commands


@ -522,20 +522,11 @@ var helpDescsEnUS = map[string]string{
"outpoint-index": "The index of the outpoint",
// LoadTxFilterCmd help.
"loadTxFilter--synopsis": "Load, add to, or reload a websocket client's transaction filter for mempool transactions, new blocks and rescanBlocks.",
"loadTxFilter--synopsis": "Load, add to, or reload a websocket client's transaction filter for mempool transactions and new blocks.",
"loadTxFilter-reload": "Load a new filter instead of adding data to an existing one",
"loadTxFilter-addresses": "Array of addresses to add to the transaction filter",
"loadTxFilter-outpoints": "Array of outpoints to add to the transaction filter",
// RescanBlocks help.
"rescanBlocks--synopsis": "Rescan blocks for transactions matching the loaded transaction filter.",
"rescanBlocks-blockHashes": "List of hashes to rescan. Each next block must be a child of the previous.",
"rescanBlocks--result0": "List of matching blocks.",
// RescannedBlock help.
"rescannedBlock-hash": "Hash of the matching block.",
"rescannedBlock-transactions": "List of matching transactions, serialized and hex-encoded.",
// Uptime help.
"uptime--synopsis": "Returns the total uptime of the server.",
"uptime--result0": "The number of seconds that the server has been running",
@ -604,7 +595,6 @@ var rpcResultTypes = map[string][]interface{}{
"stopNotifyChainChanges": nil,
"notifyNewTransactions": nil,
"stopNotifyNewTransactions": nil,
"rescanBlocks": {(*[]model.RescannedBlock)(nil)},
}
// helpCacher provides a concurrent safe type that provides help and usage for


@ -73,7 +73,6 @@ var wsHandlersBeforeInit = map[string]wsCommandHandler{
"stopNotifyBlocks": handleStopNotifyBlocks,
"stopNotifyChainChanges": handleStopNotifyChainChanges,
"stopNotifyNewTransactions": handleStopNotifyNewTransactions,
"rescanBlocks": handleRescanBlocks,
}
// WebsocketHandler handles a new websocket client by creating a new wsClient,


@ -1,6 +1,7 @@
package integration
import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"testing"
)
@ -9,7 +10,7 @@ func TestAddressExchange(t *testing.T) {
defer teardown()
testAddress := "1.2.3.4:6789"
err := appHarness1.app.AddressManager().AddAddressByIP(testAddress, nil)
err := addressmanager.AddAddressByIP(appHarness1.app.AddressManager(), testAddress, nil)
if err != nil {
t.Fatalf("Error adding address to addressManager: %+v", err)
}