Compare commits

..

20 Commits

Author SHA1 Message Date
Ori Newman
917fa11706 bignet debug 2020-09-02 14:53:38 +03:00
Ori Newman
a32a9011c7 [NOD-1305] Close client connection on disconnect (#909) 2020-08-31 15:57:11 +03:00
stasatdaglabs
5da957f16e Update to version 0.6.8 2020-08-30 11:31:43 +03:00
Ori Newman
505d264603 [NOD-1322] Fix compilation on windows (#905) 2020-08-27 18:04:54 +03:00
Ori Newman
883361fea3 [NOD-1323] Always save new block reachability data (#906) 2020-08-27 18:03:50 +03:00
stasatdaglabs
13a6872a45 Update to version 0.6.7 2020-08-26 12:13:43 +03:00
Elichai Turkel
c82a951a24 [NOD-1316] Refactor TestGHOSTDAG to enable arbitrary DAGs (#899)
* Add VirtualBlueHashes to BlockDAG

* Refactor TestGHOSTDAG to read DAGs from json files

* Added a new DAG for the ghostdag test suite

* Pass BehaviorFlags to delayed blocks
2020-08-25 14:00:43 +03:00
Ori Newman
bbb9dfa4cd [NOD-1318] Check if relay block is known before requesting it (#900) 2020-08-25 09:18:03 +03:00
stasatdaglabs
86d51fa1cb [NOD-1307] Fix duplicate connections (#897)
* [NOD-1307] Lock peersMutex in methods that don't.

* [NOD-1307] Fix duplicate connections.

* [NOD-1307] Use RLock instead of Lock.

* [NOD-1307] Simplify IsEqual.
2020-08-24 16:11:32 +03:00
Ori Newman
8dd7b95423 [NOD-1308] Don't call wg.done() on handshake if flow failed (#896) 2020-08-24 14:03:58 +03:00
stasatdaglabs
b668d98942 [NOD-1095] Fix data races in gRPCConnection.stream. (#895) 2020-08-24 12:56:19 +03:00
stasatdaglabs
e9602cc777 [NOD-1304] Fix nil deference originating in HandleHandshake. (#894) 2020-08-24 11:45:33 +03:00
stasatdaglabs
5fd164bf66 [NOD-1095] RLock the dagLock in SelectedTipHeader. (#893) 2020-08-24 11:31:12 +03:00
Ori Newman
83e7c9e8e4 [NOD-1303] Fix concurent access to UTXO set from RPC (#892) 2020-08-23 18:54:03 +03:00
Ori Newman
a6b8eea369 [NOD-1301] Add MsgReject to protowire mapping (#891) 2020-08-23 18:29:41 +03:00
stasatdaglabs
15b545ee2b [NOD-592] Remove TODOs and XXXs from the codebase (#890)
* [NOD-592] Remove TODOs related to fake nonces.

* [NOD-592] Remove irrelevant TODOs from handleRescanBlocks and parseTxAcceptedVerboseNtfnParams.

* [NOD-592] Fix TODO in handleGetTxOut.

* [NOD-592] Remove irrelevant TODO from updateAddress.

* [NOD-592] Move StandardVerifyFlags to a separate file.

* [NOD-592] Remove TODOs in sign.go.

* [NOD-592] Remove TODO in scriptval_test.go.

* [NOD-592] Remove TODO in reachabilitystore.go.

* [NOD-592] Remove XXXs.

* [NOD-592] Fix a comment.

* [NOD-557] Move AddAddressByIP out of AddressManager since it's used only for tests.

* [NOD-557] Remove rescan blocks.

* [NOD-592] Fix handleGetTxOut.
2020-08-23 17:17:06 +03:00
stasatdaglabs
667b2d46e9 [NOD-557] Remove RegTest (#889)
* [NOD-557] Remove regTest network.

* [NOD-557] Remove remaining references to regTest.

* [NOD-557] Move newHashFromStr from params.go to params_test.go.

* [NOD-557] Rename test to network in register_test.go.

* [NOD-557] Replaced removed tests in TestDecodeAddressErrorConditions.
2020-08-23 15:38:27 +03:00
stasatdaglabs
53ab906ea8 [NOD-1279] Handle ruleErrors properly in processIBDBlock. (#887) 2020-08-23 13:42:21 +03:00
stasatdaglabs
5d20772f94 [NOD-1293] Fix kaspad sending 127.0.0.1 in its msgVersion (#886)
* [NOD-1293] Use addressManager's GetBestLocalAddress.

* [NOD-1293] Copy the initListeners function from the old p2p to the address manager.

* [NOD-1293] Remove debug logs.

* [NOD-1293] Remove unused import.

* [NOD-1293] Fix a comment.
2020-08-23 13:11:48 +03:00
stasatdaglabs
d4728bd9b6 Update to version 0.6.6 2020-08-23 11:22:13 +03:00
82 changed files with 1162 additions and 935 deletions

View File

@@ -114,13 +114,14 @@ func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrup
if err != nil {
return nil, err
}
addressManager := addressmanager.New(cfg, databaseContext)
addressManager, err := addressmanager.New(cfg, databaseContext)
if err != nil {
return nil, err
}
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
if err != nil {
return nil, err
}
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
if err != nil {
return nil, err

View File

@@ -16,7 +16,7 @@ import (
)
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
// the main network, regression test network, and test network.
// the main network and test network.
var genesisCoinbaseTxIns = []*TxIn{
{
PreviousOutpoint: Outpoint{
@@ -352,7 +352,7 @@ func BenchmarkReadBlockHeader(b *testing.B) {
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x00, // TxnCount Varint
}
r := bytes.NewReader(buf)

View File

@@ -92,7 +92,7 @@ func TestBlockHeaderEncoding(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
@@ -208,7 +208,7 @@ func TestBlockHeaderSerialize(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {

View File

@@ -68,7 +68,6 @@ the following constants:
appmessage.Mainnet
appmessage.Testnet (Test network)
appmessage.Regtest (Regression test network)
appmessage.Simnet (Simulation test network)
appmessage.Devnet (Development network)

View File

@@ -431,7 +431,7 @@ func TestBlockOverflowErrors(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // TxnCount
}, pver, &MessageError{},
@@ -572,7 +572,7 @@ var blockOneBytes = []byte{
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x01, // TxnCount
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of transaction inputs

View File

@@ -10,10 +10,13 @@ import (
"strings"
)
// XXX pedro: we will probably need to bump this.
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
// DefaultServices describes the default services that are supported by
// the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
)
// ServiceFlag identifies services supported by a kaspa peer.
@@ -103,9 +106,6 @@ const (
// Testnet represents the test network.
Testnet KaspaNet = 0xddb8af8f
// Regtest represents the regression test network.
Regtest KaspaNet = 0xf396cdd6
// Simnet represents the simulation test network.
Simnet KaspaNet = 0x374dcf1c
@@ -118,7 +118,6 @@ const (
var bnStrings = map[KaspaNet]string{
Mainnet: "Mainnet",
Testnet: "Testnet",
Regtest: "Regtest",
Simnet: "Simnet",
Devnet: "Devnet",
}

View File

@@ -40,7 +40,6 @@ func TestKaspaNetStringer(t *testing.T) {
want string
}{
{Mainnet, "Mainnet"},
{Regtest, "Regtest"},
{Testnet, "Testnet"},
{Simnet, "Simnet"},
{0xffffffff, "Unknown KaspaNet (4294967295)"},

View File

@@ -22,7 +22,7 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
panic(err)
}
log.Errorf("error from %s: %+v", flowName, err)
log.Errorf("error from %s: %s", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {

View File

@@ -39,7 +39,7 @@ type FlowContext struct {
startIBDMutex sync.Mutex
ibdPeer *peerpkg.Peer
peers map[*id.ID]*peerpkg.Peer
peers map[id.ID]*peerpkg.Peer
peersMutex sync.RWMutex
}
@@ -57,7 +57,7 @@ func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addressmana
txPool: txPool,
sharedRequestedTransactions: relaytransactions.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[*id.ID]*peerpkg.Peer),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[daghash.TxID]*util.Tx),
}
}

View File

@@ -37,6 +37,9 @@ func (f *FlowContext) IsInIBD() bool {
// selectPeerForIBD returns the first peer whose selected tip
// hash is not in our DAG
func (f *FlowContext) selectPeerForIBD(dag *blockdag.BlockDAG) *peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peerSelectedTipHash := peer.SelectedTipHash()
if !dag.IsInDAG(peerSelectedTipHash) {
@@ -59,6 +62,9 @@ func (f *FlowContext) isDAGTimeCurrent() bool {
}
func (f *FlowContext) requestSelectedTips() {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peer.RequestSelectedTipIfRequired()
}

View File

@@ -24,11 +24,11 @@ func (f *FlowContext) AddToPeers(peer *peerpkg.Peer) error {
f.peersMutex.Lock()
defer f.peersMutex.Unlock()
if _, ok := f.peers[peer.ID()]; ok {
if _, ok := f.peers[*peer.ID()]; ok {
return errors.Wrapf(common.ErrPeerWithSameIDExists, "peer with ID %s already exists", peer.ID())
}
f.peers[peer.ID()] = peer
f.peers[*peer.ID()] = peer
return nil
}
@@ -38,7 +38,7 @@ func (f *FlowContext) RemoveFromPeers(peer *peerpkg.Peer) {
f.peersMutex.Lock()
defer f.peersMutex.Unlock()
delete(f.peers, peer.ID())
delete(f.peers, *peer.ID())
}
// readyPeerConnections returns the NetConnections of all the ready peers.

View File

@@ -26,6 +26,7 @@ func (f *FlowContext) AddTransaction(tx *util.Tx) error {
f.transactionsToRebroadcast[*tx.ID()] = tx
inv := appmessage.NewMsgInvTransaction([]*daghash.TxID{tx.ID()})
log.Criticalf("~~~~~ FlowContext.AddTransaction() broadcasting", tx.ID())
return f.Broadcast(inv)
}

View File

@@ -38,7 +38,7 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddrList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count excceeded %d", addressmanager.GetAddressesMax)
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
if msgAddresses.IncludeAllSubnetworks {

View File

@@ -14,7 +14,6 @@ type SendAddressesContext interface {
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
message, err := incomingRoute.Dequeue()
if err != nil {
return err

View File

@@ -55,6 +55,8 @@ func (flow *handleRelayInvsFlow) start() error {
return err
}
log.Debugf("Got relay inv for block %s", inv.Hash)
if flow.DAG().IsKnownBlock(inv.Hash) {
if flow.DAG().IsKnownInvalid(inv.Hash) {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
@@ -114,6 +116,11 @@ func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) err
continue
}
// The block can become known from another peer in the process of orphan resolution
if flow.DAG().IsKnownBlock(hash) {
continue
}
pendingBlocks[*hash] = struct{}{}
filteredHashesToRequest = append(filteredHashesToRequest, hash)
}
@@ -153,7 +160,6 @@ func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) err
delete(pendingBlocks, *blockHash)
flow.SharedRequestedBlocks().remove(blockHash)
}
return nil
}
@@ -190,7 +196,7 @@ func (flow *handleRelayInvsFlow) processAndRelayBlock(requestQueue *hashesQueueS
}
log.Infof("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s", blockHash)
return protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
if isDelayed {

View File

@@ -34,7 +34,7 @@ type HandleHandshakeContext interface {
// version message, as well as a verack for the sent version
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
) (peer *peerpkg.Peer, err error) {
) (*peerpkg.Peer, error) {
// For HandleHandshake to finish, we need to get from the other node
// a version and verack messages, so we increase the wait group by 2
@@ -45,26 +45,26 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
isStopping := uint32(0)
errChan := make(chan error)
peer = peerpkg.New(netConnection)
peer := peerpkg.New(netConnection)
var peerAddress *appmessage.NetAddress
spawn("HandleHandshake-ReceiveVersion", func() {
defer wg.Done()
address, err := ReceiveVersion(context, receiveVersionRoute, outgoingRoute, peer)
if err != nil {
handleError(err, "ReceiveVersion", &isStopping, errChan)
return
}
peerAddress = address
wg.Done()
})
spawn("HandleHandshake-SendVersion", func() {
defer wg.Done()
err := SendVersion(context, sendVersionRoute, outgoingRoute)
err := SendVersion(context, sendVersionRoute, outgoingRoute, peer)
if err != nil {
handleError(err, "SendVersion", &isStopping, errChan)
return
}
wg.Done()
})
select {
@@ -76,7 +76,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
case <-locks.ReceiveFromChanWhenDone(func() { wg.Wait() }):
}
err = context.AddToPeers(peer)
err := context.AddToPeers(peer)
if err != nil {
if errors.As(err, &common.ErrPeerWithSameIDExists) {
return nil, protocolerrors.Wrap(false, err, "peer already exists")

View File

@@ -3,6 +3,7 @@ package handshake
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/version"
)
@@ -18,7 +19,7 @@ var (
// defaultServices describes the default services that are supported by
// the server.
defaultServices = appmessage.SFNodeNetwork | appmessage.SFNodeBloom | appmessage.SFNodeCF
defaultServices = appmessage.DefaultServices
// defaultRequiredServices describes the default services that are
// required to be supported by outbound peers.
@@ -28,14 +29,18 @@ var (
type sendVersionFlow struct {
HandleHandshakeContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// SendVersion sends a version to a peer and waits for verack.
func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &sendVersionFlow{
HandleHandshakeContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
@@ -45,10 +50,7 @@ func (flow *sendVersionFlow) start() error {
subnetworkID := flow.Config().SubnetworkID
// Version message.
localAddress, err := flow.NetAdapter().GetBestLocalAddress()
if err != nil {
return err
}
localAddress := flow.AddressManager().GetBestLocalAddress(flow.peer.Connection().NetAddress())
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
flow.Config().ActiveNetParams.Name, selectedTipHash, subnetworkID)
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
@@ -62,7 +64,7 @@ func (flow *sendVersionFlow) start() error {
// Advertise if inv messages for transactions are desired.
msg.DisableRelayTx = flow.Config().BlocksOnly
err = flow.outgoingRoute.Enqueue(msg)
err := flow.outgoingRoute.Enqueue(msg)
if err != nil {
return err
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// HandleIBDContext is the interface for the context needed for the HandleIBD flow.
@@ -53,10 +54,13 @@ func (flow *handleIBDFlow) runIBD() error {
defer flow.FinishIBD()
peerSelectedTipHash := flow.peer.SelectedTipHash()
log.Debugf("Trying to find highest shared chain block with peer %s with selected tip %s", flow.peer, peerSelectedTipHash)
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(peerSelectedTipHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
if flow.DAG().IsKnownFinalizedBlock(highestSharedBlockHash) {
return protocolerrors.Errorf(false, "cannot initiate "+
"IBD with peer %s because the highest shared chain block (%s) is "+
@@ -176,11 +180,17 @@ func (flow *handleIBDFlow) receiveIBDBlock() (msgIBDBlock *appmessage.MsgIBDBloc
func (flow *handleIBDFlow) processIBDBlock(msgIBDBlock *appmessage.MsgIBDBlock) error {
block := util.NewBlock(msgIBDBlock.MsgBlock)
if flow.DAG().IsInDAG(block.Hash()) {
log.Debugf("IBD block %s is already in the DAG. Skipping...", block.Hash())
return nil
}
isOrphan, isDelayed, err := flow.DAG().ProcessBlock(block, blockdag.BFNone)
if err != nil {
return err
if !errors.As(err, &blockdag.RuleError{}) {
return errors.Wrapf(err, "failed to process block %s during IBD", block.Hash())
}
log.Infof("Rejected block %s from %s during IBD: %s", block.Hash(), flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", block.Hash())
}
if isOrphan {
return protocolerrors.Errorf(true, "received orphan block %s "+

View File

@@ -1,10 +1,10 @@
package ping
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"

View File

@@ -48,6 +48,10 @@ func (flow *handleRelayedTransactionsFlow) start() error {
return err
}
for _, txID := range inv.TxIDs {
log.Criticalf("~~~~~ handleRelayedTransactionsFlow.start() got %s", txID)
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err
@@ -137,6 +141,7 @@ func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransact
func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxs []*mempool.TxDesc) error {
idsToBroadcast := make([]*daghash.TxID, len(acceptedTxs))
for i, tx := range acceptedTxs {
log.Criticalf("~~~~~ broadcastAcceptedTransactions() broadcasting", tx.Tx.ID())
idsToBroadcast[i] = tx.Tx.ID()
}
inv := appmessage.NewMsgInvTransaction(idsToBroadcast)
@@ -187,6 +192,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
continue
}
tx := util.NewTx(msgTx)
log.Criticalf("~~~~~ receiveTransactions() got %s", tx.ID())
if !tx.ID().IsEqual(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, tx.ID())

View File

@@ -30,6 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
}
for _, transactionID := range msgRequestTransactions.IDs {
log.Criticalf("~~~~~ handleRequestedTransactionsFlow.start() tx %s was requested", transactionID)
tx, ok := flow.TxPool().FetchTransaction(transactionID)
if !ok {

View File

@@ -0,0 +1,9 @@
package relaytransactions
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -133,14 +133,14 @@ func (dag *BlockDAG) antiPastHashesBetween(lowHash, highHash *daghash.Hash, maxH
func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries uint64) ([]*blockNode, error) {
lowNode, ok := dag.index.LookupNode(lowHash)
if !ok {
return nil, errors.Errorf("Couldn't find low hash %s", lowHash)
return nil, errors.Errorf("couldn't find low hash %s", lowHash)
}
highNode, ok := dag.index.LookupNode(highHash)
if !ok {
return nil, errors.Errorf("Couldn't find high hash %s", highHash)
return nil, errors.Errorf("couldn't find high hash %s", highHash)
}
if lowNode.blueScore >= highNode.blueScore {
return nil, errors.Errorf("Low hash blueScore >= high hash blueScore (%d >= %d)",
return nil, errors.Errorf("low hash blueScore >= high hash blueScore (%d >= %d)",
lowNode.blueScore, highNode.blueScore)
}

View File

@@ -42,7 +42,7 @@ func (dag *BlockDAG) UTXOConfirmations(outpoint *appmessage.Outpoint) (uint64, b
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
utxoEntry, ok := dag.GetUTXOEntry(*outpoint)
utxoEntry, ok := dag.virtual.utxoSet.get(*outpoint)
if !ok {
return 0, false
}

View File

@@ -211,6 +211,8 @@ func (dag *BlockDAG) selectedTip() *blockNode {
//
// This function is safe for concurrent access.
func (dag *BlockDAG) SelectedTipHeader() *appmessage.BlockHeader {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
selectedTip := dag.selectedTip()
if selectedTip == nil {
return nil
@@ -248,6 +250,8 @@ func (dag *BlockDAG) CalcPastMedianTime() mstime.Time {
// This function is safe for concurrent access. However, the returned entry (if
// any) is NOT.
func (dag *BlockDAG) GetUTXOEntry(outpoint appmessage.Outpoint) (*UTXOEntry, bool) {
dag.RLock()
defer dag.RUnlock()
return dag.virtual.utxoSet.get(outpoint)
}
@@ -281,6 +285,17 @@ func (dag *BlockDAG) SelectedTipBlueScore() uint64 {
return dag.selectedTip().blueScore
}
// VirtualBlueHashes returns the blue of the current virtual block
func (dag *BlockDAG) VirtualBlueHashes() []*daghash.Hash {
dag.RLock()
defer dag.RUnlock()
hashes := make([]*daghash.Hash, len(dag.virtual.blues))
for i, blue := range dag.virtual.blues {
hashes[i] = blue.hash
}
return hashes
}
// VirtualBlueScore returns the blue score of the current virtual block
func (dag *BlockDAG) VirtualBlueScore() uint64 {
return dag.virtual.blueScore

View File

@@ -12,6 +12,7 @@ import (
type delayedBlock struct {
block *util.Block
processTime mstime.Time
flags BehaviorFlags
}
func (dag *BlockDAG) isKnownDelayedBlock(hash *daghash.Hash) bool {
@@ -19,12 +20,13 @@ func (dag *BlockDAG) isKnownDelayedBlock(hash *daghash.Hash) bool {
return exists
}
func (dag *BlockDAG) addDelayedBlock(block *util.Block, delay time.Duration) error {
func (dag *BlockDAG) addDelayedBlock(block *util.Block, flags BehaviorFlags, delay time.Duration) error {
processTime := dag.Now().Add(delay)
log.Debugf("Adding block to delayed blocks queue (block hash: %s, process time: %s)", block.Hash().String(), processTime)
delayedBlock := &delayedBlock{
block: block,
processTime: processTime,
flags: flags,
}
dag.delayedBlocks[*block.Hash()] = delayedBlock
@@ -42,7 +44,7 @@ func (dag *BlockDAG) processDelayedBlocks() error {
break
}
delayedBlock := dag.popDelayedBlock()
_, _, err := dag.processBlockNoLock(delayedBlock.block, BFAfterDelay)
_, _, err := dag.processBlockNoLock(delayedBlock.block, delayedBlock.flags|BFAfterDelay)
if err != nil {
log.Errorf("Error while processing delayed block (block %s): %s", delayedBlock.block.Hash().String(), err)
// Rule errors should not be propagated as they refer only to the delayed block,

View File

@@ -172,7 +172,6 @@ func TestFinalityInterval(t *testing.T) {
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {

View File

@@ -1,7 +1,10 @@
package blockdag
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
@@ -13,12 +16,19 @@ import (
"github.com/kaspanet/kaspad/util/daghash"
)
type testBlockData struct {
parents []string
id string // id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
expectedScore uint64
expectedSelectedParent string
expectedBlues []string
type block struct {
ID string // id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
ExpectedScore uint64
ExpectedSelectedParent string
ExpectedBlues []string
Parents []string
}
type testData struct {
K dagconfig.KType
GenesisID string
ExpectedReds []string
Blocks []block
}
// TestGHOSTDAG iterates over several dag simulations, and checks
@@ -26,158 +36,26 @@ type testBlockData struct {
// block are calculated as expected.
func TestGHOSTDAG(t *testing.T) {
dagParams := dagconfig.SimnetParams
err := filepath.Walk("./testdata/dags/", func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
var test testData
file, err := os.Open(path)
if err != nil {
t.Fatalf("TestGHOSTDAG: failed opening file: %s", path)
}
decoder := json.NewDecoder(file)
decoder.DisallowUnknownFields()
err = decoder.Decode(&test)
if err != nil {
t.Fatalf("TestGHOSTDAG: test: %s, failed decoding json: %v", info.Name(), err)
}
tests := []struct {
k dagconfig.KType
expectedReds []string
dagData []*testBlockData
}{
{
k: 3,
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "C",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"A"},
id: "D",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedScore: 4,
expectedSelectedParent: "C",
expectedBlues: []string{"C", "D"},
},
{
parents: []string{"A"},
id: "F",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"F"},
id: "G",
expectedScore: 2,
expectedSelectedParent: "F",
expectedBlues: []string{"F"},
},
{
parents: []string{"A"},
id: "H",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "I",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"E", "G"},
id: "J",
expectedScore: 5,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"J"},
id: "K",
expectedScore: 6,
expectedSelectedParent: "J",
expectedBlues: []string{"J"},
},
{
parents: []string{"I", "K"},
id: "L",
expectedScore: 7,
expectedSelectedParent: "K",
expectedBlues: []string{"K"},
},
{
parents: []string{"L"},
id: "M",
expectedScore: 8,
expectedSelectedParent: "L",
expectedBlues: []string{"L"},
},
{
parents: []string{"M"},
id: "N",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "O",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "P",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "Q",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "R",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"R"},
id: "S",
expectedScore: 10,
expectedSelectedParent: "R",
expectedBlues: []string{"R"},
},
{
parents: []string{"N", "O", "P", "Q", "S"},
id: "T",
expectedScore: 13,
expectedSelectedParent: "S",
expectedBlues: []string{"S", "Q", "N"},
},
},
},
}
for i, test := range tests {
func() {
resetExtraNonceForTest()
dagParams.K = test.k
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
dagParams.K = test.K
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG %s", info.Name()), true, Config{
DAGParams: &dagParams,
})
if err != nil {
@@ -188,32 +66,33 @@ func TestGHOSTDAG(t *testing.T) {
genesisNode := dag.genesis
blockByIDMap := make(map[string]*blockNode)
idByBlockMap := make(map[*blockNode]string)
blockByIDMap["A"] = genesisNode
idByBlockMap[genesisNode] = "A"
blockByIDMap[test.GenesisID] = genesisNode
idByBlockMap[genesisNode] = test.GenesisID
for _, blockData := range test.dagData {
for _, blockData := range test.Blocks {
parents := blockSet{}
for _, parentID := range blockData.parents {
for _, parentID := range blockData.Parents {
parent := blockByIDMap[parentID]
parents.add(parent)
}
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
t.Fatalf("TestGHOSTDAG: block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
t.Fatalf("TestGHOSTDAG: block %s got unexpected error from PrepareBlockForTest: %v", blockData.ID,
err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %s: %v", blockData.ID, err)
}
if isDelayed {
t.Fatalf("TestGHOSTDAG: block %s "+
"is too far in the future", blockData.id)
"is too far in the future", blockData.ID)
}
if isOrphan {
t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
t.Fatalf("TestGHOSTDAG: block %s was unexpectedly orphan", blockData.ID)
}
node, ok := dag.index.LookupNode(utilBlock.Hash())
@@ -221,8 +100,8 @@ func TestGHOSTDAG(t *testing.T) {
t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
}
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id
blockByIDMap[blockData.ID] = node
idByBlockMap[node] = blockData.ID
bluesIDs := make([]string, 0, len(node.blues))
for _, blue := range node.blues {
@@ -231,17 +110,17 @@ func TestGHOSTDAG(t *testing.T) {
selectedParentID := idByBlockMap[node.selectedParent]
fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
bluesIDs, selectedParentID, node.blueScore)
if blockData.expectedScore != node.blueScore {
t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
if blockData.ExpectedScore != node.blueScore {
t.Errorf("Test %s: Block %s expected to have score %v but got %v (fulldata: %v)",
info.Name(), blockData.ID, blockData.ExpectedScore, node.blueScore, fullDataStr)
}
if blockData.expectedSelectedParent != selectedParentID {
t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
if blockData.ExpectedSelectedParent != selectedParentID {
t.Errorf("Test %s: Block %s expected to have selected parent %v but got %v (fulldata: %v)",
info.Name(), blockData.ID, blockData.ExpectedSelectedParent, selectedParentID, fullDataStr)
}
if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
if !reflect.DeepEqual(blockData.ExpectedBlues, bluesIDs) {
t.Errorf("Test %s: Block %s expected to have blues %v but got %v (fulldata: %v)",
info.Name(), blockData.ID, blockData.ExpectedBlues, bluesIDs, fullDataStr)
}
}
@@ -259,16 +138,22 @@ func TestGHOSTDAG(t *testing.T) {
delete(reds, blueID)
}
}
if !checkReds(test.expectedReds, reds) {
if !checkReds(test.ExpectedReds, reds) {
redsIDs := make([]string, 0, len(reds))
for id := range reds {
redsIDs = append(redsIDs, id)
}
sort.Strings(redsIDs)
sort.Strings(test.expectedReds)
t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
sort.Strings(test.ExpectedReds)
t.Errorf("Test %s: Expected reds %v but got %v", info.Name(), test.ExpectedReds, redsIDs)
}
}()
return nil
})
if err != nil {
t.Fatal(err)
}
}

View File

@@ -33,6 +33,10 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
blockHash := block.Hash()
log.Tracef("Processing block %s", blockHash)
for _, tx := range block.Transactions() {
log.Criticalf("~~~~~ processBlockNoLock block %s tx %s", block.Hash(), tx.ID())
}
err = dag.checkDuplicateBlock(blockHash, flags)
if err != nil {
return false, false, err
@@ -86,7 +90,7 @@ func (dag *BlockDAG) checkBlockDelay(block *util.Block, flags BehaviorFlags) (is
}
if isDelayed {
err := dag.addDelayedBlock(block, delay)
err := dag.addDelayedBlock(block, flags, delay)
if err != nil {
return false, err
}
@@ -114,7 +118,7 @@ func (dag *BlockDAG) checkMissingParents(block *util.Block, flags BehaviorFlags)
if isParentDelayed {
// Add Millisecond to ensure that parent process time will be after its child.
delay += time.Millisecond
err := dag.addDelayedBlock(block, delay)
err := dag.addDelayedBlock(block, flags, delay)
if err != nil {
return false, false, err
}

View File

@@ -268,6 +268,9 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo
rtn.children = append(rtn.children, child)
child.parent = rtn
modifiedNodes[rtn] = struct{}{}
modifiedNodes[child] = struct{}{}
// Temporarily set the child's interval to be empty, at
// the start of rtn's remaining interval. This is done
// so that child-of-rtn checks (e.g.
@@ -312,8 +315,6 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo
return err
}
child.interval = allocated
modifiedNodes[rtn] = struct{}{}
modifiedNodes[child] = struct{}{}
return nil
}

View File

@@ -108,7 +108,6 @@ func (store *reachabilityStore) clearDirtyEntries() {
}
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
// TODO: (Stas) This is a quick and dirty hack.
// We iterate over the entire bucket twice:
// * First, populate the loaded set with all entries
// * Second, connect the parent/children pointers in each entry

View File

@@ -16,7 +16,7 @@ import (
// TestCheckBlockScripts ensures that validating all of the scripts in a
// known-good block doesn't return an error.
func TestCheckBlockScripts(t *testing.T) {
t.Skip() // TODO: Reactivate this test once we have blocks from testnet.
t.Skip()
runtime.GOMAXPROCS(runtime.NumCPU())
testBlockNum := 277647

View File

@@ -10,7 +10,6 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) {
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {

233
domain/blockdag/testdata/dags/dag0.json vendored Normal file
View File

@@ -0,0 +1,233 @@
{
"K": 4,
"GenesisID": "A",
"ExpectedReds": [
"Q",
"H",
"I"
],
"Blocks": [
{
"ID": "B",
"ExpectedScore": 1,
"ExpectedSelectedParent": "A",
"ExpectedBlues": [
"A"
],
"Parents": [
"A"
]
},
{
"ID": "C",
"ExpectedScore": 2,
"ExpectedSelectedParent": "B",
"ExpectedBlues": [
"B"
],
"Parents": [
"B"
]
},
{
"ID": "D",
"ExpectedScore": 1,
"ExpectedSelectedParent": "A",
"ExpectedBlues": [
"A"
],
"Parents": [
"A"
]
},
{
"ID": "E",
"ExpectedScore": 4,
"ExpectedSelectedParent": "C",
"ExpectedBlues": [
"C",
"D"
],
"Parents": [
"C",
"D"
]
},
{
"ID": "F",
"ExpectedScore": 1,
"ExpectedSelectedParent": "A",
"ExpectedBlues": [
"A"
],
"Parents": [
"A"
]
},
{
"ID": "G",
"ExpectedScore": 2,
"ExpectedSelectedParent": "F",
"ExpectedBlues": [
"F"
],
"Parents": [
"F"
]
},
{
"ID": "H",
"ExpectedScore": 1,
"ExpectedSelectedParent": "A",
"ExpectedBlues": [
"A"
],
"Parents": [
"A"
]
},
{
"ID": "I",
"ExpectedScore": 1,
"ExpectedSelectedParent": "A",
"ExpectedBlues": [
"A"
],
"Parents": [
"A"
]
},
{
"ID": "J",
"ExpectedScore": 7,
"ExpectedSelectedParent": "E",
"ExpectedBlues": [
"E",
"F",
"G"
],
"Parents": [
"E",
"G"
]
},
{
"ID": "K",
"ExpectedScore": 8,
"ExpectedSelectedParent": "J",
"ExpectedBlues": [
"J"
],
"Parents": [
"J"
]
},
{
"ID": "L",
"ExpectedScore": 9,
"ExpectedSelectedParent": "K",
"ExpectedBlues": [
"K"
],
"Parents": [
"I",
"K"
]
},
{
"ID": "M",
"ExpectedScore": 10,
"ExpectedSelectedParent": "L",
"ExpectedBlues": [
"L"
],
"Parents": [
"L"
]
},
{
"ID": "N",
"ExpectedScore": 11,
"ExpectedSelectedParent": "M",
"ExpectedBlues": [
"M"
],
"Parents": [
"M"
]
},
{
"ID": "O",
"ExpectedScore": 11,
"ExpectedSelectedParent": "M",
"ExpectedBlues": [
"M"
],
"Parents": [
"M"
]
},
{
"ID": "P",
"ExpectedScore": 11,
"ExpectedSelectedParent": "M",
"ExpectedBlues": [
"M"
],
"Parents": [
"M"
]
},
{
"ID": "Q",
"ExpectedScore": 11,
"ExpectedSelectedParent": "M",
"ExpectedBlues": [
"M"
],
"Parents": [
"M"
]
},
{
"ID": "R",
"ExpectedScore": 11,
"ExpectedSelectedParent": "M",
"ExpectedBlues": [
"M"
],
"Parents": [
"M"
]
},
{
"ID": "S",
"ExpectedScore": 12,
"ExpectedSelectedParent": "R",
"ExpectedBlues": [
"R"
],
"Parents": [
"R"
]
},
{
"ID": "T",
"ExpectedScore": 16,
"ExpectedSelectedParent": "S",
"ExpectedBlues": [
"S",
"P",
"N",
"O"
],
"Parents": [
"N",
"O",
"P",
"Q",
"S"
]
}
]
}

386
domain/blockdag/testdata/dags/dag1.json vendored Normal file
View File

@@ -0,0 +1,386 @@
{
"K": 4,
"GenesisID": "0",
"ExpectedReds": [
"12",
"30",
"6",
"27",
"4",
"16",
"7",
"23",
"24",
"11",
"15",
"19",
"9"
],
"Blocks": [
{
"ID": "1",
"ExpectedScore": 1,
"ExpectedSelectedParent": "0",
"ExpectedBlues": [
"0"
],
"Parents": [
"0"
]
},
{
"ID": "2",
"ExpectedScore": 1,
"ExpectedSelectedParent": "0",
"ExpectedBlues": [
"0"
],
"Parents": [
"0"
]
},
{
"ID": "3",
"ExpectedScore": 1,
"ExpectedSelectedParent": "0",
"ExpectedBlues": [
"0"
],
"Parents": [
"0"
]
},
{
"ID": "4",
"ExpectedScore": 2,
"ExpectedSelectedParent": "1",
"ExpectedBlues": [
"1"
],
"Parents": [
"1"
]
},
{
"ID": "5",
"ExpectedScore": 3,
"ExpectedSelectedParent": "2",
"ExpectedBlues": [
"2",
"3"
],
"Parents": [
"2",
"3"
]
},
{
"ID": "6",
"ExpectedScore": 2,
"ExpectedSelectedParent": "3",
"ExpectedBlues": [
"3"
],
"Parents": [
"3"
]
},
{
"ID": "7",
"ExpectedScore": 3,
"ExpectedSelectedParent": "6",
"ExpectedBlues": [
"6"
],
"Parents": [
"6"
]
},
{
"ID": "8",
"ExpectedScore": 3,
"ExpectedSelectedParent": "2",
"ExpectedBlues": [
"2",
"1"
],
"Parents": [
"1",
"2"
]
},
{
"ID": "9",
"ExpectedScore": 5,
"ExpectedSelectedParent": "5",
"ExpectedBlues": [
"5",
"6"
],
"Parents": [
"5",
"6"
]
},
{
"ID": "10",
"ExpectedScore": 5,
"ExpectedSelectedParent": "8",
"ExpectedBlues": [
"8",
"4"
],
"Parents": [
"8",
"4"
]
},
{
"ID": "11",
"ExpectedScore": 7,
"ExpectedSelectedParent": "9",
"ExpectedBlues": [
"9",
"7"
],
"Parents": [
"7",
"9"
]
},
{
"ID": "12",
"ExpectedScore": 8,
"ExpectedSelectedParent": "9",
"ExpectedBlues": [
"9",
"8",
"10"
],
"Parents": [
"10",
"9"
]
},
{
"ID": "13",
"ExpectedScore": 6,
"ExpectedSelectedParent": "8",
"ExpectedBlues": [
"8",
"3",
"5"
],
"Parents": [
"5",
"8"
]
},
{
"ID": "14",
"ExpectedScore": 8,
"ExpectedSelectedParent": "13",
"ExpectedBlues": [
"13",
"10"
],
"Parents": [
"13",
"10"
]
},
{
"ID": "15",
"ExpectedScore": 9,
"ExpectedSelectedParent": "11",
"ExpectedBlues": [
"11",
"13"
],
"Parents": [
"11",
"13"
]
},
{
"ID": "16",
"ExpectedScore": 8,
"ExpectedSelectedParent": "11",
"ExpectedBlues": [
"11"
],
"Parents": [
"11"
]
},
{
"ID": "17",
"ExpectedScore": 9,
"ExpectedSelectedParent": "14",
"ExpectedBlues": [
"14"
],
"Parents": [
"14"
]
},
{
"ID": "18",
"ExpectedScore": 7,
"ExpectedSelectedParent": "13",
"ExpectedBlues": [
"13"
],
"Parents": [
"13"
]
},
{
"ID": "19",
"ExpectedScore": 10,
"ExpectedSelectedParent": "15",
"ExpectedBlues": [
"15"
],
"Parents": [
"18",
"15"
]
},
{
"ID": "20",
"ExpectedScore": 10,
"ExpectedSelectedParent": "17",
"ExpectedBlues": [
"17"
],
"Parents": [
"16",
"17"
]
},
{
"ID": "21",
"ExpectedScore": 12,
"ExpectedSelectedParent": "20",
"ExpectedBlues": [
"20",
"18"
],
"Parents": [
"18",
"20"
]
},
{
"ID": "22",
"ExpectedScore": 13,
"ExpectedSelectedParent": "21",
"ExpectedBlues": [
"21"
],
"Parents": [
"19",
"21"
]
},
{
"ID": "23",
"ExpectedScore": 11,
"ExpectedSelectedParent": "17",
"ExpectedBlues": [
"17",
"12"
],
"Parents": [
"12",
"17"
]
},
{
"ID": "24",
"ExpectedScore": 13,
"ExpectedSelectedParent": "23",
"ExpectedBlues": [
"23",
"20"
],
"Parents": [
"20",
"23"
]
},
{
"ID": "25",
"ExpectedScore": 13,
"ExpectedSelectedParent": "21",
"ExpectedBlues": [
"21"
],
"Parents": [
"21"
]
},
{
"ID": "26",
"ExpectedScore": 15,
"ExpectedSelectedParent": "22",
"ExpectedBlues": [
"22",
"25"
],
"Parents": [
"22",
"24",
"25"
]
},
{
"ID": "27",
"ExpectedScore": 9,
"ExpectedSelectedParent": "16",
"ExpectedBlues": [
"16"
],
"Parents": [
"16"
]
},
{
"ID": "28",
"ExpectedScore": 14,
"ExpectedSelectedParent": "25",
"ExpectedBlues": [
"25"
],
"Parents": [
"23",
"25"
]
},
{
"ID": "29",
"ExpectedScore": 17,
"ExpectedSelectedParent": "26",
"ExpectedBlues": [
"26",
"28"
],
"Parents": [
"26",
"28"
]
},
{
"ID": "30",
"ExpectedScore": 10,
"ExpectedSelectedParent": "27",
"ExpectedBlues": [
"27"
],
"Parents": [
"27"
]
}
]
}

View File

@@ -6,7 +6,6 @@ of monetary value, there also exists the following standard networks:
* testnet
* simnet
* devnet
* regression test
These networks are incompatible with each other (each sharing a different
genesis block) and software should handle errors where input intended for
one network is used on an application instance running on a different

View File

@@ -108,55 +108,6 @@ var devnetGenesisBlock = appmessage.MsgBlock{
Transactions: []*appmessage.MsgTx{devnetGenesisCoinbaseTx},
}
var regtestGenesisTxOuts = []*appmessage.TxOut{}
var regtestGenesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x17, // Varint
0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, // OP-TRUE p2sh
0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7,
0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x72, 0x65, 0x67, 0x74, 0x65, 0x73, 0x74, // kaspa-regtest
}
// regtestGenesisCoinbaseTx is the coinbase transaction for
// the genesis blocks for the regtest network.
var regtestGenesisCoinbaseTx = appmessage.NewSubnetworkMsgTx(1, []*appmessage.TxIn{}, regtestGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, regtestGenesisTxPayload)
// devGenesisHash is the hash of the first block in the block DAG for the development
// network (genesis block).
var regtestGenesisHash = daghash.Hash{
0xda, 0x23, 0x61, 0x5e, 0xf6, 0x2a, 0x95, 0x27,
0x7f, 0x5a, 0x40, 0xd5, 0x91, 0x97, 0x1c, 0xef,
0xd5, 0x86, 0xac, 0xac, 0x82, 0xb3, 0xc9, 0x43,
0xd3, 0x49, 0x5f, 0x7e, 0x93, 0x0b, 0x35, 0x2d,
}
// regtestGenesisMerkleRoot is the hash of the first transaction in the genesis block
// for the regtest.
var regtestGenesisMerkleRoot = daghash.Hash{
0x1e, 0x08, 0xae, 0x1f, 0x43, 0xf5, 0xfc, 0x24,
0xe6, 0xec, 0x54, 0x5b, 0xf7, 0x52, 0x99, 0xe4,
0xcc, 0x4c, 0xa0, 0x79, 0x41, 0xfc, 0xbe, 0x76,
0x72, 0x4c, 0x7e, 0xd8, 0xa3, 0x43, 0x65, 0x94,
}
// regtestGenesisBlock defines the genesis block of the block DAG which serves as the
// public transaction ledger for the development network.
var regtestGenesisBlock = appmessage.MsgBlock{
Header: appmessage.BlockHeader{
Version: 0x10000000,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &regtestGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash,
Timestamp: mstime.UnixMilliseconds(0x1730a958ac4),
Bits: 0x207fffff,
Nonce: 0x0,
},
Transactions: []*appmessage.MsgTx{regtestGenesisCoinbaseTx},
}
var simnetGenesisTxOuts = []*appmessage.TxOut{}
var simnetGenesisTxPayload = []byte{

View File

@@ -37,33 +37,6 @@ func TestGenesisBlock(t *testing.T) {
}
}
// TestRegtestGenesisBlock tests the genesis block of the regression test
// network for validity by checking the encoded bytes and hashes.
func TestRegtestGenesisBlock(t *testing.T) {
// Encode the genesis block to raw bytes.
var buf bytes.Buffer
err := RegressionNetParams.GenesisBlock.Serialize(&buf)
if err != nil {
t.Fatalf("TestRegtestGenesisBlock: %v", err)
}
// Ensure the encoded block matches the expected bytes.
if !bytes.Equal(buf.Bytes(), regtestGenesisBlockBytes) {
t.Fatalf("TestRegtestGenesisBlock: Genesis block does not "+
"appear valid - got %v, want %v",
spew.Sdump(buf.Bytes()),
spew.Sdump(regtestGenesisBlockBytes))
}
// Check hash of the block against expected hash.
hash := RegressionNetParams.GenesisBlock.BlockHash()
if !RegressionNetParams.GenesisHash.IsEqual(hash) {
t.Fatalf("TestRegtestGenesisBlock: Genesis block hash does "+
"not appear valid - got %v, want %v", spew.Sdump(hash),
spew.Sdump(RegressionNetParams.GenesisHash))
}
}
// TestTestnetGenesisBlock tests the genesis block of the test network for
// validity by checking the encoded bytes and hashes.
func TestTestnetGenesisBlock(t *testing.T) {
@@ -165,27 +138,6 @@ var genesisBlockBytes = []byte{
0x30, 0xcd, 0x5a, 0x4b, 0x87,
}
// regtestGenesisBlockBytes are the encoded bytes for the genesis block of
// the regression test network as of protocol version 1.
var regtestGenesisBlockBytes = []byte{
0x00, 0x00, 0x00, 0x10, 0x00, 0x1e, 0x08, 0xae, 0x1f, 0x43, 0xf5, 0xfc, 0x24, 0xe6, 0xec, 0x54,
0x5b, 0xf7, 0x52, 0x99, 0xe4, 0xcc, 0x4c, 0xa0, 0x79, 0x41, 0xfc, 0xbe, 0x76, 0x72, 0x4c, 0x7e,
0xd8, 0xa3, 0x43, 0x65, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x8a, 0x95, 0x0a, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xd4, 0xc4, 0x87, 0x77, 0xf2, 0xe7, 0x5d, 0xf7, 0xff, 0x2d, 0xbb, 0xb6,
0x2a, 0x73, 0x1f, 0x54, 0x36, 0x33, 0xa7, 0x99, 0xad, 0xb1, 0x09, 0x65, 0xc0, 0xf0, 0xf4, 0x53,
0xba, 0xfb, 0x88, 0xae, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa9, 0x14,
0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, 0x7e, 0xba,
0x30, 0xcd, 0x5a, 0x4b, 0x87, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x72, 0x65, 0x67, 0x74, 0x65,
0x73, 0x74,
}
// testnetGenesisBlockBytes are the encoded bytes for the genesis block of
// the test network as of protocol version 1.
var testnetGenesisBlockBytes = []byte{

View File

@@ -27,10 +27,6 @@ var (
// have for the main network. It is the value 2^255 - 1.
mainPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
// regressionPowMax is the highest proof of work value a Kaspa block
// can have for the regression test network. It is the value 2^255 - 1.
regressionPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
// testnetPowMax is the highest proof of work value a Kaspa block
// can have for the test network. It is the value 2^239 - 1.
testnetPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 239), bigOne)
@@ -53,37 +49,6 @@ const (
targetTimePerBlock = 1 * time.Second
)
// ConsensusDeployment defines details related to a specific consensus rule
// change that is voted in. This is part of BIP0009.
type ConsensusDeployment struct {
// BitNumber defines the specific bit number within the block version
// this particular soft-fork deployment refers to.
BitNumber uint8
// StartTime is the median block time after which voting on the
// deployment starts.
StartTime uint64
// ExpireTime is the median block time after which the attempted
// deployment expires.
ExpireTime uint64
}
// Constants that define the deployment offset in the deployments field of the
// parameters for each deployment. This is useful to be able to get the details
// of a specific deployment by name.
const (
// DeploymentTestDummy defines the rule change deployment ID for testing
// purposes.
DeploymentTestDummy = iota
// NOTE: DefinedDeployments must always come last since it is used to
// determine how many defined deployments there currently are.
// DefinedDeployments is the number of currently defined deployments.
DefinedDeployments
)
// KType defines the size of GHOSTDAG consensus algorithm K parameter.
type KType uint8
@@ -232,54 +197,6 @@ var MainnetParams = Params{
DisableDifficultyAdjustment: false,
}
// RegressionNetParams defines the network parameters for the regression test
// Kaspa network. Not to be confused with the test Kaspa network (version
// 3), this network is sometimes simply called "testnet".
var RegressionNetParams = Params{
K: ghostdagK,
Name: "kaspa-regtest",
Net: appmessage.Regtest,
RPCPort: "16210",
DefaultPort: "16211",
DNSSeeds: []string{},
// DAG parameters
GenesisBlock: &regtestGenesisBlock,
GenesisHash: &regtestGenesisHash,
PowMax: regressionPowMax,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimePerBlock: targetTimePerBlock,
FinalityDuration: finalityDuration,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 108, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 144,
// Mempool parameters
RelayNonStdTxs: true,
// AcceptUnroutable specifies whether this network accepts unroutable
// IP addresses, such as 10.0.0.0/8
AcceptUnroutable: false,
// Human-readable part for Bech32 encoded addresses
Prefix: util.Bech32PrefixKaspaReg,
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks: false,
DisableDifficultyAdjustment: false,
}
// TestnetParams defines the network parameters for the test Kaspa network.
var TestnetParams = Params{
K: ghostdagK,
@@ -459,29 +376,9 @@ func mustRegister(params *Params) {
}
}
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
hash, err := daghash.NewHashFromStr(hexStr)
if err != nil {
// Ordinarily I don't like panics in library code since it
// can take applications down without them having a chance to
// recover which is extremely annoying, however an exception is
// being made in this case because the only way this can panic
// is if there is an error in the hard-coded hashes. Thus it
// will only ever potentially panic on init and therefore is
// 100% predictable.
panic(err)
}
return hash
}
func init() {
// Register all default networks when the package is initialized.
mustRegister(&MainnetParams)
mustRegister(&TestnetParams)
mustRegister(&RegressionNetParams)
mustRegister(&SimnetParams)
}

View File

@@ -41,6 +41,17 @@ func TestNewHashFromStr(t *testing.T) {
}
}
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only be called from tests.
func newHashFromStr(hexStr string) *daghash.Hash {
	parsed, err := daghash.NewHashFromStr(hexStr)
	if err != nil {
		// Test hashes are hard-coded, so a parse failure here is a bug in
		// the test itself; panicking immediately is the right response.
		panic(err)
	}
	return parsed
}
// TestMustRegisterPanic ensures the mustRegister function panics when used to
// register an invalid network.
func TestMustRegisterPanic(t *testing.T) {

View File

@@ -33,11 +33,6 @@ func TestRegister(t *testing.T) {
params: &MainnetParams,
err: ErrDuplicateNet,
},
{
name: "duplicate regtest",
params: &RegressionNetParams,
err: ErrDuplicateNet,
},
{
name: "duplicate testnet",
params: &TestnetParams,
@@ -68,11 +63,6 @@ func TestRegister(t *testing.T) {
params: &MainnetParams,
err: ErrDuplicateNet,
},
{
name: "duplicate regtest",
params: &RegressionNetParams,
err: ErrDuplicateNet,
},
{
name: "duplicate testnet",
params: &TestnetParams,
@@ -93,12 +83,12 @@ func TestRegister(t *testing.T) {
}
for _, test := range tests {
for _, regtest := range test.register {
err := Register(regtest.params)
for _, network := range test.register {
err := Register(network.params)
if err != regtest.err {
if err != network.err {
t.Errorf("%s:%s: Registered network with unexpected error: got %v expected %v",
test.name, regtest.name, err, regtest.err)
network.name, network.name, err, network.err)
}
}
}

View File

@@ -7,6 +7,7 @@ package mempool
import (
"container/list"
"fmt"
"runtime/debug"
"sync"
"sync/atomic"
"time"
@@ -498,6 +499,7 @@ func (mp *TxPool) removeTransactionWithDiff(tx *util.Tx, diff *blockdag.UTXODiff
txDesc, _ := mp.fetchTxDesc(txID)
if txDesc.depCount == 0 {
log.Criticalf("~~~~~ removeTransactionWithDiff delete %s stack %s", txID, debug.Stack())
delete(mp.pool, *txID)
} else {
delete(mp.depends, *txID)
@@ -571,6 +573,7 @@ func (mp *TxPool) processRemovedTransactionDependencies(tx *util.Tx) {
if _, ok := mp.depends[*txD.Tx.ID()]; ok {
delete(mp.depends, *txD.Tx.ID())
mp.pool[*txD.Tx.ID()] = txD
log.Criticalf("~~~~~ processRemovedTransactionDependencies adds %s stack %s", txD.Tx.ID(), debug.Stack())
}
}
}
@@ -652,6 +655,7 @@ func (mp *TxPool) addTransaction(tx *util.Tx, fee uint64, parentsInPool []*appme
}
if len(parentsInPool) == 0 {
log.Criticalf("~~~~~ addTransaction adds %s stack %s", tx.ID(), debug.Stack())
mp.pool[*tx.ID()] = txD
} else {
mp.depends[*tx.ID()] = txD

12
domain/txscript/policy.go Normal file
View File

@@ -0,0 +1,12 @@
package txscript

const (
	// StandardVerifyFlags are the script flags which are used when
	// executing transaction scripts to enforce additional checks which
	// are required for the script to be considered standard. These checks
	// help reduce issues related to transaction malleability as well as
	// allow pay-to-script hash transactions. Note these flags are
	// different than what is required for the consensus rules in that they
	// are more strict.
	StandardVerifyFlags = ScriptDiscourageUpgradableNops
)

View File

@@ -171,8 +171,8 @@ func (b *ScriptBuilder) addData(data []byte) *ScriptBuilder {
// AddFullData should not typically be used by ordinary users as it does not
// include the checks which prevent data pushes larger than the maximum allowed
// sizes which leads to scripts that can't be executed. This is provided for
// testing purposes such as regression tests where sizes are intentionally made
// larger than allowed.
// testing purposes such as tests where sizes are intentionally made larger
// than allowed.
//
// Use AddData instead.
func (b *ScriptBuilder) AddFullData(data []byte) *ScriptBuilder {

View File

@@ -247,7 +247,7 @@ func TestScriptBuilderAddData(t *testing.T) {
// Additional tests for the PushFullData function that
// intentionally allows data pushes to exceed the limit for
// regression testing purposes.
// testing purposes.
// 3-byte data push via OP_PUSHDATA_2.
{

View File

@@ -108,10 +108,6 @@ func sign(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
func mergeScripts(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
class ScriptClass, sigScript, prevScript []byte) ([]byte, error) {
// TODO: the scripthash and multisig paths here are overly
// inefficient in that they will recompute already known data.
// some internal refactoring could probably make this avoid needless
// extra calculations.
switch class {
case ScriptHashTy:
// Remove the last push in the script and then recurse.
@@ -210,7 +206,6 @@ func SignTxOutput(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
}
if class == ScriptHashTy {
// TODO keep the sub addressed and pass down to merge.
realSigScript, _, _, err := sign(dagParams, tx, idx,
sigScript, hashType, kdb, sdb)
if err != nil {
@@ -223,7 +218,6 @@ func SignTxOutput(dagParams *dagconfig.Params, tx *appmessage.MsgTx, idx int,
builder.AddData(sigScript)
sigScript, _ = builder.Script()
// TODO keep a copy of the script for merging.
}
// Merge scripts. with any previous data, if any.

View File

@@ -12,20 +12,6 @@ import (
"github.com/kaspanet/kaspad/util"
)
const (
// StandardVerifyFlags are the script flags which are used when
// executing transaction scripts to enforce additional checks which
// are required for the script to be considered standard. These checks
// help reduce issues related to transaction malleability as well as
// allow pay-to-script hash transactions. Note these flags are
// different than what is required for the consensus rules in that they
// are more strict.
//
// TODO: This definition does not belong here. It belongs in a policy
// package.
StandardVerifyFlags = ScriptDiscourageUpgradableNops
)
// ScriptClass is an enumeration for the list of standard types of script.
type ScriptClass byte

View File

@@ -262,7 +262,7 @@ func LoadConfig() (cfg *Config, remainingArgs []string, err error) {
cfg = &Config{
Flags: cfgFlags,
}
if !(preCfg.RegressionTest || preCfg.Simnet) || preCfg.ConfigFile !=
if !preCfg.Simnet || preCfg.ConfigFile !=
defaultConfigFile {
if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {
@@ -285,11 +285,6 @@ func LoadConfig() (cfg *Config, remainingArgs []string, err error) {
}
}
// Don't add peers from the config file when in regression test mode.
if preCfg.RegressionTest && len(cfg.AddPeers) > 0 {
cfg.AddPeers = nil
}
// Parse command line options again to ensure they take precedence.
remainingArgs, err = parser.Parse()
if err != nil {

View File

@@ -11,7 +11,6 @@ import (
// NetworkFlags holds the network configuration, that is which network is selected.
type NetworkFlags struct {
Testnet bool `long:"testnet" description:"Use the test network"`
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
Simnet bool `long:"simnet" description:"Use the simulation test network"`
Devnet bool `long:"devnet" description:"Use the development test network"`
ActiveNetParams *dagconfig.Params
@@ -31,10 +30,6 @@ func (networkFlags *NetworkFlags) ResolveNetwork(parser *flags.Parser) error {
numNets++
networkFlags.ActiveNetParams = &dagconfig.TestnetParams
}
if networkFlags.RegressionTest {
numNets++
networkFlags.ActiveNetParams = &dagconfig.RegressionNetParams
}
if networkFlags.Simnet {
numNets++
networkFlags.ActiveNetParams = &dagconfig.SimnetParams

View File

@@ -17,7 +17,9 @@ import (
"io"
"math/rand"
"net"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
@@ -192,7 +194,7 @@ const (
var ErrAddressNotFound = errors.New("address not found")
// New returns a new Kaspa address manager.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext) *AddressManager {
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext) (*AddressManager, error) {
addressManager := AddressManager{
cfg: cfg,
databaseContext: databaseContext,
@@ -202,8 +204,12 @@ func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext) *Address
localAddresses: make(map[AddressKey]*localAddress),
localSubnetworkID: cfg.SubnetworkID,
}
err := addressManager.initListeners()
if err != nil {
return nil, err
}
addressManager.reset()
return &addressManager
return &addressManager, nil
}
// updateAddress is a helper function to either update an address already known
@@ -218,7 +224,6 @@ func (am *AddressManager) updateAddress(netAddress, sourceAddress *appmessage.Ne
addressKey := NetAddressKey(netAddress)
knownAddress := am.knownAddress(netAddress)
if knownAddress != nil {
// TODO: only update addresses periodically.
// Update the last seen time and services.
// note that to prevent causing excess garbage on getaddr
// messages the netaddresses in addrmanager are *immutable*,
@@ -777,28 +782,6 @@ func (am *AddressManager) AddAddress(address, sourceAddress *appmessage.NetAddre
am.updateAddress(address, sourceAddress, subnetworkID)
}
// AddAddressByIP adds an address where we are given an ip:port and not a
// appmessage.NetAddress.
func (am *AddressManager) AddAddressByIP(addressIP string, subnetworkID *subnetworkid.SubnetworkID) error {
// Split IP and port
ipString, portString, err := net.SplitHostPort(addressIP)
if err != nil {
return err
}
// Put it in appmessage.Netaddress
ip := net.ParseIP(ipString)
if ip == nil {
return errors.Errorf("invalid ip %s", ipString)
}
port, err := strconv.ParseUint(portString, 10, 0)
if err != nil {
return errors.Errorf("invalid port %s: %s", portString, err)
}
netAddress := appmessage.NewNetAddressIPPort(ip, uint16(port), 0)
am.AddAddress(netAddress, netAddress, subnetworkID) // XXX use correct src address
return nil
}
// numAddresses returns the number of addresses that belongs to a specific subnetwork id
// which are known to the address manager.
func (am *AddressManager) numAddresses(subnetworkID *subnetworkid.SubnetworkID) int {
@@ -1419,3 +1402,188 @@ func (am *AddressManager) IsBanned(address *appmessage.NetAddress) (bool, error)
}
return knownAddress.isBanned, nil
}
// initListeners initializes the configured net listeners and adds any bound
// addresses to the address manager so they can be advertised to peers.
//
// Exactly one of two modes runs: if external IPs were explicitly configured,
// only those are registered (with ManualPrio); otherwise the configured
// listen addresses are probed and registered (with BoundPrio, via
// addLocalAddress).
func (am *AddressManager) initListeners() error {
	if len(am.cfg.ExternalIPs) != 0 {
		// The DAG's default port is used for any external IP that doesn't
		// carry an explicit port of its own.
		defaultPort, err := strconv.ParseUint(am.cfg.NetParams().DefaultPort, 10, 16)
		if err != nil {
			log.Errorf("Can not parse default port %s for active DAG: %s",
				am.cfg.NetParams().DefaultPort, err)
			return err
		}
		for _, sip := range am.cfg.ExternalIPs {
			eport := uint16(defaultPort)
			host, portstr, err := net.SplitHostPort(sip)
			if err != nil {
				// no port, use default.
				host = sip
			} else {
				port, err := strconv.ParseUint(portstr, 10, 16)
				if err != nil {
					log.Warnf("Can not parse port from %s for "+
						"externalip: %s", sip, err)
					continue
				}
				eport = uint16(port)
			}
			// Bad entries are skipped with a warning rather than aborting
			// the whole initialization.
			na, err := am.HostToNetAddress(host, eport, appmessage.DefaultServices)
			if err != nil {
				log.Warnf("Not adding %s as externalip: %s", sip, err)
				continue
			}
			err = am.AddLocalAddress(na, ManualPrio)
			if err != nil {
				log.Warnf("Skipping specified external IP: %s", err)
			}
		}
	} else {
		// Listen for TCP connections at the configured addresses
		netAddrs, err := parseListeners(am.cfg.Listeners)
		if err != nil {
			return err
		}
		// Add bound addresses to address manager to be advertised to peers.
		for _, addr := range netAddrs {
			listener, err := net.Listen(addr.Network(), addr.String())
			if err != nil {
				log.Warnf("Can't listen on %s: %s", addr, err)
				continue
			}
			// The listener is opened only to resolve the concrete bound
			// address (e.g. the OS-assigned port), then closed right away;
			// actual serving happens elsewhere.
			addr := listener.Addr().String()
			err = listener.Close()
			if err != nil {
				return err
			}
			err = am.addLocalAddress(addr)
			if err != nil {
				log.Warnf("Skipping bound address %s: %s", addr, err)
			}
		}
	}
	return nil
}
// parseListeners determines whether each listen address is IPv4 and IPv6 and
// returns a slice of appropriate net.Addrs to listen on with TCP. It also
// properly detects addresses which apply to "all interfaces" and adds the
// address as both IPv4 and IPv6.
func parseListeners(addrs []string) ([]net.Addr, error) {
	result := make([]net.Addr, 0, 2*len(addrs))
	for _, listenAddr := range addrs {
		host, _, err := net.SplitHostPort(listenAddr)
		if err != nil {
			// Shouldn't happen due to already being normalized.
			return nil, err
		}

		// An empty host (or "*" on plan9) means "all interfaces", which
		// requires listening on both IPv4 and IPv6.
		if host == "" || (host == "*" && runtime.GOOS == "plan9") {
			result = append(result,
				simpleAddr{net: "tcp4", addr: listenAddr},
				simpleAddr{net: "tcp6", addr: listenAddr})
			continue
		}

		// net.ParseIP does not understand IPv6 zone identifiers, so strip
		// any "%zone" suffix before parsing.
		if zoneIndex := strings.LastIndex(host, "%"); zoneIndex > 0 {
			host = host[:zoneIndex]
		}

		ip := net.ParseIP(host)
		if ip == nil {
			// Not a literal IP; fall back to a DNS lookup and parse the
			// first address it returns.
			resolved, err := net.LookupHost(host)
			if err != nil {
				return nil, err
			}
			ip = net.ParseIP(resolved[0])
			if ip == nil {
				return nil, errors.Errorf("Cannot resolve IP address for host '%s'", host)
			}
		}

		// To4 returns nil when the IP is not an IPv4 address, so use it to
		// determine the address type.
		network := "tcp4"
		if ip.To4() == nil {
			network = "tcp6"
		}
		result = append(result, simpleAddr{net: network, addr: listenAddr})
	}
	return result, nil
}
// addLocalAddress adds an address that this node is listening on to the
// address manager so that it may be relayed to peers.
func (am *AddressManager) addLocalAddress(addr string) error {
	host, portString, err := net.SplitHostPort(addr)
	if err != nil {
		return err
	}
	port, err := strconv.ParseUint(portString, 10, 16)
	if err != nil {
		return err
	}

	ip := net.ParseIP(host)
	if ip == nil || !ip.IsUnspecified() {
		// Bound to a specific address - advertise it as-is.
		netAddr, err := am.HostToNetAddress(host, uint16(port), appmessage.DefaultServices)
		if err != nil {
			return err
		}
		am.AddLocalAddress(netAddr, BoundPrio)
		return nil
	}

	// Bound to an unspecified address - advertise every local interface
	// of the same address family.
	interfaceAddrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	boundIsIPv4 := ip.To4() != nil
	for _, interfaceAddr := range interfaceAddrs {
		ifaceIP, _, err := net.ParseCIDR(interfaceAddr.String())
		if err != nil {
			continue
		}
		// Skip interfaces of the wrong family: if bound to 0.0.0.0,
		// do not add IPv6 interfaces, and if bound to ::, do not add
		// IPv4 interfaces.
		if (ifaceIP.To4() != nil) != boundIsIPv4 {
			continue
		}
		netAddr := appmessage.NewNetAddressIPPort(ifaceIP, uint16(port), appmessage.DefaultServices)
		am.AddLocalAddress(netAddr, BoundPrio)
	}
	return nil
}
// simpleAddr provides a trivial net.Addr implementation backed by two plain
// strings: the network name and the address itself.
type simpleAddr struct {
	net, addr string
}

// Network returns the network name (e.g. "tcp4" or "tcp6").
//
// This is part of the net.Addr interface.
func (sa simpleAddr) Network() string {
	return sa.net
}

// String returns the address in string form.
//
// This is part of the net.Addr interface.
func (sa simpleAddr) String() string {
	return sa.addr
}

// Compile-time check that simpleAddr satisfies the net.Addr interface.
var _ net.Addr = simpleAddr{}

View File

@@ -123,7 +123,10 @@ func newAddrManagerForTest(t *testing.T, testName string,
t.Fatalf("error creating db: %s", err)
}
addressManager = New(cfg, databaseContext)
addressManager, err = New(cfg, databaseContext)
if err != nil {
t.Fatalf("error creating address manager: %s", err)
}
return addressManager, func() {
err := databaseContext.Close()
@@ -174,7 +177,7 @@ func TestAddAddressByIP(t *testing.T) {
amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
defer teardown()
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
err := AddAddressByIP(amgr, test.addrIP, nil)
if test.err != nil && err == nil {
t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
continue
@@ -250,7 +253,7 @@ func TestAttempt(t *testing.T) {
defer teardown()
// Add a new address and get it
err := amgr.AddAddressByIP(someIP+":8333", nil)
err := AddAddressByIP(amgr, someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@@ -273,7 +276,7 @@ func TestConnected(t *testing.T) {
defer teardown()
// Add a new address and get it
err := amgr.AddAddressByIP(someIP+":8333", nil)
err := AddAddressByIP(amgr, someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@@ -451,7 +454,7 @@ func TestGetAddress(t *testing.T) {
}
// Add a new address and get it
err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID)
err := AddAddressByIP(amgr, someIP+":8332", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@@ -478,7 +481,7 @@ func TestGetAddress(t *testing.T) {
// Now we repeat the same process, but now the address has the expected subnetwork ID.
// Add a new address and get it
err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID)
err = AddAddressByIP(amgr, someIP+":8333", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
@@ -552,15 +555,6 @@ func TestGetBestLocalAddress(t *testing.T) {
appmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
appmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
},
/* XXX
{
// Remote connection from Tor
appmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
appmessage.NetAddress{IP: net.IPv4zero},
appmessage.NetAddress{IP: net.ParseIP("204.124.8.100")},
appmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
*/
}
amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil)
@@ -630,5 +624,4 @@ func TestNetAddressKey(t *testing.T) {
continue
}
}
}

View File

@@ -0,0 +1,31 @@
package addressmanager
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"net"
"strconv"
)
// AddAddressByIP adds an address where we are given an ip:port and not an
// appmessage.NetAddress.
func AddAddressByIP(am *AddressManager, addressIP string, subnetworkID *subnetworkid.SubnetworkID) error {
// Split IP and port
ipString, portString, err := net.SplitHostPort(addressIP)
if err != nil {
return err
}
// Put it into an appmessage.NetAddress
ip := net.ParseIP(ipString)
if ip == nil {
return errors.Errorf("invalid ip %s", ipString)
}
port, err := strconv.ParseUint(portString, 10, 0)
if err != nil {
return errors.Errorf("invalid port %s: %s", portString, err)
}
netAddress := appmessage.NewNetAddressIPPort(ip, uint16(port), 0)
am.AddAddress(netAddress, netAddress, subnetworkID)
return nil
}

View File

@@ -12,7 +12,7 @@ const IDLength = 16
// ID identifies a network connection
type ID struct {
bytes []byte
bytes [IDLength]byte
}
// GenerateID generates a new ID
@@ -27,23 +27,22 @@ func GenerateID() (*ID, error) {
// IsEqual returns whether id equals to other.
func (id *ID) IsEqual(other *ID) bool {
return bytes.Equal(id.bytes, other.bytes)
return *id == *other
}
func (id *ID) String() string {
return hex.EncodeToString(id.bytes)
return hex.EncodeToString(id.bytes[:])
}
// Deserialize decodes a block from r into the receiver.
func (id *ID) Deserialize(r io.Reader) error {
id.bytes = make([]byte, IDLength)
_, err := io.ReadFull(r, id.bytes)
_, err := io.ReadFull(r, id.bytes[:])
return err
}
// Serialize serializes the receiver into the given writer.
func (id *ID) Serialize(w io.Writer) error {
_, err := w.Write(id.bytes)
_, err := w.Write(id.bytes[:])
return err
}

View File

@@ -1,8 +1,6 @@
package netadapter
import (
"net"
"strconv"
"sync"
"sync/atomic"
@@ -159,57 +157,3 @@ func (na *NetAdapter) Broadcast(netConnections []*NetConnection, message appmess
}
return nil
}
// GetBestLocalAddress returns the most appropriate local address to use
// for the given remote address.
func (na *NetAdapter) GetBestLocalAddress() (*appmessage.NetAddress, error) {
if len(na.cfg.ExternalIPs) > 0 {
host, portString, err := net.SplitHostPort(na.cfg.ExternalIPs[0])
if err != nil {
portString = na.cfg.NetParams().DefaultPort
}
portInt, err := strconv.Atoi(portString)
if err != nil {
return nil, err
}
ip := net.ParseIP(host)
if ip == nil {
hostAddrs, err := net.LookupHost(host)
if err != nil {
return nil, err
}
ip = net.ParseIP(hostAddrs[0])
if ip == nil {
return nil, errors.Errorf("Cannot resolve IP address for host '%s'", host)
}
}
return appmessage.NewNetAddressIPPort(ip, uint16(portInt), appmessage.SFNodeNetwork), nil
}
listenAddress := na.cfg.Listeners[0]
_, portString, err := net.SplitHostPort(listenAddress)
if err != nil {
portString = na.cfg.NetParams().DefaultPort
}
portInt, err := strconv.Atoi(portString)
if err != nil {
return nil, err
}
addresses, err := net.InterfaceAddrs()
if err != nil {
return nil, err
}
for _, address := range addresses {
ip, _, err := net.ParseCIDR(address.String())
if err != nil {
continue
}
return appmessage.NewNetAddressIPPort(ip, uint16(portInt), appmessage.SFNodeNetwork), nil
}
return nil, errors.New("no address was found")
}

View File

@@ -52,7 +52,7 @@ func (c *gRPCConnection) sendLoop() error {
return err
}
err = c.stream.Send(messageProto)
err = c.send(messageProto)
if err != nil {
return err
}
@@ -63,7 +63,7 @@ func (c *gRPCConnection) sendLoop() error {
func (c *gRPCConnection) receiveLoop() error {
messageNumber := uint64(0)
for c.IsConnected() {
protoMessage, err := c.stream.Recv()
protoMessage, err := c.receive()
if err != nil {
if err == io.EOF {
err = nil

View File

@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
"github.com/pkg/errors"
"net"
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server"
@@ -12,11 +13,18 @@ import (
)
type gRPCConnection struct {
server *gRPCServer
address *net.TCPAddr
isOutbound bool
stream grpcStream
router *router.Router
server *gRPCServer
address *net.TCPAddr
stream grpcStream
router *router.Router
lowLevelClientConnection *grpc.ClientConn
// streamLock protects concurrent access to stream.
// Note that it's an RWMutex. Despite what the name
// implies, we use it to RLock() send() and receive() because
// they can work perfectly fine in parallel, and Lock()
// closeSend() because it must run alone.
streamLock sync.RWMutex
stopChan chan struct{}
clientConn grpc.ClientConn
@@ -26,14 +34,16 @@ type gRPCConnection struct {
isConnected uint32
}
func newConnection(server *gRPCServer, address *net.TCPAddr, isOutbound bool, stream grpcStream) *gRPCConnection {
func newConnection(server *gRPCServer, address *net.TCPAddr, stream grpcStream,
lowLevelClientConnection *grpc.ClientConn) *gRPCConnection {
connection := &gRPCConnection{
server: server,
address: address,
isOutbound: isOutbound,
stream: stream,
stopChan: make(chan struct{}),
isConnected: 1,
server: server,
address: address,
stream: stream,
stopChan: make(chan struct{}),
isConnected: 1,
lowLevelClientConnection: lowLevelClientConnection,
}
return connection
@@ -75,7 +85,7 @@ func (c *gRPCConnection) SetOnInvalidMessageHandler(onInvalidMessageHandler serv
}
func (c *gRPCConnection) IsOutbound() bool {
return c.isOutbound
return c.lowLevelClientConnection != nil
}
// Disconnect disconnects the connection
@@ -90,9 +100,8 @@ func (c *gRPCConnection) Disconnect() {
close(c.stopChan)
if c.isOutbound {
clientStream := c.stream.(protowire.P2P_MessageStreamClient)
_ = clientStream.CloseSend() // ignore error because we don't really know what's the status of the connection
if c.IsOutbound() {
c.closeSend()
log.Debugf("Disconnected from %s", c)
}
@@ -105,3 +114,34 @@ func (c *gRPCConnection) Disconnect() {
func (c *gRPCConnection) Address() *net.TCPAddr {
return c.address
}
func (c *gRPCConnection) receive() (*protowire.KaspadMessage, error) {
// We use RLock here and in send() because they can work
// in parallel. closeSend(), however, must not have either
// receive() nor send() running while it's running.
c.streamLock.RLock()
defer c.streamLock.RUnlock()
return c.stream.Recv()
}
func (c *gRPCConnection) send(message *protowire.KaspadMessage) error {
// We use RLock here and in receive() because they can work
// in parallel. closeSend(), however, must not have either
// receive() nor send() running while it's running.
c.streamLock.RLock()
defer c.streamLock.RUnlock()
return c.stream.Send(message)
}
func (c *gRPCConnection) closeSend() {
c.streamLock.Lock()
defer c.streamLock.Unlock()
clientStream := c.stream.(protowire.P2P_MessageStreamClient)
// ignore error because we don't really know what's the status of the connection
_ = clientStream.CloseSend()
_ = c.lowLevelClientConnection.Close()
}

View File

@@ -90,12 +90,12 @@ func (s *gRPCServer) Connect(address string) (server.Connection, error) {
ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
defer cancel()
gRPCConnection, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock())
gRPCClientConnection, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
return nil, errors.Wrapf(err, "error connecting to %s", address)
}
client := protowire.NewP2PClient(gRPCConnection)
client := protowire.NewP2PClient(gRPCClientConnection)
stream, err := client.MessageStream(context.Background(), grpc.UseCompressor(gzip.Name))
if err != nil {
return nil, errors.Wrapf(err, "error getting client stream for %s", address)
@@ -110,7 +110,7 @@ func (s *gRPCServer) Connect(address string) (server.Connection, error) {
return nil, errors.Errorf("non-tcp addresses are not supported")
}
connection := newConnection(s, tcpAddress, true, stream)
connection := newConnection(s, tcpAddress, stream, gRPCClientConnection)
err = s.onConnectedHandler(connection)
if err != nil {

View File

@@ -29,7 +29,7 @@ func (p *p2pServer) MessageStream(stream protowire.P2P_MessageStreamServer) erro
return errors.Errorf("non-tcp connections are not supported")
}
connection := newConnection(p.server, tcpAddress, false, stream)
connection := newConnection(p.server, tcpAddress, stream, nil)
err := p.server.onConnectedHandler(connection)
if err != nil {

View File

@@ -4,13 +4,13 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
)
func (x *KaspadMessage_Reject) toWireMessage() (appmessage.Message, error) {
func (x *KaspadMessage_Reject) toAppMessage() (appmessage.Message, error) {
return &appmessage.MsgReject{
Reason: x.Reject.Reason,
}, nil
}
func (x *KaspadMessage_Reject) fromWireMessage(msgReject *appmessage.MsgReject) error {
func (x *KaspadMessage_Reject) fromAppMessage(msgReject *appmessage.MsgReject) error {
x.Reject = &RejectMessage{
Reason: msgReject.Reason,
}

View File

@@ -174,6 +174,13 @@ func toPayload(message appmessage.Message) (isKaspadMessage_Payload, error) {
return nil, err
}
return payload, nil
case *appmessage.MsgReject:
payload := new(KaspadMessage_Reject)
err := payload.fromAppMessage(message)
if err != nil {
return nil, err
}
return payload, nil
default:
return nil, errors.Errorf("unknown message type %T", message)
}

View File

@@ -703,46 +703,3 @@ func (c *Client) GetTxOutAsync(txHash *daghash.Hash, index uint32, mempool bool)
func (c *Client) GetTxOut(txHash *daghash.Hash, index uint32, mempool bool) (*model.GetTxOutResult, error) {
return c.GetTxOutAsync(txHash, index, mempool).Receive()
}
// FutureRescanBlocksResult is a future promise to deliver the result of a
// RescanBlocksAsync RPC invocation (or an applicable error).
type FutureRescanBlocksResult chan *response
// Receive waits for the response promised by the future and returns the
// discovered rescanblocks data.
func (r FutureRescanBlocksResult) Receive() ([]model.RescannedBlock, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
var rescanBlocksResult []model.RescannedBlock
err = json.Unmarshal(res, &rescanBlocksResult)
if err != nil {
return nil, errors.Wrap(err, "couldn't decode rescanBlocks response")
}
return rescanBlocksResult, nil
}
// RescanBlocksAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See RescanBlocks for the blocking version and more details.
func (c *Client) RescanBlocksAsync(blockHashes []*daghash.Hash) FutureRescanBlocksResult {
strBlockHashes := make([]string, len(blockHashes))
for i := range blockHashes {
strBlockHashes[i] = blockHashes[i].String()
}
cmd := model.NewRescanBlocksCmd(strBlockHashes)
return c.sendCmd(cmd)
}
// RescanBlocks rescans the blocks identified by blockHashes, in order, using
// the client's loaded transaction filter. The blocks do not need to be on the
// main dag, but they do need to be adjacent to each other.
func (c *Client) RescanBlocks(blockHashes []*daghash.Hash) ([]model.RescannedBlock, error) {
return c.RescanBlocksAsync(blockHashes).Receive()
}

View File

@@ -547,9 +547,7 @@ func (c *Client) reregisterNtfns() error {
// ignoreResends is a set of all methods for requests that are "long running"
// are not be reissued by the client on reconnect.
var ignoreResends = map[string]struct{}{
"rescan": {},
}
var ignoreResends = map[string]struct{}{}
func (c *Client) collectResendRequests() []*jsonRequest {
c.requestLock.Lock()

View File

@@ -439,9 +439,6 @@ func parseTxAcceptedVerboseNtfnParams(params []json.RawMessage) (*model.TxRawRes
return nil, err
}
// TODO: change txacceptedverbose notification callbacks to use nicer
// types for all details about the transaction (i.e. decoding hashes
// from their string encoding).
return &rawTx, nil
}
@@ -613,7 +610,7 @@ func (c *Client) LoadTxFilterAsync(reload bool, addresses []util.Address,
// LoadTxFilter loads, reloads, or adds data to a websocket client's transaction
// filter. The filter is consistently updated based on inspected transactions
// during mempool acceptance, block acceptance, and for all rescanned blocks.
// during mempool acceptance, and for block acceptance.
func (c *Client) LoadTxFilter(reload bool, addresses []util.Address, outpoints []appmessage.Outpoint) error {
return c.LoadTxFilterAsync(reload, addresses, outpoints).Receive()
}

View File

@@ -102,10 +102,8 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{
func handleGetBlockTemplateRequest(s *Server, request *model.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) {
// Return an error if there are no peers connected since there is no
// way to relay a found block or receive transactions to work on.
// However, allow this state when running in the regression test or
// simulation test mode.
if !(s.cfg.RegressionTest || s.cfg.Simnet) &&
s.connectionManager.ConnectionCount() == 0 {
// However, allow this state when running in the simulation test mode.
if !s.cfg.Simnet && s.connectionManager.ConnectionCount() == 0 {
return nil, &model.RPCError{
Code: model.ErrRPCClientNotConnected,

View File

@@ -34,14 +34,9 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
if c.IncludeMempool != nil {
includeMempool = *c.IncludeMempool
}
// TODO: This is racy. It should attempt to fetch it directly and check
// the error.
if includeMempool && s.txMempool.HaveTransaction(txID) {
tx, ok := s.txMempool.FetchTransaction(txID)
if !ok {
return nil, rpcNoTxInfoError(txID)
}
tx, ok := s.txMempool.FetchTransaction(txID)
if includeMempool && ok {
mtx := tx.MsgTx()
if c.Vout > uint32(len(mtx.TxOut)-1) {
return nil, &model.RPCError{

View File

@@ -1,71 +0,0 @@
package rpc
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/util"
)
// rescanBlockFilter rescans a block for any relevant transactions for the
// passed lookup keys. Any discovered transactions are returned hex encoded as
// a string slice.
//
// NOTE: This extension is ported from github.com/decred/dcrd
func rescanBlockFilter(filter *wsClientFilter, block *util.Block, params *dagconfig.Params) []string {
var transactions []string
filter.mu.Lock()
defer filter.mu.Unlock()
for _, tx := range block.Transactions() {
msgTx := tx.MsgTx()
// Keep track of whether the transaction has already been added
// to the result. It shouldn't be added twice.
added := false
// Scan inputs if not a coinbase transaction.
if !msgTx.IsCoinBase() {
for _, input := range msgTx.TxIn {
if !filter.existsUnspentOutpointNoLock(&input.PreviousOutpoint) {
continue
}
if !added {
transactions = append(
transactions,
txHexString(msgTx))
added = true
}
}
}
// Scan outputs.
for i, output := range msgTx.TxOut {
_, addr, err := txscript.ExtractScriptPubKeyAddress(
output.ScriptPubKey, params)
if err != nil {
continue
}
if addr != nil {
if !filter.existsAddress(addr) {
continue
}
op := appmessage.Outpoint{
TxID: *tx.ID(),
Index: uint32(i),
}
filter.addUnspentOutpoint(&op)
if !added {
transactions = append(
transactions,
txHexString(msgTx))
added = true
}
}
}
}
return transactions
}

View File

@@ -1,72 +0,0 @@
package rpc
import (
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/rpc/model"
"github.com/kaspanet/kaspad/util/daghash"
)
// handleRescanBlocks implements the rescanBlocks command extension for
// websocket connections.
//
// NOTE: This extension is ported from github.com/decred/dcrd
func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) {
cmd, ok := icmd.(*model.RescanBlocksCmd)
if !ok {
return nil, model.ErrRPCInternal
}
// Load client's transaction filter. Must exist in order to continue.
filter := wsc.FilterData()
if filter == nil {
return nil, &model.RPCError{
Code: model.ErrRPCMisc,
Message: "Transaction filter must be loaded before rescanning",
}
}
blockHashes := make([]*daghash.Hash, len(cmd.BlockHashes))
for i := range cmd.BlockHashes {
hash, err := daghash.NewHashFromStr(cmd.BlockHashes[i])
if err != nil {
return nil, err
}
blockHashes[i] = hash
}
discoveredData := make([]model.RescannedBlock, 0, len(blockHashes))
// Iterate over each block in the request and rescan. When a block
// contains relevant transactions, add it to the response.
bc := wsc.server.dag
params := wsc.server.dag.Params
var lastBlockHash *daghash.Hash
for i := range blockHashes {
block, err := bc.BlockByHash(blockHashes[i])
if err != nil {
return nil, &model.RPCError{
Code: model.ErrRPCBlockNotFound,
Message: "Failed to fetch block: " + err.Error(),
}
}
if lastBlockHash != nil && !block.MsgBlock().Header.ParentHashes[0].IsEqual(lastBlockHash) { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation.
return nil, &model.RPCError{
Code: model.ErrRPCInvalidParameter,
Message: fmt.Sprintf("Block %s is not a child of %s",
blockHashes[i], lastBlockHash),
}
}
lastBlockHash = blockHashes[i]
transactions := rescanBlockFilter(filter, block, params)
if len(transactions) != 0 {
discoveredData = append(discoveredData, model.RescannedBlock{
Hash: cmd.BlockHashes[i],
Transactions: transactions,
})
}
}
return &discoveredData, nil
}

View File

@@ -29,6 +29,7 @@ func handleSendRawTransaction(s *Server, cmd interface{}, closeChan <-chan struc
}
tx := util.NewTx(&msgTx)
log.Criticalf("~~~~~ handleSendRawTransaction got %s", tx.ID())
err = s.protocolManager.AddTransaction(tx)
if err != nil {
if !errors.As(err, &mempool.RuleError{}) {

View File

@@ -123,18 +123,6 @@ func NewLoadTxFilterCmd(reload bool, addresses []string, outpoints []Outpoint) *
}
}
// RescanBlocksCmd defines the rescan JSON-RPC command.
type RescanBlocksCmd struct {
// Block hashes as a string array.
BlockHashes []string
}
// NewRescanBlocksCmd returns a new instance which can be used to issue a rescan
// JSON-RPC command.
func NewRescanBlocksCmd(blockHashes []string) *RescanBlocksCmd {
return &RescanBlocksCmd{BlockHashes: blockHashes}
}
func init() {
// The commands in this file are only usable by websockets.
flags := UFWebsocketOnly
@@ -148,5 +136,4 @@ func init() {
MustRegisterCommand("stopNotifyBlocks", (*StopNotifyBlocksCmd)(nil), flags)
MustRegisterCommand("stopNotifyChainChanges", (*StopNotifyChainChangesCmd)(nil), flags)
MustRegisterCommand("stopNotifyNewTransactions", (*StopNotifyNewTransactionsCmd)(nil), flags)
MustRegisterCommand("rescanBlocks", (*RescanBlocksCmd)(nil), flags)
}

View File

@@ -157,20 +157,6 @@ func TestRPCServerWebsocketCommands(t *testing.T) {
Outpoints: []model.Outpoint{{TxID: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}},
},
},
{
name: "rescanBlocks",
newCmd: func() (interface{}, error) {
return model.NewCommand("rescanBlocks", `["0000000000000000000000000000000000000000000000000000000000000123"]`)
},
staticCmd: func() interface{} {
blockhashes := []string{"0000000000000000000000000000000000000000000000000000000000000123"}
return model.NewRescanBlocksCmd(blockhashes)
},
marshalled: `{"jsonrpc":"1.0","method":"rescanBlocks","params":[["0000000000000000000000000000000000000000000000000000000000000123"]],"id":1}`,
unmarshalled: &model.RescanBlocksCmd{
BlockHashes: []string{"0000000000000000000000000000000000000000000000000000000000000123"},
},
},
}
t.Logf("Running %d tests", len(tests))

View File

@@ -9,10 +9,3 @@ package model
type SessionResult struct {
SessionID uint64 `json:"sessionId"`
}
// RescannedBlock contains the hash and all discovered transactions of a single
// rescanned block.
type RescannedBlock struct {
Hash string `json:"hash"`
Transactions []string `json:"transactions"`
}

View File

@@ -1,50 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package model_test
import (
"encoding/json"
"testing"
"github.com/kaspanet/kaspad/infrastructure/network/rpc/model"
)
// TestRPCServerWebsocketResults ensures any results that have custom marshalling
// work as intended.
func TestRPCServerWebsocketResults(t *testing.T) {
t.Parallel()
tests := []struct {
name string
result interface{}
expected string
}{
{
name: "RescannedBlock",
result: &model.RescannedBlock{
Hash: "blockhash",
Transactions: []string{"serializedtx"},
},
expected: `{"hash":"blockhash","transactions":["serializedtx"]}`,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
marshalled, err := json.Marshal(test.result)
if err != nil {
t.Errorf("Test #%d (%s) unexpected error: %v", i,
test.name, err)
continue
}
if string(marshalled) != test.expected {
t.Errorf("Test #%d (%s) unexpected marhsalled data - "+
"got %s, want %s", i, test.name, marshalled,
test.expected)
continue
}
}
}

View File

@@ -106,8 +106,6 @@ var rpcLimited = map[string]struct{}{
"notifyNewTransactions": {},
"notifyReceived": {},
"notifySpent": {},
"rescan": {},
"rescanBlocks": {},
"session": {},
// Websockets AND HTTP/S commands

View File

@@ -522,20 +522,11 @@ var helpDescsEnUS = map[string]string{
"outpoint-index": "The index of the outpoint",
// LoadTxFilterCmd help.
"loadTxFilter--synopsis": "Load, add to, or reload a websocket client's transaction filter for mempool transactions, new blocks and rescanBlocks.",
"loadTxFilter--synopsis": "Load, add to, or reload a websocket client's transaction filter for mempool transactions and new blocks.",
"loadTxFilter-reload": "Load a new filter instead of adding data to an existing one",
"loadTxFilter-addresses": "Array of addresses to add to the transaction filter",
"loadTxFilter-outpoints": "Array of outpoints to add to the transaction filter",
// RescanBlocks help.
"rescanBlocks--synopsis": "Rescan blocks for transactions matching the loaded transaction filter.",
"rescanBlocks-blockHashes": "List of hashes to rescan. Each next block must be a child of the previous.",
"rescanBlocks--result0": "List of matching blocks.",
// RescannedBlock help.
"rescannedBlock-hash": "Hash of the matching block.",
"rescannedBlock-transactions": "List of matching transactions, serialized and hex-encoded.",
// Uptime help.
"uptime--synopsis": "Returns the total uptime of the server.",
"uptime--result0": "The number of seconds that the server has been running",
@@ -604,7 +595,6 @@ var rpcResultTypes = map[string][]interface{}{
"stopNotifyChainChanges": nil,
"notifyNewTransactions": nil,
"stopNotifyNewTransactions": nil,
"rescanBlocks": {(*[]model.RescannedBlock)(nil)},
}
// helpCacher provides a concurrent safe type that provides help and usage for

View File

@@ -73,7 +73,6 @@ var wsHandlersBeforeInit = map[string]wsCommandHandler{
"stopNotifyBlocks": handleStopNotifyBlocks,
"stopNotifyChainChanges": handleStopNotifyChainChanges,
"stopNotifyNewTransactions": handleStopNotifyNewTransactions,
"rescanBlocks": handleRescanBlocks,
}
// WebsocketHandler handles a new websocket client by creating a new wsClient,

View File

@@ -165,9 +165,6 @@ func removeDatabase(cfg *config.Config) error {
return os.RemoveAll(dbPath)
}
// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
// dbPath returns the path to the block database given a database type.
func blockDbPath(cfg *config.Config) string {
// The database name is based on the database type.

View File

@@ -68,7 +68,7 @@ func (s *kaspadService) Execute(args []string, r <-chan svc.ChangeRequest, chang
// be properly logged
doneChan := make(chan error)
startedChan := make(chan struct{})
spawn(func() {
spawn("kaspadMain-windows", func() {
err := kaspadMain(startedChan)
doneChan <- err
})

View File

@@ -1,6 +1,7 @@
package integration
import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"testing"
)
@@ -9,7 +10,7 @@ func TestAddressExchange(t *testing.T) {
defer teardown()
testAddress := "1.2.3.4:6789"
err := appHarness1.app.AddressManager().AddAddressByIP(testAddress, nil)
err := addressmanager.AddAddressByIP(appHarness1.app.AddressManager(), testAddress, nil)
if err != nil {
t.Fatalf("Error adding address to addressManager: %+v", err)
}

View File

@@ -12,10 +12,6 @@ import (
)
var (
// ErrChecksumMismatch describes an error where decoding failed due
// to a bad checksum.
ErrChecksumMismatch = errors.New("checksum mismatch")
// ErrUnknownAddressType describes an error where an address can not
// decoded as a specific address type due to the string encoding
// begining with an identifier byte unknown to any standard or
@@ -46,9 +42,6 @@ const (
// Prefix for the dev network.
Bech32PrefixKaspaDev
// Prefix for the regression test network.
Bech32PrefixKaspaReg
// Prefix for the test network.
Bech32PrefixKaspaTest
@@ -60,7 +53,6 @@ const (
var stringsToBech32Prefixes = map[string]Bech32Prefix{
"kaspa": Bech32PrefixKaspa,
"kaspadev": Bech32PrefixKaspaDev,
"kaspareg": Bech32PrefixKaspaReg,
"kaspatest": Bech32PrefixKaspaTest,
"kaspasim": Bech32PrefixKaspaSim,
}

View File

@@ -321,18 +321,13 @@ func TestDecodeAddressErrorConditions(t *testing.T) {
"decoded address's prefix could not be parsed",
},
{
"kaspareg:qpm2qsznhks23z7629mms6s4cwef74vcwv4w75h796",
util.Bech32PrefixKaspaTest,
"decoded address is of wrong network",
},
{
"kaspareg:raskzctpv9skzctpv9skzctpv9skzctpvyn6vmqa89",
util.Bech32PrefixKaspaReg,
"kaspasim:raskzctpv9skzctpv9skzctpv9skzctpvy37ct7zaf",
util.Bech32PrefixKaspaSim,
"unknown address type",
},
{
"kaspareg:raskzcgrjj7l73l",
util.Bech32PrefixKaspaReg,
"kaspasim:raskzcg58mth0an",
util.Bech32PrefixKaspaSim,
"decoded address is of unknown size",
},
{
@@ -360,7 +355,6 @@ func TestParsePrefix(t *testing.T) {
expectedError bool
}{
{"kaspa", util.Bech32PrefixKaspa, false},
{"kaspareg", util.Bech32PrefixKaspaReg, false},
{"kaspatest", util.Bech32PrefixKaspaTest, false},
{"kaspasim", util.Bech32PrefixKaspaSim, false},
{"blabla", util.Bech32PrefixUnknown, true},
@@ -388,7 +382,6 @@ func TestPrefixToString(t *testing.T) {
expectedPrefixStr string
}{
{util.Bech32PrefixKaspa, "kaspa"},
{util.Bech32PrefixKaspaReg, "kaspareg"},
{util.Bech32PrefixKaspaTest, "kaspatest"},
{util.Bech32PrefixKaspaSim, "kaspasim"},
{util.Bech32PrefixUnknown, ""},

9
util/bigintpool/log.go Normal file
View File

@@ -0,0 +1,9 @@
package bigintpool
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.UTIL)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 6
appPatch uint = 5
appPatch uint = 8
)
// appBuild is defined as a variable so it can be overridden during the build