diff --git a/cmd/kaspactl/config.go b/cmd/kaspactl/config.go index 1f9c6b01b..4cea58acf 100644 --- a/cmd/kaspactl/config.go +++ b/cmd/kaspactl/config.go @@ -16,7 +16,7 @@ import ( "strings" "github.com/jessevdk/go-flags" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util" ) @@ -24,7 +24,7 @@ const ( // unusableFlags are the command usage flags which this utility are not // able to use. In particular it doesn't support websockets and // consequently notifications. - unusableFlags = rpcmodel.UFWebsocketOnly | rpcmodel.UFNotification + unusableFlags = model.UFWebsocketOnly | model.UFNotification ) var ( @@ -45,10 +45,10 @@ func listCommands() { ) // Get a list of registered commands and categorize and filter them. - cmdMethods := rpcmodel.RegisteredCmdMethods() + cmdMethods := model.RegisteredCmdMethods() categorized := make([][]string, numCategories) for _, method := range cmdMethods { - flags, err := rpcmodel.MethodUsageFlags(method) + flags, err := model.MethodUsageFlags(method) if err != nil { // This should never happen since the method was just // returned from the package, but be safe. @@ -60,7 +60,7 @@ func listCommands() { continue } - usage, err := rpcmodel.MethodUsageText(method) + usage, err := model.MethodUsageText(method) if err != nil { // This should never happen since the method was just // returned from the package, but be safe. diff --git a/cmd/kaspactl/httpclient.go b/cmd/kaspactl/httpclient.go index c6d133bb4..b54e30c97 100644 --- a/cmd/kaspactl/httpclient.go +++ b/cmd/kaspactl/httpclient.go @@ -11,7 +11,7 @@ import ( "net/http" "github.com/btcsuite/go-socks/socks" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // newHTTPClient returns a new HTTP client that is configured according to the @@ -117,7 +117,7 @@ func sendPostRequest(marshalledJSON []byte, cfg *ConfigFlags) ([]byte, error) { } // Unmarshal the response. - var resp rpcmodel.Response + var resp model.Response if err := json.Unmarshal(respBytes, &resp); err != nil { return nil, err } diff --git a/cmd/kaspactl/kaspactl.go b/cmd/kaspactl/kaspactl.go index 0f536ddf8..bc4d4498d 100644 --- a/cmd/kaspactl/kaspactl.go +++ b/cmd/kaspactl/kaspactl.go @@ -11,7 +11,7 @@ import ( "path/filepath" "strings" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) const ( @@ -21,7 +21,7 @@ const ( // commandUsage display the usage for a specific command. func commandUsage(method string) { - usage, err := rpcmodel.MethodUsageText(method) + usage, err := model.MethodUsageText(method) if err != nil { // This should never happen since the method was already checked // before calling this function, but be safe. @@ -60,7 +60,7 @@ func main() { // Ensure the specified method identifies a valid registered command and // is one of the usable types. method := args[0] - usageFlags, err := rpcmodel.MethodUsageFlags(method) + usageFlags, err := model.MethodUsageFlags(method) if err != nil { fmt.Fprintf(os.Stderr, "Unrecognized command '%s'\n", method) fmt.Fprintln(os.Stderr, listCmdMessage) @@ -105,13 +105,13 @@ func main() { // Attempt to create the appropriate command using the arguments // provided by the user. - cmd, err := rpcmodel.NewCommand(method, params...) + cmd, err := model.NewCommand(method, params...) 
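The hunks above only swap the import path from `rpcmodel` to `rpc/model`; every call site keeps the same shape. As a rough, self-contained sketch of what a caller of the renamed package looks like (the method name and request ID below are illustrative, not taken from this diff — only the `NewCommand`/`MarshalCommand` usage mirrors kaspactl.go):

```go
package main

import (
	"fmt"
	"os"

	"github.com/kaspanet/kaspad/rpc/model" // formerly github.com/kaspanet/kaspad/rpcmodel
)

func main() {
	// Build a command the same way kaspactl does: NewCommand takes the
	// method name plus any already-parsed parameters.
	cmd, err := model.NewCommand("getBlockDagInfo")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Marshal it into a JSON-RPC request (ID 1 here), ready to be sent to
	// the RPC server, mirroring the MarshalCommand call in kaspactl.go.
	marshalledJSON, err := model.MarshalCommand(1, cmd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(string(marshalledJSON))
}
```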
if err != nil { // Show the error along with its error code when it's a - // rpcmodel.Error as it reallistcally will always be since the + // model.Error as it reallistcally will always be since the // NewCommand function is only supposed to return errors of that // type. - var rpcModelErr rpcmodel.Error + var rpcModelErr model.Error if ok := errors.As(err, &rpcModelErr); ok { fmt.Fprintf(os.Stderr, "%s error: %s (command code: %s)\n", method, err, rpcModelErr.ErrorCode) @@ -119,7 +119,7 @@ func main() { os.Exit(1) } - // The error is not a rpcmodel.Error and this really should not + // The error is not a model.Error and this really should not // happen. Nevertheless, fallback to just showing the error // if it should happen due to a bug in the package. fmt.Fprintf(os.Stderr, "%s error: %s\n", method, err) @@ -129,7 +129,7 @@ func main() { // Marshal the command into a JSON-RPC byte slice in preparation for // sending it to the RPC server. - marshalledJSON, err := rpcmodel.MarshalCommand(1, cmd) + marshalledJSON, err := model.MarshalCommand(1, cmd) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) diff --git a/cmd/kaspaminer/client.go b/cmd/kaspaminer/client.go index 7eff63852..368782e0c 100644 --- a/cmd/kaspaminer/client.go +++ b/cmd/kaspaminer/client.go @@ -1,7 +1,7 @@ package main import ( - "github.com/kaspanet/kaspad/rpcclient" + "github.com/kaspanet/kaspad/rpc/client" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/wire" "github.com/pkg/errors" @@ -10,30 +10,30 @@ import ( ) type minerClient struct { - *rpcclient.Client + *client.Client onBlockAdded chan struct{} } -func newMinerClient(connCfg *rpcclient.ConnConfig) (*minerClient, error) { - client := &minerClient{ +func newMinerClient(connCfg *client.ConnConfig) (*minerClient, error) { + minerClient := &minerClient{ onBlockAdded: make(chan struct{}, 1), } - notificationHandlers := &rpcclient.NotificationHandlers{ + notificationHandlers := &client.NotificationHandlers{ OnFilteredBlockAdded: func(_ uint64, header *wire.BlockHeader, txs []*util.Tx) { - client.onBlockAdded <- struct{}{} + minerClient.onBlockAdded <- struct{}{} }, } var err error - client.Client, err = rpcclient.New(connCfg, notificationHandlers) + minerClient.Client, err = client.New(connCfg, notificationHandlers) if err != nil { return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err) } - if err = client.NotifyBlocks(); err != nil { - return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err) + if err = minerClient.NotifyBlocks(); err != nil { + return nil, errors.Errorf("Error while registering minerClient %s for block notifications: %s", minerClient.Host(), err) } - return client, nil + return minerClient, nil } func connectToServer(cfg *configFlags) (*minerClient, error) { @@ -47,7 +47,7 @@ func connectToServer(cfg *configFlags) (*minerClient, error) { return nil, err } - connCfg := &rpcclient.ConnConfig{ + connCfg := &client.ConnConfig{ Host: rpcAddr, Endpoint: "ws", User: cfg.RPCUser, diff --git a/cmd/kaspaminer/log.go b/cmd/kaspaminer/log.go index e3ed6115f..55de49fa5 100644 --- a/cmd/kaspaminer/log.go +++ b/cmd/kaspaminer/log.go @@ -3,7 +3,7 @@ package main import ( "fmt" "github.com/kaspanet/kaspad/logs" - "github.com/kaspanet/kaspad/rpcclient" + "github.com/kaspanet/kaspad/rpc/client" "github.com/kaspanet/kaspad/util/panics" "os" ) @@ -28,5 +28,5 @@ func initLog(logFile, errLogFile string) { } func enableRPCLogging() { - rpcclient.UseLogger(backendLog, 
logs.LevelTrace) + client.UseLogger(backendLog, logs.LevelTrace) } diff --git a/cmd/kaspaminer/mineloop.go b/cmd/kaspaminer/mineloop.go index 09bc25f11..26656c713 100644 --- a/cmd/kaspaminer/mineloop.go +++ b/cmd/kaspaminer/mineloop.go @@ -7,8 +7,8 @@ import ( "sync/atomic" "time" - "github.com/kaspanet/kaspad/rpcclient" - "github.com/kaspanet/kaspad/rpcmodel" + clientpkg "github.com/kaspanet/kaspad/rpc/client" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" "github.com/pkg/errors" @@ -79,7 +79,7 @@ func logHashRate() { func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *util.Block, mineWhenNotSynced bool, templateStopChan chan struct{}, errChan chan error) { - newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult) + newTemplateChan := make(chan *model.GetBlockTemplateResult) spawn("templatesLoop", func() { templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan) }) @@ -91,7 +91,7 @@ func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan func handleFoundBlock(client *minerClient, block *util.Block) error { log.Infof("Found block %s with parents %s. Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host()) - err := client.SubmitBlock(block, &rpcmodel.SubmitBlockOptions{}) + err := client.SubmitBlock(block, &model.SubmitBlockOptions{}) if err != nil { return errors.Errorf("Error submitting block %s to %s: %s", block.Hash(), client.Host(), err) } @@ -120,7 +120,7 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util } func templatesLoop(client *minerClient, miningAddr util.Address, - newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) { + newTemplateChan chan *model.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) { longPollID := "" getBlockTemplateLongPoll := func() { @@ -130,7 +130,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address, log.Infof("Requesting template without longPollID from %s", client.Host()) } template, err := getBlockTemplate(client, miningAddr, longPollID) - if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) { + if nativeerrors.Is(err, clientpkg.ErrResponseTimedOut) { log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host()) return } else if err != nil { @@ -157,11 +157,11 @@ func templatesLoop(client *minerClient, miningAddr util.Address, } } -func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) { +func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*model.GetBlockTemplateResult, error) { return client.GetBlockTemplate(miningAddr.String(), longPollID) } -func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, +func solveLoop(newTemplateChan chan *model.GetBlockTemplateResult, foundBlock chan *util.Block, mineWhenNotSynced bool, errChan chan error) { var stopOldTemplateSolving chan struct{} @@ -179,7 +179,7 @@ func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock } stopOldTemplateSolving = make(chan struct{}) - block, err := rpcclient.ConvertGetBlockTemplateResultToBlock(template) + block, err := clientpkg.ConvertGetBlockTemplateResultToBlock(template) if err != nil { errChan <- errors.Errorf("Error parsing block: %s", err) return diff --git 
a/connmanager/connection_requests.go b/connmanager/connection_requests.go index df999f2f9..17073a130 100644 --- a/connmanager/connection_requests.go +++ b/connmanager/connection_requests.go @@ -102,3 +102,10 @@ func (c *ConnectionManager) AddConnectionRequest(address string, isPermanent boo } }) } + +// RemoveConnection disconnects the connection for the given address +// and removes it entirely from the connection manager. +func (c *ConnectionManager) RemoveConnection(address string) { + // TODO(libp2p): unimplemented + panic("unimplemented") +} diff --git a/connmanager/connmanager.go b/connmanager/connmanager.go index 50efa7e3f..5b77dcd62 100644 --- a/connmanager/connmanager.go +++ b/connmanager/connmanager.go @@ -107,3 +107,8 @@ func (c *ConnectionManager) connectionsLoop() { <-time.Tick(connectionsLoopInterval) } } + +// ConnectionCount returns the count of the connected connections +func (c *ConnectionManager) ConnectionCount() int { + return c.netAdapter.ConnectionCount() +} diff --git a/connmgr/README.md b/connmgr/README.md deleted file mode 100644 index 74524ce7b..000000000 --- a/connmgr/README.md +++ /dev/null @@ -1,27 +0,0 @@ -connmgr -======= - -[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/connmgr) - -Package connmgr implements a generic Kaspa network connection manager. - -## Overview - -Connection Manager handles all the general connection concerns such as -maintaining a set number of outbound connections, sourcing peers, banning, -limiting max connections, etc. - -The package provides a generic connection manager which is able to accept -connection requests from a source or a set of given addresses, dial them and -notify the caller on connections. The main intended use is to initialize a pool -of active connections and maintain them to remain connected to the P2P network. - -In addition the connection manager provides the following utilities: - -- Notifications on connections or disconnections -- Handle failures and retry new addresses from the source -- Connect only to specified addresses -- Permanent connections with increasing backoff retry timers -- Disconnect or Remove an established connection - diff --git a/connmgr/connmanager.go b/connmgr/connmanager.go deleted file mode 100644 index 9a67ec23b..000000000 --- a/connmgr/connmanager.go +++ /dev/null @@ -1,782 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package connmgr - -import ( - nativeerrors "errors" - "fmt" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/wire" - - "github.com/pkg/errors" -) - -// maxFailedAttempts is the maximum number of successive failed connection -// attempts after which network failure is assumed and new connections will -// be delayed by the configured retry duration. -const maxFailedAttempts = 25 - -var ( - // maxRetryDuration is the max duration of time retrying of a persistent - // connection is allowed to grow to. This is necessary since the retry - // logic uses a backoff mechanism which increases the interval base times - // the number of retries that have been done. - maxRetryDuration = time.Minute * 5 - - // defaultRetryDuration is the default duration of time for retrying - // persistent connections. 
- defaultRetryDuration = time.Second * 5 -) - -var ( - //ErrDialNil is used to indicate that Dial cannot be nil in the configuration. - ErrDialNil = errors.New("Config: Dial cannot be nil") - - // ErrMaxOutboundPeers is an error that is thrown when the max amount of peers had - // been reached. - ErrMaxOutboundPeers = errors.New("max outbound peers reached") - - // ErrAlreadyConnected is an error that is thrown if the peer is already - // connected. - ErrAlreadyConnected = errors.New("peer already connected") - - // ErrAlreadyPermanent is an error that is thrown if the peer is already - // connected as a permanent peer. - ErrAlreadyPermanent = errors.New("peer exists as a permanent peer") - - // ErrPeerNotFound is an error that is thrown if the peer was not found. - ErrPeerNotFound = errors.New("peer not found") - - //ErrAddressManagerNil is used to indicate that Address Manager cannot be nil in the configuration. - ErrAddressManagerNil = errors.New("Config: Address manager cannot be nil") -) - -// ConnState represents the state of the requested connection. -type ConnState uint8 - -// ConnState can be either pending, established, disconnected or failed. When -// a new connection is requested, it is attempted and categorized as -// established or failed depending on the connection result. An established -// connection which was disconnected is categorized as disconnected. -const ( - ConnPending ConnState = iota - ConnFailing - ConnCanceled - ConnEstablished - ConnDisconnected -) - -// ConnReq is the connection request to a network address. If permanent, the -// connection will be retried on disconnection. -type ConnReq struct { - // The following variables must only be used atomically. - id uint64 - - Addr *net.TCPAddr - Permanent bool - - conn net.Conn - state ConnState - stateMtx sync.RWMutex - retryCount uint32 -} - -// updateState updates the state of the connection request. -func (c *ConnReq) updateState(state ConnState) { - c.stateMtx.Lock() - defer c.stateMtx.Unlock() - c.state = state -} - -// ID returns a unique identifier for the connection request. -func (c *ConnReq) ID() uint64 { - return atomic.LoadUint64(&c.id) -} - -// State is the connection state of the requested connection. -func (c *ConnReq) State() ConnState { - c.stateMtx.RLock() - defer c.stateMtx.RUnlock() - state := c.state - return state -} - -// String returns a human-readable string for the connection request. -func (c *ConnReq) String() string { - if c.Addr == nil || c.Addr.String() == "" { - return fmt.Sprintf("reqid %d", atomic.LoadUint64(&c.id)) - } - return fmt.Sprintf("%s (reqid %d)", c.Addr, atomic.LoadUint64(&c.id)) -} - -// Config holds the configuration options related to the connection manager. -type Config struct { - // Listeners defines a slice of listeners for which the connection - // manager will take ownership of and accept connections. When a - // connection is accepted, the OnAccept handler will be invoked with the - // connection. Since the connection manager takes ownership of these - // listeners, they will be closed when the connection manager is - // stopped. - // - // This field will not have any effect if the OnAccept field is not - // also specified. It may be nil if the caller does not wish to listen - // for incoming connections. - Listeners []net.Listener - - // OnAccept is a callback that is fired when an inbound connection is - // accepted. It is the caller's responsibility to close the connection. 
- // Failure to close the connection will result in the connection manager - // believing the connection is still active and thus have undesirable - // side effects such as still counting toward maximum connection limits. - // - // This field will not have any effect if the Listeners field is not - // also specified since there couldn't possibly be any accepted - // connections in that case. - OnAccept func(net.Conn) - - // TargetOutbound is the number of outbound network connections to - // maintain. Defaults to 8. - TargetOutbound uint32 - - // RetryDuration is the duration to wait before retrying connection - // requests. Defaults to 5s. - RetryDuration time.Duration - - // OnConnection is a callback that is fired when a new outbound - // connection is established. - OnConnection func(*ConnReq, net.Conn) - - // OnConnectionFailed is a callback that is fired when a new outbound - // connection has failed to be established. - OnConnectionFailed func(*ConnReq) - - // OnDisconnection is a callback that is fired when an outbound - // connection is disconnected. - OnDisconnection func(*ConnReq) - - AddrManager *addrmgr.AddrManager - - // Dial connects to the address on the named network. It cannot be nil. - Dial func(net.Addr) (net.Conn, error) -} - -// registerPending is used to register a pending connection attempt. By -// registering pending connection attempts we allow callers to cancel pending -// connection attempts before their successful or in the case they're not -// longer wanted. -type registerPending struct { - c *ConnReq - done chan struct{} -} - -// handleConnected is used to queue a successful connection. -type handleConnected struct { - c *ConnReq - conn net.Conn -} - -// handleDisconnected is used to remove a connection. -type handleDisconnected struct { - id uint64 - retry bool -} - -// handleFailed is used to remove a pending connection. -type handleFailed struct { - c *ConnReq - err error -} - -// ConnManager provides a manager to handle network connections. -type ConnManager struct { - // The following variables must only be used atomically. - connReqCount uint64 - start int32 - stop int32 - - addressMtx sync.Mutex - usedOutboundGroups map[string]int64 - usedAddresses map[string]struct{} - - cfg Config - appCfg *config.Config - wg sync.WaitGroup - failedAttempts uint64 - requests chan interface{} - quit chan struct{} -} - -// handleFailedConn handles a connection failed due to a disconnect or any -// other failure. If permanent, it retries the connection after the configured -// retry duration. Otherwise, if required, it makes a new connection request. -// After maxFailedConnectionAttempts new connections will be retried after the -// configured retry duration. 
-func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) { - if atomic.LoadInt32(&cm.stop) != 0 { - return - } - - // Don't write throttled logs more than once every throttledConnFailedLogInterval - shouldWriteLog := shouldWriteConnFailedLog(err) - if shouldWriteLog { - // If we are to write a log, set its lastLogTime to now - setConnFailedLastLogTime(err, time.Now()) - } - - if c.Permanent { - c.retryCount++ - d := time.Duration(c.retryCount) * cm.cfg.RetryDuration - if d > maxRetryDuration { - d = maxRetryDuration - } - if shouldWriteLog { - log.Debugf("Retrying further connections to %s every %s", c, d) - } - spawnAfter("ConnManager.connect-withDelay", d, func() { - cm.connect(c) - }) - } else { - if c.Addr != nil { - cm.releaseAddress(c.Addr) - } - cm.failedAttempts++ - if cm.failedAttempts >= maxFailedAttempts { - if shouldWriteLog { - log.Debugf("Max failed connection attempts reached: [%d] "+ - "-- retrying further connections every %s", maxFailedAttempts, - cm.cfg.RetryDuration) - } - spawnAfter("ConnManager.NewConnReq-withDelay", cm.cfg.RetryDuration, cm.NewConnReq) - } else { - spawn("ConnManager.NewConnReq", cm.NewConnReq) - } - } -} - -func (cm *ConnManager) releaseAddress(addr *net.TCPAddr) { - cm.addressMtx.Lock() - defer cm.addressMtx.Unlock() - - groupKey := cm.usedOutboundGroupsKey(addr) - cm.usedOutboundGroups[groupKey]-- - if cm.usedOutboundGroups[groupKey] < 0 { - panic(fmt.Errorf("cm.usedOutboundGroups[%s] has a negative value of %d. This should never happen", groupKey, cm.usedOutboundGroups[groupKey])) - } - delete(cm.usedAddresses, usedAddressesKey(addr)) -} - -func (cm *ConnManager) markAddressAsUsed(addr *net.TCPAddr) { - cm.usedOutboundGroups[cm.usedOutboundGroupsKey(addr)]++ - cm.usedAddresses[usedAddressesKey(addr)] = struct{}{} -} - -func (cm *ConnManager) isOutboundGroupUsed(addr *net.TCPAddr) bool { - _, ok := cm.usedOutboundGroups[cm.usedOutboundGroupsKey(addr)] - return ok -} - -func (cm *ConnManager) isAddressUsed(addr *net.TCPAddr) bool { - _, ok := cm.usedAddresses[usedAddressesKey(addr)] - return ok -} - -func (cm *ConnManager) usedOutboundGroupsKey(addr *net.TCPAddr) string { - // A fake service flag is used since it doesn't affect the group key. - na := wire.NewNetAddress(addr, wire.SFNodeNetwork) - return cm.cfg.AddrManager.GroupKey(na) -} - -func usedAddressesKey(addr *net.TCPAddr) string { - return addr.String() -} - -// throttledError defines an error type whose logs get throttled. This is to -// prevent flooding the logs with identical errors. -type throttledError error - -var ( - // throttledConnFailedLogInterval is the minimum duration of time between - // the logs defined in throttledConnFailedLogs. - throttledConnFailedLogInterval = time.Minute * 10 - - // throttledConnFailedLogs are logs that get written at most every - // throttledConnFailedLogInterval. Each entry in this map defines a type - // of error that we want to throttle. The value of each entry is the last - // time that type of log had been written. - throttledConnFailedLogs = map[throttledError]time.Time{ - ErrNoAddress: {}, - } - - // ErrNoAddress is an error that is thrown when there aren't any - // valid connection addresses. - ErrNoAddress throttledError = errors.New("no valid connect address") -) - -// shouldWriteConnFailedLog resolves whether to write logs related to connection -// failures. Errors that had not been previously registered in throttledConnFailedLogs -// and non-error (nil values) must always be logged. 
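As an aside on the retry logic in `handleFailedConn` above (part of the connmgr file this diff deletes): permanent connections back off linearly — `retryCount × RetryDuration`, capped at `maxRetryDuration`. A standalone sketch of that calculation, restating the defaults quoted in the deleted code:

```go
package main

import (
	"fmt"
	"time"
)

// retryDelay mirrors the backoff in the deleted handleFailedConn: the wait
// before the next attempt grows linearly with the retry count and is capped
// at maxRetryDuration.
func retryDelay(retryCount uint32, retryDuration, maxRetryDuration time.Duration) time.Duration {
	d := time.Duration(retryCount) * retryDuration
	if d > maxRetryDuration {
		d = maxRetryDuration
	}
	return d
}

func main() {
	const defaultRetryDuration = 5 * time.Second // default from the deleted file
	const maxRetryDuration = 5 * time.Minute     // cap from the deleted file

	for _, count := range []uint32{1, 10, 100} {
		fmt.Printf("attempt %3d -> wait %s\n", count, retryDelay(count, defaultRetryDuration, maxRetryDuration))
	}
}
```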
-func shouldWriteConnFailedLog(err error) bool { - if err == nil { - return true - } - lastLogTime, ok := throttledConnFailedLogs[err] - return !ok || lastLogTime.Add(throttledConnFailedLogInterval).Before(time.Now()) -} - -// setConnFailedLastLogTime sets the last log time of the specified error -func setConnFailedLastLogTime(err error, lastLogTime time.Time) { - var throttledErr throttledError - nativeerrors.As(err, &throttledErr) - throttledConnFailedLogs[err] = lastLogTime -} - -// connHandler handles all connection related requests. It must be run as a -// goroutine. -// -// The connection handler makes sure that we maintain a pool of active outbound -// connections so that we remain connected to the network. Connection requests -// are processed and mapped by their assigned ids. -func (cm *ConnManager) connHandler() { - - var ( - // pending holds all registered conn requests that have yet to - // succeed. - pending = make(map[uint64]*ConnReq) - - // conns represents the set of all actively connected peers. - conns = make(map[uint64]*ConnReq, cm.cfg.TargetOutbound) - ) - -out: - for { - select { - case req := <-cm.requests: - switch msg := req.(type) { - - case registerPending: - connReq := msg.c - connReq.updateState(ConnPending) - pending[msg.c.id] = connReq - close(msg.done) - - case handleConnected: - connReq := msg.c - - if _, ok := pending[connReq.id]; !ok { - if msg.conn != nil { - msg.conn.Close() - } - log.Debugf("Ignoring connection for "+ - "canceled connreq=%s", connReq) - continue - } - - connReq.updateState(ConnEstablished) - connReq.conn = msg.conn - conns[connReq.id] = connReq - log.Debugf("Connected to %s", connReq) - connReq.retryCount = 0 - - delete(pending, connReq.id) - - if cm.cfg.OnConnection != nil { - cm.cfg.OnConnection(connReq, msg.conn) - } - - case handleDisconnected: - connReq, ok := conns[msg.id] - if !ok { - connReq, ok = pending[msg.id] - if !ok { - log.Errorf("Unknown connid=%d", - msg.id) - continue - } - - // Pending connection was found, remove - // it from pending map if we should - // ignore a later, successful - // connection. - connReq.updateState(ConnCanceled) - log.Debugf("Canceling: %s", connReq) - delete(pending, msg.id) - continue - - } - - // An existing connection was located, mark as - // disconnected and execute disconnection - // callback. - log.Debugf("Disconnected from %s", connReq) - delete(conns, msg.id) - - if connReq.conn != nil { - connReq.conn.Close() - } - - if cm.cfg.OnDisconnection != nil { - spawn("cm.cfg.OnDisconnection", func() { - cm.cfg.OnDisconnection(connReq) - }) - } - - // All internal state has been cleaned up, if - // this connection is being removed, we will - // make no further attempts with this request. - if !msg.retry { - connReq.updateState(ConnDisconnected) - continue - } - - // Otherwise, we will attempt a reconnection. - // The connection request is re added to the - // pending map, so that subsequent processing - // of connections and failures do not ignore - // the request. 
- connReq.updateState(ConnPending) - log.Debugf("Reconnecting to %s", - connReq) - pending[msg.id] = connReq - cm.handleFailedConn(connReq, nil) - - case handleFailed: - connReq := msg.c - - if _, ok := pending[connReq.id]; !ok { - log.Debugf("Ignoring connection for "+ - "canceled conn req: %s", connReq) - continue - } - - connReq.updateState(ConnFailing) - if shouldWriteConnFailedLog(msg.err) { - log.Debugf("Failed to connect to %s: %s", - connReq, msg.err) - } - cm.handleFailedConn(connReq, msg.err) - - if cm.cfg.OnConnectionFailed != nil { - cm.cfg.OnConnectionFailed(connReq) - } - } - - case <-cm.quit: - break out - } - } - - cm.wg.Done() - log.Trace("Connection handler done") -} - -// NotifyConnectionRequestComplete notifies the connection -// manager that a peer had been successfully connected and -// marked as good. -func (cm *ConnManager) NotifyConnectionRequestComplete() { - cm.failedAttempts = 0 -} - -// NewConnReq creates a new connection request and connects to the -// corresponding address. -func (cm *ConnManager) NewConnReq() { - if atomic.LoadInt32(&cm.stop) != 0 { - return - } - - c := &ConnReq{} - atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1)) - - // Submit a request of a pending connection attempt to the connection - // manager. By registering the id before the connection is even - // established, we'll be able to later cancel the connection via the - // Remove method. - done := make(chan struct{}) - select { - case cm.requests <- registerPending{c, done}: - case <-cm.quit: - return - } - - // Wait for the registration to successfully add the pending conn req to - // the conn manager's internal state. - select { - case <-done: - case <-cm.quit: - return - } - err := cm.associateAddressToConnReq(c) - if err != nil { - select { - case cm.requests <- handleFailed{c, err}: - case <-cm.quit: - } - return - } - - cm.connect(c) -} - -func (cm *ConnManager) associateAddressToConnReq(c *ConnReq) error { - cm.addressMtx.Lock() - defer cm.addressMtx.Unlock() - - addr, err := cm.getNewAddress() - if err != nil { - return err - } - - cm.markAddressAsUsed(addr) - c.Addr = addr - return nil -} - -// Connect assigns an id and dials a connection to the address of the -// connection request. -func (cm *ConnManager) Connect(c *ConnReq) error { - err := func() error { - cm.addressMtx.Lock() - defer cm.addressMtx.Unlock() - - if cm.isAddressUsed(c.Addr) { - return fmt.Errorf("address %s is already in use", c.Addr) - } - cm.markAddressAsUsed(c.Addr) - return nil - }() - if err != nil { - return err - } - - cm.connect(c) - return nil -} - -// connect assigns an id and dials a connection to the address of the -// connection request. This function assumes that the connection address -// has checked and already marked as used. -func (cm *ConnManager) connect(c *ConnReq) { - if atomic.LoadInt32(&cm.stop) != 0 { - return - } - - if atomic.LoadUint64(&c.id) == 0 { - atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1)) - - // Submit a request of a pending connection attempt to the - // connection manager. By registering the id before the - // connection is even established, we'll be able to later - // cancel the connection via the Remove method. - done := make(chan struct{}) - select { - case cm.requests <- registerPending{c, done}: - case <-cm.quit: - return - } - - // Wait for the registration to successfully add the pending - // conn req to the conn manager's internal state. 
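The registration handshake that `NewConnReq` and `connect` above both perform — send a `registerPending` message carrying a `done` channel, then wait for either `done` or `quit` — is easier to see in isolation. A minimal, self-contained sketch of that pattern, with names simplified from the deleted code:

```go
package main

import "fmt"

// registerPending mirrors the message NewConnReq sends to connHandler: the
// done channel is closed by the handler once the request is recorded, so the
// sender knows it is safe to proceed (or gives up if quit closes first).
type registerPending struct {
	id   uint64
	done chan struct{}
}

func main() {
	requests := make(chan registerPending)
	quit := make(chan struct{})

	// A stripped-down stand-in for the connHandler goroutine.
	go func() {
		for {
			select {
			case req := <-requests:
				// ... record req.id in the pending map ...
				close(req.done)
			case <-quit:
				return
			}
		}
	}()

	done := make(chan struct{})
	select {
	case requests <- registerPending{id: 1, done: done}:
	case <-quit:
		return
	}
	select {
	case <-done:
		fmt.Println("connection request 1 registered")
	case <-quit:
	}
	close(quit)
}
```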
- select { - case <-done: - case <-cm.quit: - return - } - } - - log.Debugf("Attempting to connect to %s", c) - - conn, err := cm.cfg.Dial(c.Addr) - if err != nil { - select { - case cm.requests <- handleFailed{c, err}: - case <-cm.quit: - } - return - } - - select { - case cm.requests <- handleConnected{c, conn}: - case <-cm.quit: - } -} - -// Disconnect disconnects the connection corresponding to the given connection -// id. If permanent, the connection will be retried with an increasing backoff -// duration. -func (cm *ConnManager) Disconnect(id uint64) { - if atomic.LoadInt32(&cm.stop) != 0 { - return - } - - select { - case cm.requests <- handleDisconnected{id, true}: - case <-cm.quit: - } -} - -// Remove removes the connection corresponding to the given connection id from -// known connections. -// -// NOTE: This method can also be used to cancel a lingering connection attempt -// that hasn't yet succeeded. -func (cm *ConnManager) Remove(id uint64) { - if atomic.LoadInt32(&cm.stop) != 0 { - return - } - - select { - case cm.requests <- handleDisconnected{id, false}: - case <-cm.quit: - } -} - -// listenHandler accepts incoming connections on a given listener. It must be -// run as a goroutine. -func (cm *ConnManager) listenHandler(listener net.Listener) { - log.Infof("Server listening on %s", listener.Addr()) - for atomic.LoadInt32(&cm.stop) == 0 { - conn, err := listener.Accept() - if err != nil { - // Only log the error if not forcibly shutting down. - if atomic.LoadInt32(&cm.stop) == 0 { - log.Errorf("Can't accept connection: %s", err) - } - continue - } - spawn("SPAWN_PLACEHOLDER_NAME", func() { - cm.cfg.OnAccept(conn) - }) - } - - cm.wg.Done() - log.Tracef("Listener handler done for %s", listener.Addr()) -} - -// Start launches the connection manager and begins connecting to the network. -func (cm *ConnManager) Start() { - // Already started? - if atomic.AddInt32(&cm.start, 1) != 1 { - return - } - - log.Trace("Connection manager started") - cm.wg.Add(1) - spawn("SPAWN_PLACEHOLDER_NAME", cm.connHandler) - - // Start all the listeners so long as the caller requested them and - // provided a callback to be invoked when connections are accepted. - if cm.cfg.OnAccept != nil { - for _, listener := range cm.cfg.Listeners { - // Declaring this variable is necessary as it needs be declared in the same - // scope of the anonymous function below it. - listenerCopy := listener - cm.wg.Add(1) - spawn("SPAWN_PLACEHOLDER_NAME", func() { - cm.listenHandler(listenerCopy) - }) - } - } - - for i := atomic.LoadUint64(&cm.connReqCount); i < uint64(cm.cfg.TargetOutbound); i++ { - spawn("SPAWN_PLACEHOLDER_NAME", cm.NewConnReq) - } -} - -// Wait blocks until the connection manager halts gracefully. -func (cm *ConnManager) Wait() { - cm.wg.Wait() -} - -// Stop gracefully shuts down the connection manager. -func (cm *ConnManager) Stop() { - if atomic.AddInt32(&cm.stop, 1) != 1 { - log.Warnf("Connection manager already stopped") - return - } - - // Stop all the listeners. There will not be any listeners if - // listening is disabled. - for _, listener := range cm.cfg.Listeners { - // Ignore the error since this is shutdown and there is no way - // to recover anyways. - _ = listener.Close() - } - - close(cm.quit) - log.Trace("Connection manager stopped") -} - -func (cm *ConnManager) getNewAddress() (*net.TCPAddr, error) { - for tries := 0; tries < 100; tries++ { - addr := cm.cfg.AddrManager.GetAddress() - if addr == nil { - break - } - - // Check if there's already a connection to the same address. 
- netAddr := addr.NetAddress().TCPAddress() - if cm.isAddressUsed(netAddr) { - continue - } - - // Address will not be invalid, local or unroutable - // because addrmanager rejects those on addition. - // Just check that we don't already have an address - // in the same group so that we are not connecting - // to the same network segment at the expense of - // others. - // - // Networks that accept unroutable connections are exempt - // from this rule, since they're meant to run within a - // private subnet, like 10.0.0.0/16. - if !cm.appCfg.NetParams().AcceptUnroutable && cm.isOutboundGroupUsed(netAddr) { - continue - } - - // only allow recent nodes (10mins) after we failed 30 - // times - if tries < 30 && time.Since(addr.LastAttempt().ToNativeTime()) < 10*time.Minute { - continue - } - - // allow nondefault ports after 50 failed tries. - if tries < 50 && fmt.Sprintf("%d", netAddr.Port) != - cm.appCfg.NetParams().DefaultPort { - continue - } - - return netAddr, nil - } - return nil, ErrNoAddress -} - -// New returns a new connection manager. -// Use Start to start connecting to the network. -func New(cfg *Config, appCfg *config.Config) (*ConnManager, error) { - if cfg.Dial == nil { - return nil, errors.WithStack(ErrDialNil) - } - if cfg.AddrManager == nil { - return nil, errors.WithStack(ErrAddressManagerNil) - } - // Default to sane values - if cfg.RetryDuration <= 0 { - cfg.RetryDuration = defaultRetryDuration - } - cm := ConnManager{ - cfg: *cfg, // Copy so caller can't mutate - appCfg: appCfg, - requests: make(chan interface{}), - quit: make(chan struct{}), - usedAddresses: make(map[string]struct{}), - usedOutboundGroups: make(map[string]int64), - } - return &cm, nil -} diff --git a/connmgr/connmanager_test.go b/connmgr/connmanager_test.go deleted file mode 100644 index d2a858509..000000000 --- a/connmgr/connmanager_test.go +++ /dev/null @@ -1,955 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package connmgr - -import ( - "fmt" - "io" - "io/ioutil" - "net" - "sync/atomic" - "testing" - "time" - - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/dbaccess" - "github.com/pkg/errors" -) - -func init() { - // Override the max retry duration when running tests. - maxRetryDuration = 2 * time.Millisecond -} - -func defaultAppConfig() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimnetParams}, - }, - } -} - -// mockAddr mocks a network address -type mockAddr struct { - net, address string -} - -func (m mockAddr) Network() string { return m.net } -func (m mockAddr) String() string { return m.address } - -// mockConn mocks a network connection by implementing the net.Conn interface. -type mockConn struct { - io.Reader - io.Writer - io.Closer - - // local network, address for the connection. - lnet, laddr string - - // remote network, address for the connection. - rAddr net.Addr -} - -// LocalAddr returns the local address for the connection. -func (c mockConn) LocalAddr() net.Addr { - return &mockAddr{c.lnet, c.laddr} -} - -// RemoteAddr returns the remote address for the connection. -func (c mockConn) RemoteAddr() net.Addr { - return &mockAddr{c.rAddr.Network(), c.rAddr.String()} -} - -// Close handles closing the connection. 
-func (c mockConn) Close() error { - return nil -} - -func (c mockConn) SetDeadline(t time.Time) error { return nil } -func (c mockConn) SetReadDeadline(t time.Time) error { return nil } -func (c mockConn) SetWriteDeadline(t time.Time) error { return nil } - -// mockDialer mocks the net.Dial interface by returning a mock connection to -// the given address. -func mockDialer(addr net.Addr) (net.Conn, error) { - r, w := io.Pipe() - c := &mockConn{rAddr: addr} - c.Reader = r - c.Writer = w - return c, nil -} - -// TestNewConfig tests that new ConnManager config is validated as expected. -func TestNewConfig(t *testing.T) { - appCfg := defaultAppConfig() - _, err := New(&Config{}, appCfg) - if !errors.Is(err, ErrDialNil) { - t.Fatalf("New expected error: %s, got %s", ErrDialNil, err) - } - - _, err = New(&Config{ - Dial: mockDialer, - }, appCfg) - if !errors.Is(err, ErrAddressManagerNil) { - t.Fatalf("New expected error: %s, got %s", ErrAddressManagerNil, err) - } - - amgr, teardown := addressManagerForTest(t, "TestNewConfig", defaultAppConfig(), 10) - defer teardown() - - _, err = New(&Config{ - Dial: mockDialer, - AddrManager: amgr, - }, appCfg) - if err != nil { - t.Fatalf("New unexpected error: %v", err) - } -} - -// TestStartStop tests that the connection manager starts and stops as -// expected. -func TestStartStop(t *testing.T) { - connected := make(chan *ConnReq) - disconnected := make(chan *ConnReq) - - amgr, teardown := addressManagerForTest(t, "TestStartStop", defaultAppConfig(), 10) - defer teardown() - - cmgr, err := New(&Config{ - TargetOutbound: 1, - AddrManager: amgr, - Dial: mockDialer, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- c - }, - OnDisconnection: func(c *ConnReq) { - disconnected <- c - }, - }, defaultAppConfig()) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - gotConnReq := <-connected - cmgr.Stop() - // already stopped - cmgr.Stop() - // ignored - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - err = cmgr.Connect(cr) - if err != nil { - t.Fatalf("Connect error: %s", err) - } - if cr.ID() != 0 { - t.Fatalf("start/stop: got id: %v, want: 0", cr.ID()) - } - cmgr.Disconnect(gotConnReq.ID()) - cmgr.Remove(gotConnReq.ID()) - select { - case <-disconnected: - t.Fatalf("start/stop: unexpected disconnection") - case <-time.Tick(10 * time.Millisecond): - break - } -} - -func addressManagerForTest(t *testing.T, testName string, appConfig *config.Config, numAddresses uint8) (*addrmgr.AddrManager, func()) { - amgr, teardown := createEmptyAddressManagerForTest(t, testName, appConfig) - - for i := uint8(0); i < numAddresses; i++ { - ip := fmt.Sprintf("173.%d.115.66:16511", i) - err := amgr.AddAddressByIP(ip, nil) - if err != nil { - t.Fatalf("AddAddressByIP unexpectedly failed to add IP %s: %s", ip, err) - } - } - - return amgr, teardown -} - -func createEmptyAddressManagerForTest(t *testing.T, testName string, appConfig *config.Config) (*addrmgr.AddrManager, func()) { - path, err := ioutil.TempDir("", fmt.Sprintf("%s-database", testName)) - if err != nil { - t.Fatalf("createEmptyAddressManagerForTest: TempDir unexpectedly "+ - "failed: %s", err) - } - - databaseContext, err := dbaccess.New(path) - if err != nil { - t.Fatalf("error creating db: %s", err) - } - - return addrmgr.New(appConfig, databaseContext), func() { - // Wait for the connection manager to finish, so it'll - // have access to the address manager as long as it's - // alive. 
- time.Sleep(10 * time.Millisecond) - - err := databaseContext.Close() - if err != nil { - t.Fatalf("error closing the database: %s", err) - } - } -} - -// TestConnectMode tests that the connection manager works in the connect mode. -// -// In connect mode, automatic connections are disabled, so we test that -// requests using Connect are handled and that no other connections are made. -func TestConnectMode(t *testing.T) { - appConfig := defaultAppConfig() - - connected := make(chan *ConnReq) - amgr, teardown := addressManagerForTest(t, "TestConnectMode", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - TargetOutbound: 0, - Dial: mockDialer, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- c - }, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - cmgr.Start() - cmgr.Connect(cr) - gotConnReq := <-connected - wantID := cr.ID() - gotID := gotConnReq.ID() - if gotID != wantID { - t.Fatalf("connect mode: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID) - } - gotState := cr.State() - wantState := ConnEstablished - if gotState != wantState { - t.Fatalf("connect mode: %v - want state %v, got state %v", cr.Addr, wantState, gotState) - } - select { - case c := <-connected: - t.Fatalf("connect mode: got unexpected connection - %v", c.Addr) - case <-time.After(time.Millisecond): - break - } - cmgr.Stop() - cmgr.Wait() -} - -// TestTargetOutbound tests the target number of outbound connections. -// -// We wait until all connections are established, then test they there are the -// only connections made. -func TestTargetOutbound(t *testing.T) { - appConfig := defaultAppConfig() - - const numAddressesInAddressManager = 10 - targetOutbound := uint32(numAddressesInAddressManager - 2) - connected := make(chan *ConnReq) - - amgr, teardown := addressManagerForTest(t, "TestTargetOutbound", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - TargetOutbound: targetOutbound, - Dial: mockDialer, - AddrManager: amgr, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- c - }, - }, defaultAppConfig()) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - for i := uint32(0); i < targetOutbound; i++ { - <-connected - } - - select { - case c := <-connected: - t.Fatalf("target outbound: got unexpected connection - %v", c.Addr) - case <-time.After(time.Millisecond): - break - } - cmgr.Stop() - cmgr.Wait() -} - -// TestDuplicateOutboundConnections tests that connection requests cannot use an already used address. -// It checks it by creating one connection request for each address in the address manager, so that -// the next connection request will have to fail because no unused address will be available. 
-func TestDuplicateOutboundConnections(t *testing.T) { - appConfig := defaultAppConfig() - - const numAddressesInAddressManager = 10 - targetOutbound := uint32(numAddressesInAddressManager - 1) - connected := make(chan struct{}) - failedConnections := make(chan struct{}) - - amgr, teardown := addressManagerForTest(t, "TestDuplicateOutboundConnections", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - TargetOutbound: targetOutbound, - Dial: mockDialer, - AddrManager: amgr, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- struct{}{} - }, - OnConnectionFailed: func(_ *ConnReq) { - failedConnections <- struct{}{} - }, - }, defaultAppConfig()) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - for i := uint32(0); i < targetOutbound; i++ { - <-connected - } - - time.Sleep(time.Millisecond) - - // Here we check that making a manual connection request beyond the target outbound connection - // doesn't fail, so we can know that the reason such connection request will fail is an address - // related issue. - cmgr.NewConnReq() - select { - case <-connected: - break - case <-time.After(time.Millisecond): - t.Fatalf("connection request unexpectedly didn't connect") - } - - select { - case <-failedConnections: - t.Fatalf("a connection request unexpectedly failed") - case <-time.After(time.Millisecond): - break - } - - // After we created numAddressesInAddressManager connection requests, this request should fail - // because there aren't any more available addresses. - cmgr.NewConnReq() - select { - case <-connected: - t.Fatalf("connection request unexpectedly succeeded") - case <-time.After(time.Millisecond): - t.Fatalf("connection request didn't fail as expected") - case <-failedConnections: - break - } - - cmgr.Stop() - cmgr.Wait() -} - -// TestSameOutboundGroupConnections tests that connection requests cannot use an address with an already used -// address CIDR group. -// It checks it by creating an address manager with only two addresses, that both belong to the same CIDR group -// and checks that the second connection request fails. 
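The test described just above exercises the one-outbound-connection-per-CIDR-group rule enforced by the deleted `isOutboundGroupUsed`/`markAddressAsUsed` bookkeeping. A simplified sketch of that rule — the real group key comes from `addrmgr.GroupKey`; a plain /16 bucket stands in for it here:

```go
package main

import (
	"fmt"
	"net"
)

// groupKey is a simplified stand-in for addrmgr's GroupKey: IPv4 addresses
// are bucketed by their /16 prefix, which is why 173.190.115.66 and
// 173.190.115.67 collide in the test described above.
func groupKey(addr *net.TCPAddr) string {
	return addr.IP.Mask(net.CIDRMask(16, 32)).String()
}

func main() {
	usedOutboundGroups := map[string]int64{}

	tryUse := func(host string) bool {
		addr, err := net.ResolveTCPAddr("tcp", host)
		if err != nil {
			return false
		}
		key := groupKey(addr)
		if usedOutboundGroups[key] > 0 {
			return false // this network segment is already in use
		}
		usedOutboundGroups[key]++
		return true
	}

	fmt.Println(tryUse("173.190.115.66:16511")) // true
	fmt.Println(tryUse("173.190.115.67:16511")) // false: same /16 group
}
```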
-func TestSameOutboundGroupConnections(t *testing.T) { - appConfig := defaultAppConfig() - - amgr, teardown := createEmptyAddressManagerForTest(t, "TestSameOutboundGroupConnections", appConfig) - defer teardown() - - err := amgr.AddAddressByIP("173.190.115.66:16511", nil) - if err != nil { - t.Fatalf("AddAddressByIP unexpectedly failed: %s", err) - } - - err = amgr.AddAddressByIP("173.190.115.67:16511", nil) - if err != nil { - t.Fatalf("AddAddressByIP unexpectedly failed: %s", err) - } - - connected := make(chan struct{}) - failedConnections := make(chan struct{}) - cmgr, err := New(&Config{ - TargetOutbound: 0, - Dial: mockDialer, - AddrManager: amgr, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- struct{}{} - }, - OnConnectionFailed: func(_ *ConnReq) { - failedConnections <- struct{}{} - }, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - - cmgr.Start() - - cmgr.NewConnReq() - select { - case <-connected: - break - case <-time.After(time.Millisecond): - t.Fatalf("connection request unexpectedly didn't connect") - } - - select { - case <-failedConnections: - t.Fatalf("a connection request unexpectedly failed") - case <-time.After(time.Millisecond): - break - } - - cmgr.NewConnReq() - select { - case <-connected: - t.Fatalf("connection request unexpectedly succeeded") - case <-time.After(time.Millisecond): - t.Fatalf("connection request didn't fail as expected") - case <-failedConnections: - break - } - - cmgr.Stop() - cmgr.Wait() -} - -// TestRetryPermanent tests that permanent connection requests are retried. -// -// We make a permanent connection request using Connect, disconnect it using -// Disconnect and we wait for it to be connected back. -func TestRetryPermanent(t *testing.T) { - appConfig := defaultAppConfig() - - connected := make(chan *ConnReq) - disconnected := make(chan *ConnReq) - - amgr, teardown := addressManagerForTest(t, "TestRetryPermanent", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - RetryDuration: time.Millisecond, - TargetOutbound: 0, - Dial: mockDialer, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- c - }, - OnDisconnection: func(c *ConnReq) { - disconnected <- c - }, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - go cmgr.Connect(cr) - cmgr.Start() - gotConnReq := <-connected - wantID := cr.ID() - gotID := gotConnReq.ID() - if gotID != wantID { - t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID) - } - gotState := cr.State() - wantState := ConnEstablished - if gotState != wantState { - t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState) - } - - cmgr.Disconnect(cr.ID()) - gotConnReq = <-disconnected - wantID = cr.ID() - gotID = gotConnReq.ID() - if gotID != wantID { - t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID) - } - gotState = cr.State() - wantState = ConnPending - if gotState != wantState { - // There is a small chance that connection has already been established, - // so check for that as well - if gotState != ConnEstablished { - t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState) - } - } - - gotConnReq = <-connected - wantID = cr.ID() - gotID = gotConnReq.ID() - if gotID != wantID { - t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID) - } - gotState = cr.State() - 
wantState = ConnEstablished - if gotState != wantState { - t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState) - } - - cmgr.Remove(cr.ID()) - gotConnReq = <-disconnected - - // Wait for status to be updated - time.Sleep(10 * time.Millisecond) - wantID = cr.ID() - gotID = gotConnReq.ID() - if gotID != wantID { - t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID) - } - gotState = cr.State() - wantState = ConnDisconnected - if gotState != wantState { - t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState) - } - cmgr.Stop() - cmgr.Wait() -} - -// TestMaxRetryDuration tests the maximum retry duration. -// -// We have a timed dialer which initially returns err but after RetryDuration -// hits maxRetryDuration returns a mock conn. -func TestMaxRetryDuration(t *testing.T) { - appConfig := defaultAppConfig() - - networkUp := make(chan struct{}) - time.AfterFunc(5*time.Millisecond, func() { - close(networkUp) - }) - timedDialer := func(addr net.Addr) (net.Conn, error) { - select { - case <-networkUp: - return mockDialer(addr) - default: - return nil, errors.New("network down") - } - } - - amgr, teardown := addressManagerForTest(t, "TestMaxRetryDuration", appConfig, 10) - defer teardown() - - connected := make(chan *ConnReq) - cmgr, err := New(&Config{ - RetryDuration: time.Millisecond, - TargetOutbound: 0, - Dial: timedDialer, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- c - }, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - go cmgr.Connect(cr) - cmgr.Start() - // retry in 1ms - // retry in 2ms - max retry duration reached - // retry in 2ms - timedDialer returns mockDial - select { - case <-connected: - case <-time.Tick(100 * time.Millisecond): - t.Fatalf("max retry duration: connection timeout") - } - cmgr.Stop() - cmgr.Wait() -} - -// TestNetworkFailure tests that the connection manager handles a network -// failure gracefully. -func TestNetworkFailure(t *testing.T) { - appConfig := defaultAppConfig() - - var dials uint32 - errDialer := func(net net.Addr) (net.Conn, error) { - atomic.AddUint32(&dials, 1) - return nil, errors.New("network down") - } - - amgr, teardown := addressManagerForTest(t, "TestNetworkFailure", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - TargetOutbound: 5, - RetryDuration: 5 * time.Millisecond, - Dial: errDialer, - AddrManager: amgr, - OnConnection: func(c *ConnReq, conn net.Conn) { - t.Fatalf("network failure: got unexpected connection - %v", c.Addr) - }, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - time.Sleep(10 * time.Millisecond) - cmgr.Stop() - cmgr.Wait() - wantMaxDials := uint32(75) - if atomic.LoadUint32(&dials) > wantMaxDials { - t.Fatalf("network failure: unexpected number of dials - got %v, want < %v", - atomic.LoadUint32(&dials), wantMaxDials) - } -} - -// TestStopFailed tests that failed connections are ignored after connmgr is -// stopped. -// -// We have a dailer which sets the stop flag on the conn manager and returns an -// err so that the handler assumes that the conn manager is stopped and ignores -// the failure. 
-func TestStopFailed(t *testing.T) { - appConfig := defaultAppConfig() - - done := make(chan struct{}, 1) - waitDialer := func(addr net.Addr) (net.Conn, error) { - done <- struct{}{} - time.Sleep(time.Millisecond) - return nil, errors.New("network down") - } - - amgr, teardown := addressManagerForTest(t, "TestStopFailed", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - Dial: waitDialer, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - go func() { - <-done - atomic.StoreInt32(&cmgr.stop, 1) - time.Sleep(2 * time.Millisecond) - atomic.StoreInt32(&cmgr.stop, 0) - cmgr.Stop() - }() - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - go cmgr.Connect(cr) - cmgr.Wait() -} - -// TestRemovePendingConnection tests that it's possible to cancel a pending -// connection, removing its internal state from the ConnMgr. -func TestRemovePendingConnection(t *testing.T) { - appConfig := defaultAppConfig() - - // Create a ConnMgr instance with an instance of a dialer that'll never - // succeed. - wait := make(chan struct{}) - indefiniteDialer := func(addr net.Addr) (net.Conn, error) { - <-wait - return nil, errors.Errorf("error") - } - - amgr, teardown := addressManagerForTest(t, "TestRemovePendingConnection", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - Dial: indefiniteDialer, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - - // Establish a connection request to a random IP we've chosen. - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - go cmgr.Connect(cr) - - time.Sleep(10 * time.Millisecond) - - if cr.State() != ConnPending { - t.Fatalf("pending request hasn't been registered, status: %v", - cr.State()) - } - - // The request launched above will actually never be able to establish - // a connection. So we'll cancel it _before_ it's able to be completed. - cmgr.Remove(cr.ID()) - - time.Sleep(10 * time.Millisecond) - - // Now examine the status of the connection request, it should read a - // status of failed. - if cr.State() != ConnCanceled { - t.Fatalf("request wasn't canceled, status is: %v", cr.State()) - } - - close(wait) - cmgr.Stop() - cmgr.Wait() -} - -// TestCancelIgnoreDelayedConnection tests that a canceled connection request will -// not execute the on connection callback, even if an outstanding retry -// succeeds. -func TestCancelIgnoreDelayedConnection(t *testing.T) { - appConfig := defaultAppConfig() - - retryTimeout := 10 * time.Millisecond - - // Setup a dialer that will continue to return an error until the - // connect chan is signaled, the dial attempt immediately after will - // succeed in returning a connection. 
- connect := make(chan struct{}) - failingDialer := func(addr net.Addr) (net.Conn, error) { - select { - case <-connect: - return mockDialer(addr) - default: - } - - return nil, errors.Errorf("error") - } - - connected := make(chan *ConnReq) - - amgr, teardown := addressManagerForTest(t, "TestCancelIgnoreDelayedConnection", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - Dial: failingDialer, - RetryDuration: retryTimeout, - OnConnection: func(c *ConnReq, conn net.Conn) { - connected <- c - }, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - - // Establish a connection request to a random IP we've chosen. - cr := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - } - cmgr.Connect(cr) - - // Allow for the first retry timeout to elapse. - time.Sleep(2 * retryTimeout) - - // Connection be marked as failed, even after reattempting to - // connect. - if cr.State() != ConnFailing { - t.Fatalf("failing request should have status failed, status: %v", - cr.State()) - } - - // Remove the connection, and then immediately allow the next connection - // to succeed. - cmgr.Remove(cr.ID()) - close(connect) - - // Allow the connection manager to process the removal. - time.Sleep(5 * time.Millisecond) - - // Now examine the status of the connection request, it should read a - // status of canceled. - if cr.State() != ConnCanceled { - t.Fatalf("request wasn't canceled, status is: %v", cr.State()) - } - - // Finally, the connection manager should not signal the on-connection - // callback, since we explicitly canceled this request. We give a - // generous window to ensure the connection manager's lienar backoff is - // allowed to properly elapse. - select { - case <-connected: - t.Fatalf("on-connect should not be called for canceled req") - case <-time.After(5 * retryTimeout): - } - cmgr.Stop() - cmgr.Wait() -} - -// mockListener implements the net.Listener interface and is used to test -// code that deals with net.Listeners without having to actually make any real -// connections. -type mockListener struct { - localAddr string - provideConn chan net.Conn -} - -// Accept returns a mock connection when it receives a signal via the Connect -// function. -// -// This is part of the net.Listener interface. -func (m *mockListener) Accept() (net.Conn, error) { - for conn := range m.provideConn { - return conn, nil - } - return nil, errors.New("network connection closed") -} - -// Close closes the mock listener which will cause any blocked Accept -// operations to be unblocked and return errors. -// -// This is part of the net.Listener interface. -func (m *mockListener) Close() error { - close(m.provideConn) - return nil -} - -// Addr returns the address the mock listener was configured with. -// -// This is part of the net.Listener interface. -func (m *mockListener) Addr() net.Addr { - return &mockAddr{"tcp", m.localAddr} -} - -// Connect fakes a connection to the mock listener from the provided remote -// address. It will cause the Accept function to return a mock connection -// configured with the provided remote address and the local address for the -// mock listener. -func (m *mockListener) Connect(ip string, port int) { - m.provideConn <- &mockConn{ - laddr: m.localAddr, - lnet: "tcp", - rAddr: &net.TCPAddr{ - IP: net.ParseIP(ip), - Port: port, - }, - } -} - -// newMockListener returns a new mock listener for the provided local address -// and port. No ports are actually opened. 
-func newMockListener(localAddr string) *mockListener { - return &mockListener{ - localAddr: localAddr, - provideConn: make(chan net.Conn), - } -} - -// TestListeners ensures providing listeners to the connection manager along -// with an accept callback works properly. -func TestListeners(t *testing.T) { - appConfig := defaultAppConfig() - - // Setup a connection manager with a couple of mock listeners that - // notify a channel when they receive mock connections. - receivedConns := make(chan net.Conn) - listener1 := newMockListener("127.0.0.1:16111") - listener2 := newMockListener("127.0.0.1:9333") - listeners := []net.Listener{listener1, listener2} - - amgr, teardown := addressManagerForTest(t, "TestListeners", appConfig, 10) - defer teardown() - - cmgr, err := New(&Config{ - Listeners: listeners, - OnAccept: func(conn net.Conn) { - receivedConns <- conn - }, - Dial: mockDialer, - AddrManager: amgr, - }, appConfig) - if err != nil { - t.Fatalf("unexpected error from New: %s", err) - } - cmgr.Start() - - // Fake a couple of mock connections to each of the listeners. - go func() { - for i, listener := range listeners { - l := listener.(*mockListener) - l.Connect("127.0.0.1", 10000+i*2) - l.Connect("127.0.0.1", 10000+i*2+1) - } - }() - - // Tally the receive connections to ensure the expected number are - // received. Also, fail the test after a timeout so it will not hang - // forever should the test not work. - expectedNumConns := len(listeners) * 2 - var numConns int -out: - for { - select { - case <-receivedConns: - numConns++ - if numConns == expectedNumConns { - break out - } - - case <-time.After(time.Millisecond * 50): - t.Fatalf("Timeout waiting for %d expected connections", - expectedNumConns) - } - } - - cmgr.Stop() - cmgr.Wait() -} - -// TestConnReqString ensures that ConnReq.String() does not crash -func TestConnReqString(t *testing.T) { - defer func() { - if r := recover(); r != nil { - t.Fatalf("ConnReq.String crashed %v", r) - } - }() - cr1 := &ConnReq{ - Addr: &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, - Permanent: true, - } - _ = cr1.String() - cr2 := &ConnReq{} - _ = cr2.String() -} diff --git a/connmgr/doc.go b/connmgr/doc.go deleted file mode 100644 index 3893cf88f..000000000 --- a/connmgr/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Package connmgr implements a generic Kaspa network connection manager. - -Connection Manager Overview - -Connection Manager handles all the general connection concerns such as -maintaining a set number of outbound connections, sourcing peers, banning, -limiting max connections, etc. - -The package provides a generic connection manager which is able to accept -connection requests from a source or a set of given addresses, dial them and -notify the caller on connections. The main intended use is to initialize a pool -of active connections and maintain them to remain connected to the P2P network. 
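// The package overview above and the removed tests earlier in this diff show how the
// connection manager is driven. The following is a minimal, illustrative sketch of that
// pattern, written as if it sat inside package connmgr like the tests; the parameter types,
// the plain net.Dial dialer and the port number are inferred for illustration rather than
// taken from this diff.
func exampleConnManagerUsage(appConfig *config.Config, amgr *addrmgr.AddrManager) error {
	cmgr, err := New(&Config{
		// Dial establishes outbound connections; a plain net.Dial stands in here.
		Dial: func(addr net.Addr) (net.Conn, error) {
			return net.Dial(addr.Network(), addr.String())
		},
		RetryDuration: 10 * time.Second,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			log.Infof("connected to %s", c.Addr)
		},
		AddrManager: amgr,
	}, appConfig)
	if err != nil {
		return err
	}
	cmgr.Start()

	// Request a permanent connection, exactly as the removed tests do.
	cmgr.Connect(&ConnReq{
		Addr:      &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111},
		Permanent: true,
	})

	// On shutdown, stop the manager and wait for its handlers to exit.
	cmgr.Stop()
	cmgr.Wait()
	return nil
}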
- -In addition the connection manager provides the following utilities: - -- Notifications on connections or disconnections -- Handle failures and retry new addresses from the source -- Connect only to specified addresses -- Permanent connections with increasing backoff retry timers -- Disconnect or Remove an established connection -*/ -package connmgr diff --git a/connmgr/dynamicbanscore.go b/connmgr/dynamicbanscore.go deleted file mode 100644 index 23d7e2bff..000000000 --- a/connmgr/dynamicbanscore.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package connmgr - -import ( - "fmt" - "math" - "sync" - "time" -) - -const ( - // Halflife defines the time (in seconds) by which the transient part - // of the ban score decays to one half of it's original value. - Halflife = 60 - - // lambda is the decaying constant. - lambda = math.Ln2 / Halflife - - // Lifetime defines the maximum age of the transient part of the ban - // score to be considered a non-zero score (in seconds). - Lifetime = 1800 - - // precomputedLen defines the amount of decay factors (one per second) that - // should be precomputed at initialization. - precomputedLen = 64 -) - -// precomputedFactor stores precomputed exponential decay factors for the first -// 'precomputedLen' seconds starting from t == 0. -var precomputedFactor [precomputedLen]float64 - -// init precomputes decay factors. -func init() { - for i := range precomputedFactor { - precomputedFactor[i] = math.Exp(-1.0 * float64(i) * lambda) - } -} - -// decayFactor returns the decay factor at t seconds, using precalculated values -// if available, or calculating the factor if needed. -func decayFactor(t int64) float64 { - if t < precomputedLen { - return precomputedFactor[t] - } - return math.Exp(-1.0 * float64(t) * lambda) -} - -// DynamicBanScore provides dynamic ban scores consisting of a persistent and a -// decaying component. -// -// The decaying score enables the creation of evasive logic which handles -// misbehaving peers (especially application layer DoS attacks) gracefully -// by disconnecting and banning peers attempting various kinds of flooding. -// DynamicBanScore allows these two approaches to be used in tandem. -// -// Zero value: Values of type DynamicBanScore are immediately ready for use upon -// declaration. -type DynamicBanScore struct { - lastUnix int64 - transient float64 - persistent uint32 - mtx sync.Mutex -} - -// String returns the ban score as a human-readable string. -func (s *DynamicBanScore) String() string { - s.mtx.Lock() - defer s.mtx.Unlock() - r := fmt.Sprintf("persistent %d + transient %f at %d = %d as of now", - s.persistent, s.transient, s.lastUnix, s.Int()) - return r -} - -// Int returns the current ban score, the sum of the persistent and decaying -// scores. -// -// This function is safe for concurrent access. -func (s *DynamicBanScore) Int() uint32 { - s.mtx.Lock() - defer s.mtx.Unlock() - r := s.int(time.Now()) - return r -} - -// Increase increases both the persistent and decaying scores by the values -// passed as parameters. The resulting score is returned. -// -// This function is safe for concurrent access. -func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 { - s.mtx.Lock() - defer s.mtx.Unlock() - r := s.increase(persistent, transient, time.Now()) - return r -} - -// Reset set both persistent and decaying scores to zero. 
-// -// This function is safe for concurrent access. -func (s *DynamicBanScore) Reset() { - s.mtx.Lock() - defer s.mtx.Unlock() - s.persistent = 0 - s.transient = 0 - s.lastUnix = 0 -} - -// int returns the ban score, the sum of the persistent and decaying scores at a -// given point in time. -// -// This function is not safe for concurrent access. It is intended to be used -// internally and during testing. -func (s *DynamicBanScore) int(t time.Time) uint32 { - dt := t.Unix() - s.lastUnix - if s.transient < 1 || dt < 0 || Lifetime < dt { - return s.persistent - } - return s.persistent + uint32(s.transient*decayFactor(dt)) -} - -// increase increases the persistent, the decaying or both scores by the values -// passed as parameters. The resulting score is calculated as if the action was -// carried out at the point time represented by the third parameter. The -// resulting score is returned. -// -// This function is not safe for concurrent access. -func (s *DynamicBanScore) increase(persistent, transient uint32, t time.Time) uint32 { - s.persistent += persistent - tu := t.Unix() - dt := tu - s.lastUnix - - if transient > 0 { - if Lifetime < dt { - s.transient = 0 - } else if s.transient > 1 && dt > 0 { - s.transient *= decayFactor(dt) - } - s.transient += float64(transient) - s.lastUnix = tu - } - return s.persistent + uint32(s.transient) -} diff --git a/connmgr/dynamicbanscore_test.go b/connmgr/dynamicbanscore_test.go deleted file mode 100644 index ec3d2eae6..000000000 --- a/connmgr/dynamicbanscore_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package connmgr - -import ( - "math" - "testing" - "time" -) - -// TestDynamicBanScoreDecay tests the exponential decay implemented in -// DynamicBanScore. -func TestDynamicBanScoreDecay(t *testing.T) { - var bs DynamicBanScore - base := time.Now() - - r := bs.increase(100, 50, base) - if r != 150 { - t.Errorf("Unexpected result %d after ban score increase.", r) - } - - r = bs.int(base.Add(time.Minute)) - if r != 125 { - t.Errorf("Halflife check failed - %d instead of 125", r) - } - - r = bs.int(base.Add(7 * time.Minute)) - if r != 100 { - t.Errorf("Decay after 7m - %d instead of 100", r) - } -} - -// TestDynamicBanScoreLifetime tests that DynamicBanScore properly yields zero -// once the maximum age is reached. -func TestDynamicBanScoreLifetime(t *testing.T) { - var bs DynamicBanScore - base := time.Now() - - bs.increase(0, math.MaxUint32, base) - r := bs.int(base.Add(Lifetime * time.Second)) - if r != 3 { // 3, not 4 due to precision loss and truncating 3.999... - t.Errorf("Pre max age check with MaxUint32 failed - %d", r) - } - r = bs.int(base.Add((Lifetime + 1) * time.Second)) - if r != 0 { - t.Errorf("Zero after max age check failed - %d instead of 0", r) - } -} - -// TestDynamicBanScore tests exported functions of DynamicBanScore. Exponential -// decay or other time based behavior is tested by other functions. 
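// To make the decay arithmetic removed above easier to follow, here is a standalone,
// illustrative sketch using the same constant (Halflife = 60 seconds) and the same inputs
// as TestDynamicBanScoreDecay: the transient part decays as e^(-t*ln2/Halflife) while the
// persistent part never decays.
package main

import (
	"fmt"
	"math"
)

func main() {
	const halflife = 60.0 // seconds, mirroring the removed connmgr.Halflife
	persistent, transient := 100.0, 50.0

	for _, seconds := range []float64{0, 60, 420} {
		decayed := transient * math.Exp(-seconds*math.Ln2/halflife)
		fmt.Printf("after %3.0fs: score = %d\n", seconds, uint32(persistent+decayed))
	}
	// Prints 150, then 125 after one halflife, then 100 after seven minutes --
	// the same values the removed test asserts.
}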
-func TestDynamicBanScoreReset(t *testing.T) { - var bs DynamicBanScore - if bs.Int() != 0 { - t.Errorf("Initial state is not zero.") - } - bs.Increase(100, 0) - r := bs.Int() - if r != 100 { - t.Errorf("Unexpected result %d after ban score increase.", r) - } - bs.Reset() - if bs.Int() != 0 { - t.Errorf("Failed to reset ban score.") - } -} diff --git a/connmgr/log.go b/connmgr/log.go deleted file mode 100644 index ce983fdd2..000000000 --- a/connmgr/log.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package connmgr - -import ( - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/util/panics" -) - -var log, _ = logger.Get(logger.SubsystemTags.CMGR) -var spawn = panics.GoroutineWrapperFunc(log) -var spawnAfter = panics.AfterFuncWrapperFunc(log) diff --git a/connmgr/seed.go b/connmgr/seed.go deleted file mode 100644 index af1965cad..000000000 --- a/connmgr/seed.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package connmgr - -import ( - "fmt" - mrand "math/rand" - "net" - "strconv" - "time" - - "github.com/kaspanet/kaspad/util/mstime" - - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/util/subnetworkid" - - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/wire" -) - -const ( - // These constants are used by the DNS seed code to pick a random last - // seen time. - secondsIn3Days int32 = 24 * 60 * 60 * 3 - secondsIn4Days int32 = 24 * 60 * 60 * 4 - - // SubnetworkIDPrefixChar is the prefix of subnetworkID, when building a DNS seed request - SubnetworkIDPrefixChar byte = 'n' - - // ServiceFlagPrefixChar is the prefix of service flag, when building a DNS seed request - ServiceFlagPrefixChar byte = 'x' -) - -// OnSeed is the signature of the callback function which is invoked when DNS -// seeding is succesfull. -type OnSeed func(addrs []*wire.NetAddress) - -// LookupFunc is the signature of the DNS lookup function. -type LookupFunc func(string) ([]net.IP, error) - -// SeedFromDNS uses DNS seeding to populate the address manager with peers. 
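// The removed SeedFromDNS below builds the DNS name to query from the requested service
// flags and subnetwork. As a concrete, self-contained illustration (the seed host and the
// service-flag value are placeholders, not values from this diff):
package main

import "fmt"

func main() {
	const (
		serviceFlagPrefixChar  = 'x' // same prefix characters as the removed constants above
		subnetworkIDPrefixChar = 'n'
	)
	dnsseed := "seed.example.org" // placeholder seed host
	reqServices := uint64(9)      // placeholder non-default service flags

	// Non-default service flags are encoded as a hex label with an 'x' prefix...
	host := fmt.Sprintf("%c%x.%s", serviceFlagPrefixChar, reqServices, dnsseed)
	// ...and, when includeAllSubnetworks is false and no subnetwork ID is given,
	// a bare "n." label is prepended.
	host = fmt.Sprintf("%c.%s", subnetworkIDPrefixChar, host)

	fmt.Println(host) // n.x9.seed.example.org
}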
-func SeedFromDNS(mainConfig *config.Config, dagParams *dagconfig.Params, reqServices wire.ServiceFlag, includeAllSubnetworks bool, - subnetworkID *subnetworkid.SubnetworkID, lookupFn LookupFunc, seedFn OnSeed) { - - var dnsSeeds []string - if mainConfig != nil && mainConfig.DNSSeed != "" { - dnsSeeds = []string{mainConfig.DNSSeed} - } else { - dnsSeeds = dagParams.DNSSeeds - } - - for _, dnsseed := range dnsSeeds { - var host string - if reqServices == wire.SFNodeNetwork { - host = dnsseed - } else { - host = fmt.Sprintf("%c%x.%s", ServiceFlagPrefixChar, uint64(reqServices), dnsseed) - } - - if !includeAllSubnetworks { - if subnetworkID != nil { - host = fmt.Sprintf("%c%s.%s", SubnetworkIDPrefixChar, subnetworkID, host) - } else { - host = fmt.Sprintf("%c.%s", SubnetworkIDPrefixChar, host) - } - } - - spawn("SPAWN_PLACEHOLDER_NAME", func() { - randSource := mrand.New(mrand.NewSource(time.Now().UnixNano())) - - seedpeers, err := lookupFn(host) - if err != nil { - log.Infof("DNS discovery failed on seed %s: %s", host, err) - return - } - numPeers := len(seedpeers) - - log.Infof("%d addresses found from DNS seed %s", numPeers, host) - - if numPeers == 0 { - return - } - addresses := make([]*wire.NetAddress, len(seedpeers)) - // if this errors then we have *real* problems - intPort, _ := strconv.Atoi(dagParams.DefaultPort) - for i, peer := range seedpeers { - addresses[i] = wire.NewNetAddressTimestamp( - // seed with addresses from a time randomly selected - // between 3 and 7 days ago. - mstime.Now().Add(-1*time.Second*time.Duration(secondsIn3Days+ - randSource.Int31n(secondsIn4Days))), - 0, peer, uint16(intPort)) - } - - seedFn(addresses) - }) - } -} diff --git a/kaspad.go b/kaspad.go index 5598025b6..b6805969c 100644 --- a/kaspad.go +++ b/kaspad.go @@ -23,7 +23,7 @@ import ( "github.com/kaspanet/kaspad/mempool" "github.com/kaspanet/kaspad/mining" "github.com/kaspanet/kaspad/protocol" - "github.com/kaspanet/kaspad/server/rpc" + "github.com/kaspanet/kaspad/rpc" "github.com/kaspanet/kaspad/signal" "github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/util" @@ -135,7 +135,8 @@ func newKaspad(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, in return nil, err } - rpcServer, err := setupRPC(cfg, dag, txMempool, sigCache, acceptanceIndex) + rpcServer, err := setupRPC(cfg, dag, txMempool, sigCache, acceptanceIndex, + connectionManager, addressManager, protocolManager) if err != nil { return nil, err } @@ -201,7 +202,8 @@ func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript } func setupRPC(cfg *config.Config, dag *blockdag.BlockDAG, txMempool *mempool.TxPool, sigCache *txscript.SigCache, - acceptanceIndex *indexers.AcceptanceIndex) (*rpc.Server, error) { + acceptanceIndex *indexers.AcceptanceIndex, connectionManager *connmanager.ConnectionManager, + addressManager *addrmgr.AddrManager, protocolManager *protocol.Manager) (*rpc.Server, error) { if !cfg.DisableRPC { policy := mining.Policy{ @@ -209,7 +211,8 @@ func setupRPC(cfg *config.Config, dag *blockdag.BlockDAG, txMempool *mempool.TxP } blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache) - rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator) + rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator, + connectionManager, addressManager, protocolManager) if err != nil { return nil, err } diff --git a/mempool/mempool.go b/mempool/mempool.go index b44e5ea96..1f22d4713 100644 --- 
a/mempool/mempool.go +++ b/mempool/mempool.go @@ -17,7 +17,6 @@ import ( "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/logger" "github.com/kaspanet/kaspad/mining" - "github.com/kaspanet/kaspad/rpcmodel" "github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" @@ -1294,42 +1293,6 @@ func (mp *TxPool) MiningDescs() []*mining.TxDesc { return descs } -// RawMempoolVerbose returns all of the entries in the mempool as a fully -// populated jsonrpc result. -// -// This function is safe for concurrent access. -func (mp *TxPool) RawMempoolVerbose() map[string]*rpcmodel.GetRawMempoolVerboseResult { - mp.mtx.RLock() - defer mp.mtx.RUnlock() - - result := make(map[string]*rpcmodel.GetRawMempoolVerboseResult, len(mp.pool)) - - for _, desc := range mp.pool { - // Calculate the current priority based on the inputs to - // the transaction. Use zero if one or more of the - // input transactions can't be found for some reason. - tx := desc.Tx - - mpd := &rpcmodel.GetRawMempoolVerboseResult{ - Size: int32(tx.MsgTx().SerializeSize()), - Fee: util.Amount(desc.Fee).ToKAS(), - Time: desc.Added.UnixMilliseconds(), - Depends: make([]string, 0), - } - for _, txIn := range tx.MsgTx().TxIn { - txID := &txIn.PreviousOutpoint.TxID - if mp.haveTransaction(txID) { - mpd.Depends = append(mpd.Depends, - txID.String()) - } - } - - result[tx.ID().String()] = mpd - } - - return result -} - // LastUpdated returns the last time a transaction was added to or removed from // the main pool. It does not include the orphan pool. // diff --git a/mining/mining.go b/mining/mining.go index 07ff55472..57e36a844 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -7,6 +7,7 @@ package mining import ( "github.com/kaspanet/kaspad/util/mstime" "github.com/pkg/errors" + "time" "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/txscript" @@ -228,3 +229,21 @@ func (g *BlkTmplGenerator) UpdateBlockTime(msgBlock *wire.MsgBlock) error { func (g *BlkTmplGenerator) TxSource() TxSource { return g.txSource } + +// IsSynced checks if the node is synced enough based upon its worldview. +// This is used to determine if the node can support mining and requesting newly-mined blocks. +// To do that, first it checks if the selected tip timestamp is not older than maxTipAge. If that's the case, it means +// the node is synced since blocks' timestamps are not allowed to deviate too much into the future. +// If that's not the case it checks the rate it added new blocks to the DAG recently. If it's faster than +// blockRate * maxSyncRateDeviation it means the node is not synced, since when the node is synced it shouldn't add +// blocks to the DAG faster than the block rate. +func (g *BlkTmplGenerator) IsSynced() bool { + const maxTipAge = 5 * time.Minute + isCloseToCurrentTime := g.dag.Now().Sub(g.dag.SelectedTipHeader().Timestamp) <= maxTipAge + if isCloseToCurrentTime { + return true + } + + const maxSyncRateDeviation = 1.05 + return g.dag.IsSyncRateBelowThreshold(maxSyncRateDeviation) +} diff --git a/netadapter/netadapter.go b/netadapter/netadapter.go index d56071b73..9067fc54b 100644 --- a/netadapter/netadapter.go +++ b/netadapter/netadapter.go @@ -17,7 +17,7 @@ import ( // RouterInitializer is a function that initializes a new // router to be used with a new connection -type RouterInitializer func() (*routerpkg.Router, error) +type RouterInitializer func(netConnection *NetConnection) (*routerpkg.Router, error) // NetAdapter is an abstraction layer over networking. 
// This type expects a RouteInitializer function. This @@ -99,15 +99,19 @@ func (na *NetAdapter) Connections() []*NetConnection { return netConnections } +// ConnectionCount returns the count of the connected connections +func (na *NetAdapter) ConnectionCount() int { + return len(na.connectionsToIDs) +} + func (na *NetAdapter) onConnectedHandler(connection server.Connection) error { - router, err := na.routerInitializer() + netConnection := newNetConnection(connection, nil) + router, err := na.routerInitializer(netConnection) if err != nil { return err } connection.Start(router) - netConnection := newNetConnection(connection, nil) - na.routersToConnections[router] = netConnection na.connectionsToIDs[netConnection] = nil diff --git a/netsync/README.md b/netsync/README.md deleted file mode 100644 index 44a3047ab..000000000 --- a/netsync/README.md +++ /dev/null @@ -1,15 +0,0 @@ -netsync -======= - -[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/netsync) - -## Overview - -This package implements a concurrency safe block syncing protocol. The -SyncManager communicates with connected peers to perform an initial block -download, keep the chain and unconfirmed transaction pool in sync, and announce -new blocks connected to the DAG. The sync manager selects a single -sync peer that it downloads all blocks from until it is up to date with the -the peer's selected tip. - diff --git a/netsync/blocklogger.go b/netsync/blocklogger.go deleted file mode 100644 index 639252ef8..000000000 --- a/netsync/blocklogger.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2015-2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package netsync - -import ( - "github.com/kaspanet/kaspad/util/mstime" - "sync" - "time" - - "github.com/kaspanet/kaspad/logs" - "github.com/kaspanet/kaspad/util" -) - -// blockProgressLogger provides periodic logging for other services in order -// to show users progress of certain "actions" involving some or all current -// blocks. Ex: syncing, indexing all blocks, etc. -type blockProgressLogger struct { - receivedLogBlocks int64 - receivedLogTx int64 - lastBlockLogTime mstime.Time - - subsystemLogger *logs.Logger - progressAction string - sync.Mutex -} - -// newBlockProgressLogger returns a new block progress logger. -// The progress message is templated as follows: -// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod} -// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp}) -func newBlockProgressLogger(progressMessage string, logger *logs.Logger) *blockProgressLogger { - return &blockProgressLogger{ - lastBlockLogTime: mstime.Now(), - progressAction: progressMessage, - subsystemLogger: logger, - } -} - -// LogBlockBlueScore logs a new block blue score as an information message -// to show progress to the user. In order to prevent spam, it limits logging to -// one message every 10 seconds with duration and totals included. -func (b *blockProgressLogger) LogBlockBlueScore(block *util.Block, blueScore uint64) { - b.Lock() - defer b.Unlock() - - b.receivedLogBlocks++ - b.receivedLogTx += int64(len(block.MsgBlock().Transactions)) - - now := mstime.Now() - duration := now.Sub(b.lastBlockLogTime) - if duration < time.Second*10 { - return - } - - // Truncate the duration to 10s of milliseconds. 
- durationMillis := int64(duration / time.Millisecond) - tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10) - - // Log information about new block height. - blockStr := "blocks" - if b.receivedLogBlocks == 1 { - blockStr = "block" - } - txStr := "transactions" - if b.receivedLogTx == 1 { - txStr = "transaction" - } - b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, blue score %d, %s)", - b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx, - txStr, blueScore, block.MsgBlock().Header.Timestamp) - - b.receivedLogBlocks = 0 - b.receivedLogTx = 0 - b.lastBlockLogTime = now -} diff --git a/netsync/doc.go b/netsync/doc.go deleted file mode 100644 index b40dd60f2..000000000 --- a/netsync/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package netsync implements a concurrency safe block syncing protocol. The -SyncManager communicates with connected peers to perform an initial block -download, keep the DAG and unconfirmed transaction pool in sync, and announce -new blocks connected to the DAG. Currently the sync manager selects a single -sync peer that it downloads all blocks from until it is up to date with the -selected tip of the sync peer. -*/ -package netsync diff --git a/netsync/interface.go b/netsync/interface.go deleted file mode 100644 index f167963ca..000000000 --- a/netsync/interface.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package netsync - -import ( - "github.com/kaspanet/kaspad/blockdag" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/mempool" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/wire" -) - -// PeerNotifier exposes methods to notify peers of status changes to -// transactions, blocks, etc. Currently server (in the main package) implements -// this interface. -type PeerNotifier interface { - AnnounceNewTransactions(newTxs []*mempool.TxDesc) - - RelayInventory(invVect *wire.InvVect, data interface{}) - - TransactionConfirmed(tx *util.Tx) -} - -// Config is a configuration struct used to initialize a new SyncManager. -type Config struct { - PeerNotifier PeerNotifier - DAG *blockdag.BlockDAG - TxMemPool *mempool.TxPool - DAGParams *dagconfig.Params - MaxPeers int -} diff --git a/netsync/log.go b/netsync/log.go deleted file mode 100644 index 6a7d7db73..000000000 --- a/netsync/log.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package netsync - -import ( - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/util/panics" -) - -var log, _ = logger.Get(logger.SubsystemTags.SYNC) -var spawn = panics.GoroutineWrapperFunc(log) diff --git a/netsync/manager.go b/netsync/manager.go deleted file mode 100644 index 281faa4e2..000000000 --- a/netsync/manager.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Copyright (c) 2013-2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
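// Before its removal, the sync manager defined in this file was constructed from the Config
// shown in interface.go above. The following minimal sketch shows that wiring; the parameter
// values are placeholders, and the import set (blockdag, dagconfig, mempool and netsync under
// github.com/kaspanet/kaspad) is assumed rather than shown.
func exampleStartSyncManager(notifier netsync.PeerNotifier, dag *blockdag.BlockDAG,
	txPool *mempool.TxPool, dagParams *dagconfig.Params) (*netsync.SyncManager, error) {

	syncManager, err := netsync.New(&netsync.Config{
		PeerNotifier: notifier, // relays inventory and announces transactions to peers
		DAG:          dag,
		TxMemPool:    txPool,
		DAGParams:    dagParams,
		MaxPeers:     8, // placeholder; used to size the internal message channel
	})
	if err != nil {
		return nil, err
	}

	// Start launches the single message-handler goroutine; NewPeer, QueueBlock, QueueTx
	// and friends then feed it work, and Stop shuts it down.
	syncManager.Start()
	return syncManager, nil
}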
- -package netsync - -import ( - "fmt" - "github.com/kaspanet/kaspad/util/mstime" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/kaspanet/kaspad/blockdag" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/mempool" - peerpkg "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" - "github.com/pkg/errors" -) - -const ( - // maxRejectedTxns is the maximum number of rejected transactions - // hashes to store in memory. - maxRejectedTxns = 1000 - - // maxRequestedBlocks is the maximum number of requested block - // hashes to store in memory. - maxRequestedBlocks = wire.MaxInvPerMsg - - // maxRequestedTxns is the maximum number of requested transactions - // hashes to store in memory. - maxRequestedTxns = wire.MaxInvPerMsg - - minGetSelectedTipInterval = time.Minute - - minDAGTimeDelay = time.Minute -) - -// newPeerMsg signifies a newly connected peer to the block handler. -type newPeerMsg struct { - peer *peerpkg.Peer -} - -// blockMsg packages a kaspa block message and the peer it came from together -// so the block handler has access to that information. -type blockMsg struct { - block *util.Block - peer *peerpkg.Peer - isDelayedBlock bool - reply chan struct{} -} - -// invMsg packages a kaspa inv message and the peer it came from together -// so the block handler has access to that information. -type invMsg struct { - inv *wire.MsgInv - peer *peerpkg.Peer -} - -// donePeerMsg signifies a newly disconnected peer to the block handler. -type donePeerMsg struct { - peer *peerpkg.Peer -} - -// txMsg packages a kaspa tx message and the peer it came from together -// so the block handler has access to that information. -type txMsg struct { - tx *util.Tx - peer *peerpkg.Peer - reply chan struct{} -} - -// getSyncPeerMsg is a message type to be sent across the message channel for -// retrieving the current sync peer. -type getSyncPeerMsg struct { - reply chan int32 -} - -// processBlockResponse is a response sent to the reply channel of a -// processBlockMsg. -type processBlockResponse struct { - isOrphan bool - err error -} - -// processBlockMsg is a message type to be sent across the message channel -// for requested a block is processed. Note this call differs from blockMsg -// above in that blockMsg is intended for blocks that came from peers and have -// extra handling whereas this message essentially is just a concurrent safe -// way to call ProcessBlock on the internal block DAG instance. -type processBlockMsg struct { - block *util.Block - flags blockdag.BehaviorFlags - reply chan processBlockResponse -} - -// isSyncedMsg is a message type to be sent across the message channel for -// requesting whether or not the sync manager believes it is synced with the -// currently connected peers. -type isSyncedMsg struct { - reply chan bool -} - -// pauseMsg is a message type to be sent across the message channel for -// pausing the sync manager. This effectively provides the caller with -// exclusive access over the manager until a receive is performed on the -// unpause channel. -type pauseMsg struct { - unpause <-chan struct{} -} - -type selectedTipMsg struct { - selectedTipHash *daghash.Hash - peer *peerpkg.Peer - reply chan struct{} -} - -type requestQueueAndSet struct { - queue []*wire.InvVect - set map[daghash.Hash]struct{} -} - -// peerSyncState stores additional information that the SyncManager tracks -// about a peer. 
-type peerSyncState struct { - syncCandidate bool - lastSelectedTipRequest mstime.Time - peerShouldSendSelectedTip bool - requestQueueMtx sync.Mutex - requestQueues map[wire.InvType]*requestQueueAndSet - requestedTxns map[daghash.TxID]struct{} - requestedBlocks map[daghash.Hash]struct{} -} - -// SyncManager is used to communicate block related messages with peers. The -// SyncManager is started as by executing Start() in a goroutine. Once started, -// it selects peers to sync from and starts the initial block download. Once the -// DAG is in sync, the SyncManager handles incoming block and header -// notifications and relays announcements of new blocks to peers. -type SyncManager struct { - peerNotifier PeerNotifier - started int32 - shutdown int32 - dag *blockdag.BlockDAG - txMemPool *mempool.TxPool - dagParams *dagconfig.Params - progressLogger *blockProgressLogger - msgChan chan interface{} - wg sync.WaitGroup - quit chan struct{} - syncPeerLock sync.Mutex - isSyncing bool - - // These fields should only be accessed from the messageHandler thread - rejectedTxns map[daghash.TxID]struct{} - requestedTxns map[daghash.TxID]struct{} - requestedBlocks map[daghash.Hash]struct{} - syncPeer *peerpkg.Peer - peerStates map[*peerpkg.Peer]*peerSyncState -} - -// startSync will choose the sync peer among the available candidate peers to -// download/sync the blockDAG from. When syncing is already running, it -// simply returns. It also examines the candidates for any which are no longer -// candidates and removes them as needed. -// -// This function MUST be called with the sync peer lock held. -func (sm *SyncManager) startSync() { - // Return now if we're already syncing. - if sm.syncPeer != nil { - return - } - - var syncPeer *peerpkg.Peer - for peer, state := range sm.peerStates { - if !state.syncCandidate { - continue - } - - if !peer.IsSelectedTipKnown() { - continue - } - - // TODO(davec): Use a better algorithm to choose the sync peer. - // For now, just pick the first available candidate. - syncPeer = peer - break - } - - // Start syncing from the sync peer if one was selected. - if syncPeer != nil { - // Clear the requestedBlocks if the sync peer changes, otherwise - // we may ignore blocks we need that the last sync peer failed - // to send. 
- sm.requestedBlocks = make(map[daghash.Hash]struct{}) - - log.Infof("Syncing to block %s from peer %s", - syncPeer.SelectedTipHash(), syncPeer.Addr()) - - syncPeer.PushGetBlockLocatorMsg(syncPeer.SelectedTipHash(), sm.dagParams.GenesisHash) - sm.isSyncing = true - sm.syncPeer = syncPeer - return - } - - pendingForSelectedTips := false - - if sm.shouldQueryPeerSelectedTips() { - sm.isSyncing = true - hasSyncCandidates := false - for peer, state := range sm.peerStates { - if state.peerShouldSendSelectedTip { - pendingForSelectedTips = true - continue - } - if !state.syncCandidate { - continue - } - hasSyncCandidates = true - - if mstime.Since(state.lastSelectedTipRequest) < minGetSelectedTipInterval { - continue - } - - sm.queueMsgGetSelectedTip(peer, state) - pendingForSelectedTips = true - } - if !hasSyncCandidates { - log.Warnf("No sync peer candidates available") - } - } - - if !pendingForSelectedTips { - sm.isSyncing = false - } -} - -func (sm *SyncManager) shouldQueryPeerSelectedTips() bool { - return sm.dag.Now().Sub(sm.dag.CalcPastMedianTime()) > minDAGTimeDelay -} - -func (sm *SyncManager) queueMsgGetSelectedTip(peer *peerpkg.Peer, state *peerSyncState) { - state.lastSelectedTipRequest = mstime.Now() - state.peerShouldSendSelectedTip = true - peer.QueueMessage(wire.NewMsgGetSelectedTip(), nil) -} - -// isSyncCandidate returns whether or not the peer is a candidate to consider -// syncing from. -func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool { - // Typically a peer is not a candidate for sync if it's not a full node, - // however regression test is special in that the regression tool is - // not a full node and still needs to be considered a sync candidate. - if sm.dagParams == &dagconfig.RegressionNetParams { - // The peer is not a candidate if it's not coming from localhost - // or the hostname can't be determined for some reason. - host, _, err := net.SplitHostPort(peer.Addr()) - if err != nil { - return false - } - - if host != "127.0.0.1" && host != "localhost" { - return false - } - } else { - // The peer is not a candidate for sync if it's not a full - // node. - nodeServices := peer.Services() - if nodeServices&wire.SFNodeNetwork != wire.SFNodeNetwork { - return false - } - } - - // Candidate if all checks passed. - return true -} - -// handleNewPeerMsg deals with new peers that have signalled they may -// be considered as a sync peer (they have already successfully negotiated). It -// also starts syncing if needed. It is invoked from the syncHandler goroutine. -func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) { - // Ignore if in the process of shutting down. - if atomic.LoadInt32(&sm.shutdown) != 0 { - return - } - - log.Infof("New valid peer %s (%s)", peer, peer.UserAgent()) - - // Initialize the peer state - isSyncCandidate := sm.isSyncCandidate(peer) - requestQueues := make(map[wire.InvType]*requestQueueAndSet) - requestQueueInvTypes := []wire.InvType{wire.InvTypeTx, wire.InvTypeBlock, wire.InvTypeSyncBlock, wire.InvTypeMissingAncestor} - for _, invType := range requestQueueInvTypes { - requestQueues[invType] = &requestQueueAndSet{ - set: make(map[daghash.Hash]struct{}), - } - } - sm.peerStates[peer] = &peerSyncState{ - syncCandidate: isSyncCandidate, - requestedTxns: make(map[daghash.TxID]struct{}), - requestedBlocks: make(map[daghash.Hash]struct{}), - requestQueues: requestQueues, - } - - // Start syncing by choosing the best candidate if needed. 
- if isSyncCandidate { - sm.restartSyncIfNeeded() - } -} - -// handleDonePeerMsg deals with peers that have signalled they are done. It -// removes the peer as a candidate for syncing and in the case where it was -// the current sync peer, attempts to select a new best peer to sync from. It -// is invoked from the syncHandler goroutine. -func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) { - state, exists := sm.peerStates[peer] - if !exists { - log.Warnf("Received done peer message for unknown peer %s", peer) - return - } - - // Remove the peer from the list of candidate peers. - delete(sm.peerStates, peer) - - log.Infof("Lost peer %s", peer) - - // Remove requested transactions from the global map so that they will - // be fetched from elsewhere next time we get an inv. - for txHash := range state.requestedTxns { - delete(sm.requestedTxns, txHash) - } - - // Remove requested blocks from the global map so that they will be - // fetched from elsewhere next time we get an inv. - // TODO: we could possibly here check which peers have these blocks - // and request them now to speed things up a little. - for blockHash := range state.requestedBlocks { - delete(sm.requestedBlocks, blockHash) - } - - sm.stopSyncFromPeer(peer) -} - -// stopSyncFromPeer replaces a sync peer if the given peer -// is the sync peer. -func (sm *SyncManager) stopSyncFromPeer(peer *peerpkg.Peer) { - if sm.syncPeer == peer { - sm.syncPeer = nil - sm.restartSyncIfNeeded() - } -} - -// RemoveFromSyncCandidates removes the given peer from being -// a sync candidate and stop syncing from it if it's the current -// sync peer. -func (sm *SyncManager) RemoveFromSyncCandidates(peer *peerpkg.Peer) { - sm.peerStates[peer].syncCandidate = false - sm.stopSyncFromPeer(peer) -} - -// handleTxMsg handles transaction messages from all peers. -func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { - peer := tmsg.peer - state, exists := sm.peerStates[peer] - if !exists { - log.Warnf("Received tx message from unknown peer %s", peer) - return - } - - // If we didn't ask for this transaction then the peer is misbehaving. - txID := tmsg.tx.ID() - if _, exists = state.requestedTxns[*txID]; !exists { - peer.AddBanScoreAndPushRejectMsg(wire.CmdTx, wire.RejectNotRequested, (*daghash.Hash)(txID), - peerpkg.BanScoreUnrequestedTx, 0, fmt.Sprintf("got unrequested transaction %s", txID)) - return - } - - // Ignore transactions that we have already rejected. Do not - // send a reject message here because if the transaction was already - // rejected, the transaction was unsolicited. - if _, exists = sm.rejectedTxns[*txID]; exists { - log.Debugf("Ignoring unsolicited previously rejected "+ - "transaction %s from %s", txID, peer) - return - } - - // Process the transaction to include validation, insertion in the - // memory pool, orphan handling, etc. - acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx, - true, mempool.Tag(peer.ID())) - - // Remove transaction from request maps. Either the mempool/DAG - // already knows about it and as such we shouldn't have any more - // instances of trying to fetch it, or we failed to insert and thus - // we'll retry next time we get an inv. - delete(state.requestedTxns, *txID) - delete(sm.requestedTxns, *txID) - - if err != nil { - // Do not request this transaction again until a new block - // has been processed. 
- sm.rejectedTxns[*txID] = struct{}{} - sm.limitTxIDMap(sm.rejectedTxns, maxRejectedTxns) - - // When the error is a rule error, it means the transaction was - // simply rejected as opposed to something actually going wrong, - // so log it as such. Otherwise, something really did go wrong, - // so panic. - ruleErr := &mempool.RuleError{} - if !errors.As(err, ruleErr) { - panic(errors.Wrapf(err, "failed to process transaction %s", txID)) - } - - shouldIncreaseBanScore := false - if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) { - if txRuleErr.RejectCode == wire.RejectInvalid { - shouldIncreaseBanScore = true - } - } else if dagRuleErr := (&blockdag.RuleError{}); errors.As(ruleErr.Err, dagRuleErr) { - shouldIncreaseBanScore = true - } - - if shouldIncreaseBanScore { - peer.AddBanScoreAndPushRejectMsg(wire.CmdTx, wire.RejectInvalid, (*daghash.Hash)(txID), - peerpkg.BanScoreInvalidTx, 0, fmt.Sprintf("rejected transaction %s: %s", txID, err)) - } - return - } - - sm.peerNotifier.AnnounceNewTransactions(acceptedTxs) -} - -// restartSyncIfNeeded finds a new sync candidate if we're not expecting any -// blocks from the current one. -func (sm *SyncManager) restartSyncIfNeeded() { - sm.syncPeerLock.Lock() - defer sm.syncPeerLock.Unlock() - - if !sm.shouldReplaceSyncPeer() { - return - } - - sm.syncPeer = nil - sm.startSync() -} - -func (sm *SyncManager) shouldReplaceSyncPeer() bool { - if sm.syncPeer == nil { - return true - } - - syncPeerState, exists := sm.peerStates[sm.syncPeer] - if !exists { - panic(errors.Errorf("no peer state for sync peer %s", sm.syncPeer)) - } - - syncPeerState.requestQueueMtx.Lock() - defer syncPeerState.requestQueueMtx.Unlock() - return len(syncPeerState.requestedBlocks) == 0 && - len(syncPeerState.requestQueues[wire.InvTypeSyncBlock].queue) == 0 && - !sm.syncPeer.WasBlockLocatorRequested() -} - -// handleBlockMsg handles block messages from all peers. -func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { - peer := bmsg.peer - state, exists := sm.peerStates[peer] - if !exists { - log.Warnf("Received block message from unknown peer %s", peer) - return - } - - // If we didn't ask for this block then the peer is misbehaving. - blockHash := bmsg.block.Hash() - if _, exists = state.requestedBlocks[*blockHash]; !exists { - // The regression test intentionally sends some blocks twice - // to test duplicate block insertion fails. Don't disconnect - // the peer or ignore the block when we're in regression test - // mode in this case so the DAG code is actually fed the - // duplicate blocks. - if sm.dagParams != &dagconfig.RegressionNetParams { - peer.AddBanScoreAndPushRejectMsg(wire.CmdBlock, wire.RejectNotRequested, blockHash, - peerpkg.BanScoreUnrequestedBlock, 0, fmt.Sprintf("got unrequested block %s", blockHash)) - return - } - } - - behaviorFlags := blockdag.BFNone - if bmsg.isDelayedBlock { - behaviorFlags |= blockdag.BFAfterDelay - } - if bmsg.peer == sm.syncPeer { - behaviorFlags |= blockdag.BFIsSync - } - - // Process the block to include validation, orphan handling, etc. - isOrphan, isDelayed, err := sm.dag.ProcessBlock(bmsg.block, behaviorFlags) - - // Remove block from request maps. Either DAG knows about it and - // so we shouldn't have any more instances of trying to fetch it, or - // the insertion fails and thus we'll retry next time we get an inv. 
- delete(state.requestedBlocks, *blockHash) - delete(sm.requestedBlocks, *blockHash) - - sm.restartSyncIfNeeded() - - if err != nil { - // When the error is a rule error, it means the block was simply - // rejected as opposed to something actually going wrong, so log - // it as such. Otherwise, something really did go wrong, so log - // it as an actual error. - if !errors.As(err, &blockdag.RuleError{}) { - panic(errors.Wrapf(err, "Failed to process block %s", - blockHash)) - } - log.Infof("Rejected block %s from %s: %s", blockHash, - peer, err) - - peer.AddBanScoreAndPushRejectMsg(wire.CmdBlock, wire.RejectInvalid, blockHash, - peerpkg.BanScoreInvalidBlock, 0, fmt.Sprintf("got invalid block: %s", err)) - // Whether the peer will be banned or not, syncing from a node that doesn't follow - // the netsync protocol is undesired. - sm.RemoveFromSyncCandidates(peer) - return - } - - if isDelayed { - return - } - - if isOrphan { - blueScore, err := bmsg.block.BlueScore() - if err != nil { - log.Errorf("Received an orphan block %s with malformed blue score from %s. Disconnecting...", - blockHash, peer) - peer.AddBanScoreAndPushRejectMsg(wire.CmdBlock, wire.RejectInvalid, blockHash, - peerpkg.BanScoreMalformedBlueScoreInOrphan, 0, - fmt.Sprintf("Received an orphan block %s with malformed blue score", blockHash)) - return - } - - const maxOrphanBlueScoreDiff = 10000 - selectedTipBlueScore := sm.dag.SelectedTipBlueScore() - if blueScore > selectedTipBlueScore+maxOrphanBlueScoreDiff { - log.Infof("Orphan block %s has blue score %d and the selected tip blue score is "+ - "%d. Ignoring orphans with a blue score difference from the selected tip greater than %d", - blockHash, blueScore, selectedTipBlueScore, maxOrphanBlueScoreDiff) - return - } - - // Request the parents for the orphan block from the peer that sent it. - missingAncestors := sm.dag.GetOrphanMissingAncestorHashes(blockHash) - sm.addBlocksToRequestQueue(state, missingAncestors, wire.InvTypeMissingAncestor) - } else { - // When the block is not an orphan, log information about it and - // update the DAG state. - blockBlueScore, err := sm.dag.BlueScoreByBlockHash(blockHash) - if err != nil { - log.Errorf("Failed to get blue score for block %s: %s", blockHash, err) - } - sm.progressLogger.LogBlockBlueScore(bmsg.block, blockBlueScore) - - // Clear the rejected transactions. - sm.rejectedTxns = make(map[daghash.TxID]struct{}) - } - - // We don't want to flood our sync peer with getdata messages, so - // instead of asking it immediately about missing ancestors, we first - // wait until it finishes to send us all of the requested blocks. 
- if (isOrphan && peer != sm.syncPeer) || (peer == sm.syncPeer && len(state.requestedBlocks) == 0) { - err := sm.sendInvsFromRequestQueue(peer, state) - if err != nil { - log.Errorf("Failed to send invs from queue: %s", err) - return - } - } -} - -func (sm *SyncManager) addBlocksToRequestQueue(state *peerSyncState, hashes []*daghash.Hash, invType wire.InvType) { - state.requestQueueMtx.Lock() - defer state.requestQueueMtx.Unlock() - for _, hash := range hashes { - if _, exists := sm.requestedBlocks[*hash]; !exists { - iv := wire.NewInvVect(invType, hash) - state.addInvToRequestQueueNoLock(iv) - } - } -} - -func (state *peerSyncState) addInvToRequestQueueNoLock(iv *wire.InvVect) { - requestQueue, ok := state.requestQueues[iv.Type] - if !ok { - panic(errors.Errorf("got unsupported inventory type %s", iv.Type)) - } - - if _, exists := requestQueue.set[*iv.Hash]; exists { - return - } - - requestQueue.set[*iv.Hash] = struct{}{} - requestQueue.queue = append(requestQueue.queue, iv) -} - -func (state *peerSyncState) addInvToRequestQueue(iv *wire.InvVect) { - state.requestQueueMtx.Lock() - defer state.requestQueueMtx.Unlock() - state.addInvToRequestQueueNoLock(iv) -} - -// haveInventory returns whether or not the inventory represented by the passed -// inventory vector is known. This includes checking all of the various places -// inventory can be when it is in different states such as blocks that are part -// of the DAG, in the orphan pool, and transactions that are in the memory pool -// (either the main pool or orphan pool). -func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) { - switch invVect.Type { - case wire.InvTypeMissingAncestor: - fallthrough - case wire.InvTypeSyncBlock: - fallthrough - case wire.InvTypeBlock: - // Ask DAG if the block is known to it in any form (in DAG or as an orphan). - return sm.dag.IsKnownBlock(invVect.Hash), nil - - case wire.InvTypeTx: - // Ask the transaction memory pool if the transaction is known - // to it in any form (main pool or orphan). - if sm.txMemPool.HaveTransaction((*daghash.TxID)(invVect.Hash)) { - return true, nil - } - - // Check if the transaction exists from the point of view of the - // DAG's virtual block. Note that this is only a best effort - // since it is expensive to check existence of every output and - // the only purpose of this check is to avoid downloading - // already known transactions. Only the first two outputs are - // checked because the vast majority of transactions consist of - // two outputs where one is some form of "pay-to-somebody-else" - // and the other is a change output. - prevOut := wire.Outpoint{TxID: daghash.TxID(*invVect.Hash)} - for i := uint32(0); i < 2; i++ { - prevOut.Index = i - entry, ok := sm.dag.GetUTXOEntry(prevOut) - if !ok { - return false, nil - } - if entry != nil { - return true, nil - } - } - - return false, nil - } - - // The requested inventory is is an unsupported type, so just claim - // it is known to avoid requesting it. - return true, nil -} - -// handleInvMsg handles inv messages from all peers. -// We examine the inventory advertised by the remote peer and act accordingly. -func (sm *SyncManager) handleInvMsg(imsg *invMsg) { - peer := imsg.peer - state, exists := sm.peerStates[peer] - if !exists { - log.Warnf("Received inv message from unknown peer %s", peer) - return - } - - // Attempt to find the final block in the inventory list. There may - // not be one. 
- lastBlock := -1 - invVects := imsg.inv.InvList - for i := len(invVects) - 1; i >= 0; i-- { - if invVects[i].IsBlockOrSyncBlock() { - lastBlock = i - break - } - } - - haveUnknownInvBlock := false - - // Request the advertised inventory if we don't already have it. Also, - // request parent blocks of orphans if we receive one we already have. - // Finally, attempt to detect potential stalls due to big orphan DAGs - // we already have and request more blocks to prevent them. - for i, iv := range invVects { - // Ignore unsupported inventory types. - switch iv.Type { - case wire.InvTypeBlock: - case wire.InvTypeSyncBlock: - case wire.InvTypeTx: - default: - log.Warnf("got unsupported inv type %s from %s", iv.Type, peer) - continue - } - - // Add the inventory to the cache of known inventory - // for the peer. - peer.AddKnownInventory(iv) - - // Request the inventory if we don't already have it. - haveInv, err := sm.haveInventory(iv) - if err != nil { - log.Warnf("Unexpected failure when checking for "+ - "existing inventory during inv message "+ - "processing: %s", err) - continue - } - if !haveInv { - if iv.Type == wire.InvTypeTx { - // Skip the transaction if it has already been rejected. - if _, exists := sm.rejectedTxns[daghash.TxID(*iv.Hash)]; exists { - continue - } - - // Skip the transaction if it had previously been requested. - if _, exists := state.requestedTxns[daghash.TxID(*iv.Hash)]; exists { - continue - } - } - - if iv.Type == wire.InvTypeBlock { - haveUnknownInvBlock = true - } - - // Add it to the request queue. - state.addInvToRequestQueue(iv) - continue - } - - if iv.IsBlockOrSyncBlock() { - if sm.dag.IsKnownInvalid(iv.Hash) { - peer.AddBanScoreAndPushRejectMsg(imsg.inv.Command(), wire.RejectInvalid, iv.Hash, - peerpkg.BanScoreInvalidInvBlock, 0, fmt.Sprintf("sent inv of invalid block %s", iv.Hash)) - // Whether the peer will be banned or not, syncing from a node that doesn't follow - // the netsync protocol is undesired. - sm.RemoveFromSyncCandidates(peer) - return - } - // The block is an orphan block that we already have. - // When the existing orphan was processed, it requested - // the missing parent blocks. When this scenario - // happens, it means there were more blocks missing - // than are allowed into a single inventory message. As - // a result, once this peer requested the final - // advertised block, the remote peer noticed and is now - // resending the orphan block as an available block - // to signal there are more missing blocks that need to - // be requested. - if sm.dag.IsKnownOrphan(iv.Hash) { - if iv.Type == wire.InvTypeSyncBlock { - peer.AddBanScoreAndPushRejectMsg(imsg.inv.Command(), wire.RejectInvalid, iv.Hash, - peerpkg.BanScoreOrphanInvAsPartOfNetsync, 0, - fmt.Sprintf("sent inv of orphan block %s as part of netsync", iv.Hash)) - // Whether the peer will be banned or not, syncing from a node that doesn't follow - // the netsync protocol is undesired. - sm.RemoveFromSyncCandidates(peer) - return - } - missingAncestors := sm.dag.GetOrphanMissingAncestorHashes(iv.Hash) - sm.addBlocksToRequestQueue(state, missingAncestors, wire.InvTypeMissingAncestor) - continue - } - - // We already have the final block advertised by this - // inventory message, so force a request for more. This - // should only happen if our DAG and the peer's DAG have - // diverged long time ago. 
- if i == lastBlock && peer == sm.syncPeer { - // Request blocks after the first block's ancestor that exists - // in the selected path chain, one up to the - // final one the remote peer knows about. - peer.PushGetBlockLocatorMsg(iv.Hash, sm.dagParams.GenesisHash) - } - } - } - - err := sm.sendInvsFromRequestQueue(peer, state) - if err != nil { - log.Errorf("Failed to send invs from queue: %s", err) - } - - if haveUnknownInvBlock && !sm.isSyncing { - // If one of the inv messages is an unknown block - // it is an indication that one of our peers has more - // up-to-date data than us. - sm.restartSyncIfNeeded() - } -} - -func (sm *SyncManager) addInvsToGetDataMessageFromQueue(gdmsg *wire.MsgGetData, state *peerSyncState, invType wire.InvType, maxInvsToAdd int) error { - requestQueue, ok := state.requestQueues[invType] - if !ok { - panic(errors.Errorf("got unsupported inventory type %s", invType)) - } - queue := requestQueue.queue - var invsNum int - leftSpaceInGdmsg := wire.MaxInvPerGetDataMsg - len(gdmsg.InvList) - if len(queue) > leftSpaceInGdmsg { - invsNum = leftSpaceInGdmsg - } else { - invsNum = len(queue) - } - if invsNum > maxInvsToAdd { - invsNum = maxInvsToAdd - } - invsToAdd := make([]*wire.InvVect, 0, invsNum) - for len(queue) != 0 && len(invsToAdd) < invsNum { - var iv *wire.InvVect - iv, queue = queue[0], queue[1:] - - exists, err := sm.haveInventory(iv) - if err != nil { - return err - } - if !exists { - invsToAdd = append(invsToAdd, iv) - } - } - - addBlockInv := func(iv *wire.InvVect) { - // Request the block if there is not already a pending - // request. - if _, exists := sm.requestedBlocks[*iv.Hash]; !exists { - sm.requestedBlocks[*iv.Hash] = struct{}{} - sm.limitHashMap(sm.requestedBlocks, maxRequestedBlocks) - state.requestedBlocks[*iv.Hash] = struct{}{} - - gdmsg.AddInvVect(iv) - } - } - for _, iv := range invsToAdd { - delete(requestQueue.set, *iv.Hash) - switch invType { - case wire.InvTypeMissingAncestor: - addBlockInv(iv) - case wire.InvTypeSyncBlock: - addBlockInv(iv) - case wire.InvTypeBlock: - addBlockInv(iv) - - case wire.InvTypeTx: - // Request the transaction if there is not already a - // pending request. 
- if _, exists := sm.requestedTxns[daghash.TxID(*iv.Hash)]; !exists { - sm.requestedTxns[daghash.TxID(*iv.Hash)] = struct{}{} - sm.limitTxIDMap(sm.requestedTxns, maxRequestedTxns) - state.requestedTxns[daghash.TxID(*iv.Hash)] = struct{}{} - - gdmsg.AddInvVect(iv) - } - } - - if len(queue) >= wire.MaxInvPerGetDataMsg { - break - } - } - requestQueue.queue = queue - return nil -} - -func (sm *SyncManager) sendInvsFromRequestQueue(peer *peerpkg.Peer, state *peerSyncState) error { - state.requestQueueMtx.Lock() - defer state.requestQueueMtx.Unlock() - if len(sm.requestedBlocks) != 0 { - return nil - } - gdmsg := wire.NewMsgGetData() - err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeSyncBlock, wire.MaxSyncBlockInvPerGetDataMsg) - if err != nil { - return err - } - if !sm.isSyncing || sm.isSynced() { - err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeMissingAncestor, wire.MaxInvPerGetDataMsg) - if err != nil { - return err - } - - err = sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeBlock, wire.MaxInvPerGetDataMsg) - if err != nil { - return err - } - - err = sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeTx, wire.MaxInvPerGetDataMsg) - if err != nil { - return err - } - } - if len(gdmsg.InvList) > 0 { - peer.QueueMessage(gdmsg, nil) - } - return nil -} - -// limitTxIDMap is a helper function for maps that require a maximum limit by -// evicting a random transaction if adding a new value would cause it to -// overflow the maximum allowed. -func (sm *SyncManager) limitTxIDMap(m map[daghash.TxID]struct{}, limit int) { - if len(m)+1 > limit { - // Remove a random entry from the map. For most compilers, Go's - // range statement iterates starting at a random item although - // that is not 100% guaranteed by the spec. The iteration order - // is not important here because an adversary would have to be - // able to pull off preimage attacks on the hashing function in - // order to target eviction of specific entries anyways. - for txID := range m { - delete(m, txID) - return - } - } -} - -// limitHashMap is a helper function for maps that require a maximum limit by -// evicting a random item if adding a new value would cause it to -// overflow the maximum allowed. -func (sm *SyncManager) limitHashMap(m map[daghash.Hash]struct{}, limit int) { - if len(m)+1 > limit { - // Remove a random entry from the map. For most compilers, Go's - // range statement iterates starting at a random item although - // that is not 100% guaranteed by the spec. The iteration order - // is not important here because an adversary would have to be - // able to pull off preimage attacks on the hashing function in - // order to target eviction of specific entries anyways. 
- for hash := range m { - delete(m, hash) - return - } - } -} - -func (sm *SyncManager) handleProcessBlockMsg(msg processBlockMsg) (isOrphan bool, err error) { - isOrphan, isDelayed, err := sm.dag.ProcessBlock( - msg.block, msg.flags|blockdag.BFDisallowDelay) - if err != nil { - return false, err - } - if isDelayed { - return false, errors.New("Cannot process blocks from RPC beyond the allowed time offset") - } - - return isOrphan, nil -} - -func (sm *SyncManager) handleSelectedTipMsg(msg *selectedTipMsg) { - peer := msg.peer - selectedTipHash := msg.selectedTipHash - state := sm.peerStates[peer] - if !state.peerShouldSendSelectedTip { - peer.AddBanScoreAndPushRejectMsg(wire.CmdSelectedTip, wire.RejectNotRequested, nil, - peerpkg.BanScoreUnrequestedSelectedTip, 0, "got unrequested selected tip message") - return - } - state.peerShouldSendSelectedTip = false - peer.SetSelectedTipHash(selectedTipHash) - sm.restartSyncIfNeeded() -} - -// messageHandler is the main handler for the sync manager. It must be run as a -// goroutine. It processes block and inv messages in a separate goroutine -// from the peer handlers so the block (MsgBlock) messages are handled by a -// single thread without needing to lock memory data structures. This is -// important because the sync manager controls which blocks are needed and how -// the fetching should proceed. -func (sm *SyncManager) messageHandler() { -out: - for { - select { - case m := <-sm.msgChan: - switch msg := m.(type) { - case *newPeerMsg: - sm.handleNewPeerMsg(msg.peer) - - case *txMsg: - sm.handleTxMsg(msg) - msg.reply <- struct{}{} - - case *blockMsg: - sm.handleBlockMsg(msg) - msg.reply <- struct{}{} - - case *invMsg: - sm.handleInvMsg(msg) - - case *donePeerMsg: - sm.handleDonePeerMsg(msg.peer) - - case getSyncPeerMsg: - var peerID int32 - if sm.syncPeer != nil { - peerID = sm.syncPeer.ID() - } - msg.reply <- peerID - - case processBlockMsg: - isOrphan, err := sm.handleProcessBlockMsg(msg) - msg.reply <- processBlockResponse{ - isOrphan: isOrphan, - err: err, - } - - case isSyncedMsg: - msg.reply <- sm.isSynced() - - case pauseMsg: - // Wait until the sender unpauses the manager. - <-msg.unpause - - case *selectedTipMsg: - sm.handleSelectedTipMsg(msg) - msg.reply <- struct{}{} - - default: - log.Warnf("Invalid message type in block "+ - "handler: %T", msg) - } - - case <-sm.quit: - break out - } - } - - sm.wg.Done() - log.Trace("Block handler done") -} - -// handleBlockDAGNotification handles notifications from blockDAG. It does -// things such as request orphan block parents and relay accepted blocks to -// connected peers. -func (sm *SyncManager) handleBlockDAGNotification(notification *blockdag.Notification) { - switch notification.Type { - // A block has been accepted into the blockDAG. Relay it to other peers. - case blockdag.NTBlockAdded: - data, ok := notification.Data.(*blockdag.BlockAddedNotificationData) - if !ok { - log.Warnf("Block Added notification data is of wrong type.") - break - } - block := data.Block - - // Update mempool - ch := make(chan mempool.NewBlockMsg) - spawn("SPAWN_PLACEHOLDER_NAME", func() { - err := sm.txMemPool.HandleNewBlockOld(block, ch) - close(ch) - if err != nil { - panic(fmt.Sprintf("HandleNewBlockOld failed to handle block %s", block.Hash())) - } - }) - - // sm.peerNotifier sends messages to the rebroadcastHandler, so we call - // it in its own goroutine so it won't block dag.ProcessBlock in case - // rebroadcastHandler channel is full. 
- spawn("SPAWN_PLACEHOLDER_NAME", func() { - // Relay if we are current and the block was not just now unorphaned. - // Otherwise peers that are current should already know about it - if sm.isSynced() && !data.WasUnorphaned { - iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) - sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) - } - - for msg := range ch { - sm.peerNotifier.TransactionConfirmed(msg.Tx) - sm.peerNotifier.AnnounceNewTransactions(msg.AcceptedTxs) - } - }) - } -} - -// NewPeer informs the sync manager of a newly active peer. -func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) { - // Ignore if we are shutting down. - if atomic.LoadInt32(&sm.shutdown) != 0 { - return - } - sm.msgChan <- &newPeerMsg{peer: peer} -} - -// QueueTx adds the passed transaction message and peer to the block handling -// queue. Responds to the done channel argument after the tx message is -// processed. -func (sm *SyncManager) QueueTx(tx *util.Tx, peer *peerpkg.Peer, done chan struct{}) { - // Don't accept more transactions if we're shutting down. - if atomic.LoadInt32(&sm.shutdown) != 0 { - done <- struct{}{} - return - } - - sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done} -} - -// QueueBlock adds the passed block message and peer to the block handling -// queue. Responds to the done channel argument after the block message is -// processed. -func (sm *SyncManager) QueueBlock(block *util.Block, peer *peerpkg.Peer, isDelayedBlock bool, done chan struct{}) { - // Don't accept more blocks if we're shutting down. - if atomic.LoadInt32(&sm.shutdown) != 0 { - done <- struct{}{} - return - } - - sm.msgChan <- &blockMsg{block: block, peer: peer, isDelayedBlock: isDelayedBlock, reply: done} -} - -// QueueInv adds the passed inv message and peer to the block handling queue. -func (sm *SyncManager) QueueInv(inv *wire.MsgInv, peer *peerpkg.Peer) { - // No channel handling here because peers do not need to block on inv - // messages. - if atomic.LoadInt32(&sm.shutdown) != 0 { - return - } - - sm.msgChan <- &invMsg{inv: inv, peer: peer} -} - -// QueueSelectedTipMsg adds the passed selected tip message and peer to the -// block handling queue. Responds to the done channel argument after it finished -// handling the message. -func (sm *SyncManager) QueueSelectedTipMsg(msg *wire.MsgSelectedTip, peer *peerpkg.Peer, done chan struct{}) { - sm.msgChan <- &selectedTipMsg{ - selectedTipHash: msg.SelectedTipHash, - peer: peer, - reply: done, - } -} - -// DonePeer informs the blockmanager that a peer has disconnected. -func (sm *SyncManager) DonePeer(peer *peerpkg.Peer) { - // Ignore if we are shutting down. - if atomic.LoadInt32(&sm.shutdown) != 0 { - return - } - - sm.msgChan <- &donePeerMsg{peer: peer} -} - -// Start begins the core block handler which processes block and inv messages. -func (sm *SyncManager) Start() { - // Already started? - if atomic.AddInt32(&sm.started, 1) != 1 { - return - } - - log.Trace("Starting sync manager") - sm.wg.Add(1) - spawn("SPAWN_PLACEHOLDER_NAME", sm.messageHandler) -} - -// Stop gracefully shuts down the sync manager by stopping all asynchronous -// handlers and waiting for them to finish. -func (sm *SyncManager) Stop() error { - if atomic.AddInt32(&sm.shutdown, 1) != 1 { - log.Warnf("Sync manager is already in the process of " + - "shutting down") - return nil - } - - log.Infof("Sync manager shutting down") - close(sm.quit) - sm.wg.Wait() - return nil -} - -// SyncPeerID returns the ID of the current sync peer, or 0 if there is none. 
-func (sm *SyncManager) SyncPeerID() int32 { - reply := make(chan int32) - sm.msgChan <- getSyncPeerMsg{reply: reply} - return <-reply -} - -// ProcessBlock makes use of ProcessBlock on an internal instance of a blockDAG. -func (sm *SyncManager) ProcessBlock(block *util.Block, flags blockdag.BehaviorFlags) (bool, error) { - reply := make(chan processBlockResponse) - sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply} - response := <-reply - return response.isOrphan, response.err -} - -// IsSynced returns whether or not the sync manager believes it is synced with -// the connected peers. -func (sm *SyncManager) IsSynced() bool { - reply := make(chan bool) - sm.msgChan <- isSyncedMsg{reply: reply} - return <-reply -} - -// isSynced checks if the node is synced enough based upon its worldview. -// This is used to determine if the node can support mining and requesting newly-mined blocks. -// To do that, first it checks if the selected tip timestamp is not older than maxTipAge. If that's the case, it means -// the node is synced since blocks' timestamps are not allowed to deviate too much into the future. -// If that's not the case it checks the rate it added new blocks to the DAG recently. If it's faster than -// blockRate * maxSyncRateDeviation it means the node is not synced, since when the node is synced it shouldn't add -// blocks to the DAG faster than the block rate. -func (sm *SyncManager) isSynced() bool { - const maxTipAge = 5 * time.Minute - isCloseToCurrentTime := sm.dag.Now().Sub(sm.dag.SelectedTipHeader().Timestamp) <= maxTipAge - if isCloseToCurrentTime { - return true - } - - const maxSyncRateDeviation = 1.05 - return sm.dag.IsSyncRateBelowThreshold(maxSyncRateDeviation) -} - -// Pause pauses the sync manager until the returned channel is closed. -// -// Note that while paused, all peer and block processing is halted. The -// message sender should avoid pausing the sync manager for long durations. -func (sm *SyncManager) Pause() chan<- struct{} { - c := make(chan struct{}) - sm.msgChan <- pauseMsg{c} - return c -} - -// New constructs a new SyncManager. Use Start to begin processing asynchronous -// block, tx, and inv updates. -func New(config *Config) (*SyncManager, error) { - sm := SyncManager{ - peerNotifier: config.PeerNotifier, - dag: config.DAG, - txMemPool: config.TxMemPool, - dagParams: config.DAGParams, - rejectedTxns: make(map[daghash.TxID]struct{}), - requestedTxns: make(map[daghash.TxID]struct{}), - requestedBlocks: make(map[daghash.Hash]struct{}), - peerStates: make(map[*peerpkg.Peer]*peerSyncState), - progressLogger: newBlockProgressLogger("Processed", log), - msgChan: make(chan interface{}, config.MaxPeers*3), - quit: make(chan struct{}), - } - - sm.dag.Subscribe(sm.handleBlockDAGNotification) - - return &sm, nil -} diff --git a/peer/README.md b/peer/README.md deleted file mode 100644 index a34e6c28e..000000000 --- a/peer/README.md +++ /dev/null @@ -1,49 +0,0 @@ -peer -==== - -[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/peer) - -Package peer provides a common base for creating and managing kaspa network -peers. - -## Overview - -This package builds upon the wire package, which provides the fundamental -primitives necessary to speak the kaspa wire protocol, in order to simplify -the process of creating fully functional peers. 
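[editor's note] For context on what this deletion removes: the README above describes a callback-driven peer API. Below is a minimal, illustrative sketch of how a caller typically wired it up, reconstructed only from names this package itself documents (Config, MessageListeners, NewOutboundPeer, Connect, WaitForDisconnect, Addr). The constructor signature, the listener set shown, and the address are assumptions in btcd's style, not taken from this diff.

```go
// Illustrative only: rough wiring of the peer package that this diff deletes,
// reconstructed from its own README/doc.go. Constructor and listener
// signatures are assumptions (btcd-style) and may not match exactly.
package main

import (
	"log"
	"net"

	"github.com/kaspanet/kaspad/peer"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	cfg := &peer.Config{
		UserAgentName:    "example-node", // advertised in the version message
		UserAgentVersion: "0.0.1",
		// DAGParams omitted: per the package docs it then defaults to the
		// test network.
		Listeners: peer.MessageListeners{
			// Callbacks are ordinary closures, so they can capture whatever
			// state the caller needs.
			OnVersion: func(p *peer.Peer, msg *wire.MsgVersion) {
				log.Printf("peer %s runs agent %s", p, msg.UserAgent)
			},
			OnInv: func(p *peer.Peer, msg *wire.MsgInv) {
				log.Printf("peer %s advertised %d inventory vectors", p, len(msg.InvList))
			},
		},
	}

	// The caller is responsible for the actual connection: NewOutboundPeer
	// only prepares the Peer, and Connect hands it the net.Conn (per doc.go).
	p, err := peer.NewOutboundPeer(cfg, "127.0.0.1:16111") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.Dial("tcp", p.Addr())
	if err != nil {
		log.Fatal(err)
	}
	p.Connect(conn)
	p.WaitForDisconnect()
}
```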
- -A quick overview of the major features peer provides are as follows: - - - Provides a basic concurrent safe kaspa peer for handling kaspa - communications via the peer-to-peer protocol - - Full duplex reading and writing of kaspa protocol messages - - Automatic handling of the initial handshake process including protocol - version negotiation - - Asynchronous message queueing of outbound messages with optional channel for - notification when the message is actually sent - - Flexible peer configuration - - Caller is responsible for creating outgoing connections and listening for - incoming connections so they have flexibility to establish connections as - they see fit (proxies, etc) - - User agent name and version - - Maximum supported protocol version - - Ability to register callbacks for handling kaspa protocol messages - - Inventory message batching and send trickling with known inventory detection - and avoidance - - Automatic periodic keep-alive pinging and pong responses - - Random nonce generation and self connection detection - - Proper handling of bloom filter related commands when the caller does not - specify the related flag to signal support - - Disconnects the peer when the protocol version is high enough - - Does not invoke the related callbacks for older protocol versions - - Snapshottable peer statistics such as the total number of bytes read and - written, the remote address, user agent, and negotiated protocol version - - Helper functions pushing addresses, getblockinvs, getheaders, and reject - messages - - These could all be sent manually via the standard message output function, - but the helpers provide additional nice functionality such as duplicate - filtering and address randomization - - Ability to wait for shutdown/disconnect - - Comprehensive test coverage - diff --git a/peer/banscores.go b/peer/banscores.go deleted file mode 100644 index a7ffd3152..000000000 --- a/peer/banscores.go +++ /dev/null @@ -1,42 +0,0 @@ -package peer - -// Ban scores for misbehaving nodes -const ( - BanScoreUnrequestedBlock = 100 - BanScoreInvalidBlock = 100 - BanScoreInvalidInvBlock = 100 - BanScoreOrphanInvAsPartOfNetsync = 100 - BanScoreMalformedBlueScoreInOrphan = 100 - - BanScoreRequestNonExistingBlock = 10 - - BanScoreUnrequestedSelectedTip = 20 - BanScoreUnrequestedTx = 20 - BanScoreInvalidTx = 100 - - BanScoreMalformedMessage = 10 - - BanScoreNonVersionFirstMessage = 1 - BanScoreDuplicateVersion = 1 - BanScoreDuplicateVerack = 1 - - BanScoreSentTooManyAddresses = 20 - BanScoreMsgAddressesWithInvalidSubnetwork = 10 - - BanScoreInvalidFeeFilter = 100 - BanScoreNoFilterLoaded = 5 - - BanScoreInvalidMsgGetBlockInvs = 10 - - BanScoreInvalidMsgGetBlockLocator = 100 - - BanScoreEmptyBlockLocator = 100 - - BanScoreSentTxToBlocksOnly = 20 - - BanScoreNodeBloomFlagViolation = 100 - - BanScoreStallTimeout = 1 - - BanScoreUnrequestedMessage = 100 -) diff --git a/peer/doc.go b/peer/doc.go deleted file mode 100644 index 08237009a..000000000 --- a/peer/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package peer provides a common base for creating and managing kaspa network -peers. - -Overview - -This package builds upon the wire package, which provides the fundamental -primitives necessary to speak the kaspa wire protocol, in order to simplify -the process of creating fully functional peers. In essence, it provides a -common base for creating concurrent safe fully validating nodes, Simplified -Payment Verification (SPV) nodes, proxies, etc. 
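[editor's note] Further down, this doc comment describes the outbound queueing model ("Queuing Messages and Inventory"): QueueMessage is non-blocking and accepts an optional done channel that is notified once the message has actually been sent, the same pattern PushRejectMsg uses later in peer.go. The following is a hedged caller-side sketch of that pattern; pingAndWait and the package name are hypothetical, and wire.NewMsgPing's signature is an assumption.

```go
// Hedged sketch of the QueueMessage done-channel pattern described in this
// doc comment. pingAndWait is a hypothetical helper; wire.NewMsgPing's
// exact signature is assumed.
package example

import (
	"math/rand"
	"time"

	"github.com/kaspanet/kaspad/peer"
	"github.com/kaspanet/kaspad/wire"
)

// pingAndWait queues a ping on an already-connected peer and reports whether
// the message was actually handed to the network within the given timeout.
func pingAndWait(p *peer.Peer, timeout time.Duration) bool {
	// The done channel is optional (nil is allowed elsewhere in this file);
	// buffering it by one mirrors how PushRejectMsg allocates its own done
	// channel so the peer's writer never blocks on a slow caller.
	done := make(chan struct{}, 1)
	p.QueueMessage(wire.NewMsgPing(rand.Uint64()), done)

	select {
	case <-done:
		return true // sent (or abandoned because the peer shut down)
	case <-time.After(timeout):
		return false // still sitting in the outbound queue
	}
}
```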
- -A quick overview of the major features peer provides are as follows: - - - Provides a basic concurrent safe kaspa peer for handling kaspa - communications via the peer-to-peer protocol - - Full duplex reading and writing of kaspa protocol messages - - Automatic handling of the initial handshake process including protocol - version negotiation - - Asynchronous message queuing of outbound messages with optional channel for - notification when the message is actually sent - - Flexible peer configuration - - Caller is responsible for creating outgoing connections and listening for - incoming connections so they have flexibility to establish connections as - they see fit (proxies, etc) - - User agent name and version - - Kaspa network - - Service support signalling (full nodes, bloom filters, etc) - - Maximum supported protocol version - - Ability to register callbacks for handling kaspa protocol messages - - Inventory message batching and send trickling with known inventory detection - and avoidance - - Automatic periodic keep-alive pinging and pong responses - - Random nonce generation and self connection detection - - Proper handling of bloom filter related commands when the caller does not - specify the related flag to signal support - - Disconnects the peer when the protocol version is high enough - - Does not invoke the related callbacks for older protocol versions - - Snapshottable peer statistics such as the total number of bytes read and - written, the remote address, user agent, and negotiated protocol version - - Helper functions pushing addresses, getblockinvs, getheaders, and reject - messages - - These could all be sent manually via the standard message output function, - but the helpers provide additional nice functionality such as duplicate - filtering and address randomization - - Ability to wait for shutdown/disconnect - - Comprehensive test coverage - -Peer Configuration - -All peer configuration is handled with the Config struct. This allows the -caller to specify things such as the user agent name and version, the kaspa -network to use, which services it supports, and callbacks to invoke when kaspa -messages are received. See the documentation for each field of the Config -struct for more details. - -Inbound and Outbound Peers - -A peer can either be inbound or outbound. The caller is responsible for -establishing the connection to remote peers and listening for incoming peers. -This provides high flexibility for things such as connecting via proxies, acting -as a proxy, creating bridge peers, choosing whether to listen for inbound peers, -etc. - -NewOutboundPeer and NewInboundPeer functions must be followed by calling Connect -with a net.Conn instance to the peer. This will start all async I/O goroutines -and initiate the protocol negotiation process. Once finished with the peer call -Disconnect to disconnect from the peer and clean up all resources. -WaitForDisconnect can be used to block until peer disconnection and resource -cleanup has completed. - -Callbacks - -In order to do anything useful with a peer, it is necessary to react to kaspa -messages. This is accomplished by creating an instance of the MessageListeners -struct with the callbacks to be invoke specified and setting the Listeners field -of the Config struct specified when creating a peer to it. - -For convenience, a callback hook for all of the currently supported kaspa -messages is exposed which receives the peer instance and the concrete message -type. 
In addition, a hook for OnRead is provided so even custom messages types -for which this package does not directly provide a hook, as long as they -implement the wire.Message interface, can be used. Finally, the OnWrite hook -is provided, which in conjunction with OnRead, can be used to track server-wide -byte counts. - -It is often useful to use closures which encapsulate state when specifying the -callback handlers. This provides a clean method for accessing that state when -callbacks are invoked. - -Queuing Messages and Inventory - -The QueueMessage function provides the fundamental means to send messages to the -remote peer. As the name implies, this employs a non-blocking queue. A done -channel which will be notified when the message is actually sent can optionally -be specified. There are certain message types which are better sent using other -functions which provide additional functionality. - -Of special interest are inventory messages. Rather than manually sending MsgInv -messages via Queuemessage, the inventory vectors should be queued using the -QueueInventory function. It employs batching and trickling along with -intelligent known remote peer inventory detection and avoidance through the use -of a most-recently used algorithm. - -Message Sending Helper Functions - -In addition to the bare QueueMessage function previously described, the -PushAddrMsg, PushGetBlockInvsMsg, PushGetHeadersMsg, and PushRejectMsg functions -are provided as a convenience. While it is of course possible to create and -send these message manually via QueueMessage, these helper functions provided -additional useful functionality that is typically desired. - -For example, the PushAddrMsg function automatically limits the addresses to the -maximum number allowed by the message and randomizes the chosen addresses when -there are too many. This allows the caller to simply provide a slice of known -addresses, such as that returned by the addrmgr package, without having to worry -about the details. - -Next, the PushGetBlockInvsMsg and PushGetHeadersMsg functions will construct proper -messages using a block locator and ignore back to back duplicate requests. - -Finally, the PushRejectMsg function can be used to easily create and send an -appropriate reject message based on the provided parameters as well as -optionally provides a flag to cause it to block until the message is actually -sent. - -Peer Statistics - -A snapshot of the current peer statistics can be obtained with the StatsSnapshot -function. This includes statistics such as the total number of bytes read and -written, the remote address, user agent, and negotiated protocol version. -*/ -package peer diff --git a/peer/log.go b/peer/log.go deleted file mode 100644 index 7ae364f48..000000000 --- a/peer/log.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2015-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package peer - -import ( - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/util/panics" -) - -var log, _ = logger.Get(logger.SubsystemTags.PEER) -var spawn = panics.GoroutineWrapperFunc(log) -var spawnAfter = panics.AfterFuncWrapperFunc(log) diff --git a/peer/message_logging.go b/peer/message_logging.go deleted file mode 100644 index 39c2eb8ab..000000000 --- a/peer/message_logging.go +++ /dev/null @@ -1,173 +0,0 @@ -package peer - -import ( - "fmt" - "github.com/kaspanet/kaspad/logs" - "github.com/kaspanet/kaspad/txscript" - "github.com/kaspanet/kaspad/util/mstime" - "github.com/kaspanet/kaspad/wire" - "strings" -) - -const ( - // maxRejectReasonLen is the maximum length of a sanitized reject reason - // that will be logged. - maxRejectReasonLen = 250 -) - -// formatLockTime returns a transaction lock time as a human-readable string. -func formatLockTime(lockTime uint64) string { - // The lock time field of a transaction is either a block blue score at - // which the transaction is finalized or a timestamp depending on if the - // value is before the lockTimeThreshold. When it is under the - // threshold it is a block blue score. - if lockTime < txscript.LockTimeThreshold { - return fmt.Sprintf("blue score %d", lockTime) - } - - return mstime.UnixMilliseconds(int64(lockTime)).String() -} - -// invSummary returns an inventory message as a human-readable string. -func invSummary(invList []*wire.InvVect) string { - // No inventory. - invLen := len(invList) - if invLen == 0 { - return "empty" - } - - // One inventory item. - if invLen == 1 { - iv := invList[0] - switch iv.Type { - case wire.InvTypeError: - return fmt.Sprintf("error %s", iv.Hash) - case wire.InvTypeBlock: - return fmt.Sprintf("block %s", iv.Hash) - case wire.InvTypeSyncBlock: - return fmt.Sprintf("sync block %s", iv.Hash) - case wire.InvTypeMissingAncestor: - return fmt.Sprintf("missing ancestor %s", iv.Hash) - case wire.InvTypeTx: - return fmt.Sprintf("tx %s", iv.Hash) - } - - return fmt.Sprintf("unknown (%d) %s", uint32(iv.Type), iv.Hash) - } - - // More than one inv item. - return fmt.Sprintf("size %d", invLen) -} - -// sanitizeString strips any characters which are even remotely dangerous, such -// as html control characters, from the passed string. It also limits it to -// the passed maximum size, which can be 0 for unlimited. When the string is -// limited, it will also add "..." to the string to indicate it was truncated. -func sanitizeString(str string, maxLength uint) string { - const safeChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY" + - "Z01234567890 .,;_/:?@" - - // Strip any characters not in the safeChars string removed. - str = strings.Map(func(r rune) rune { - if strings.ContainsRune(safeChars, r) { - return r - } - return -1 - }, str) - - // Limit the string to the max allowed length. - if maxLength > 0 && uint(len(str)) > maxLength { - str = str[:maxLength] - str = str + "..." - } - return str -} - -// messageSummary returns a human-readable string which summarizes a message. -// Not all messages have or need a summary. This is used for debug logging. -func messageSummary(msg wire.Message) string { - switch msg := msg.(type) { - case *wire.MsgVersion: - return fmt.Sprintf("agent %s, pver %d, selected tip %s", - msg.UserAgent, msg.ProtocolVersion, msg.SelectedTipHash) - - case *wire.MsgVerAck: - // No summary. 
- - case *wire.MsgGetAddresses: - if msg.IncludeAllSubnetworks { - return "all subnetworks and full nodes" - } - if msg.SubnetworkID == nil { - return "full nodes" - } - return fmt.Sprintf("subnetwork ID %v", msg.SubnetworkID) - - case *wire.MsgAddresses: - return fmt.Sprintf("%d addr", len(msg.AddrList)) - - case *wire.MsgPing: - // No summary - perhaps add nonce. - - case *wire.MsgPong: - // No summary - perhaps add nonce. - - case *wire.MsgTx: - return fmt.Sprintf("hash %s, %d inputs, %d outputs, lock %s", - msg.TxID(), len(msg.TxIn), len(msg.TxOut), - formatLockTime(msg.LockTime)) - - case *wire.MsgBlock: - header := &msg.Header - return fmt.Sprintf("hash %s, ver %d, %d tx, %s", msg.BlockHash(), - header.Version, len(msg.Transactions), header.Timestamp) - - case *wire.MsgInv: - return invSummary(msg.InvList) - - case *wire.MsgNotFound: - return invSummary(msg.InvList) - - case *wire.MsgGetData: - return invSummary(msg.InvList) - - case *wire.MsgGetBlocks: - return fmt.Sprintf("low hash %s, high hash %s", msg.LowHash, - msg.HighHash) - - case *wire.MsgGetBlockLocator: - return fmt.Sprintf("high hash %s, low hash %s", msg.HighHash, - msg.LowHash) - - case *wire.MsgBlockLocator: - if len(msg.BlockLocatorHashes) > 0 { - return fmt.Sprintf("locator first hash: %s, last hash: %s", msg.BlockLocatorHashes[0], msg.BlockLocatorHashes[len(msg.BlockLocatorHashes)-1]) - } - return fmt.Sprintf("no locator") - - case *wire.MsgReject: - // Ensure the variable length strings don't contain any - // characters which are even remotely dangerous such as HTML - // control characters, etc. Also limit them to sane length for - // logging. - rejReason := sanitizeString(msg.Reason, maxRejectReasonLen) - summary := fmt.Sprintf("cmd %s, code %s, reason %s", msg.Cmd, - msg.Code, rejReason) - if msg.Cmd == wire.CmdBlock || msg.Cmd == wire.CmdTx { - summary += fmt.Sprintf(", hash %s", msg.Hash) - } - return summary - } - - // No summary for other messages. - return "" -} - -func messageLogLevel(msg wire.Message) logs.Level { - switch msg.(type) { - case *wire.MsgReject: - return logs.LevelWarn - default: - return logs.LevelDebug - } -} diff --git a/peer/mruinvmap.go b/peer/mruinvmap.go deleted file mode 100644 index 5f0e9889d..000000000 --- a/peer/mruinvmap.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2013-2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package peer - -import ( - "bytes" - "container/list" - "fmt" - "sync" - - "github.com/kaspanet/kaspad/wire" -) - -// mruInventoryMap provides a concurrency safe map that is limited to a maximum -// number of items with eviction for the oldest entry when the limit is -// exceeded. -type mruInventoryMap struct { - invMtx sync.Mutex - invMap map[wire.InvVect]*list.Element // nearly O(1) lookups - invList *list.List // O(1) insert, update, delete - limit uint -} - -// String returns the map as a human-readable string. -// -// This function is safe for concurrent access. -func (m *mruInventoryMap) String() string { - m.invMtx.Lock() - defer m.invMtx.Unlock() - - lastEntryNum := len(m.invMap) - 1 - curEntry := 0 - buf := bytes.NewBufferString("[") - for iv := range m.invMap { - buf.WriteString(iv.String()) - if curEntry < lastEntryNum { - buf.WriteString(", ") - } - curEntry++ - } - buf.WriteString("]") - - return fmt.Sprintf("<%d>%s", m.limit, buf.String()) -} - -// Exists returns whether or not the passed inventory item is in the map. 
-// -// This function is safe for concurrent access. -func (m *mruInventoryMap) Exists(iv *wire.InvVect) bool { - m.invMtx.Lock() - defer m.invMtx.Unlock() - _, exists := m.invMap[*iv] - - return exists -} - -// Add adds the passed inventory to the map and handles eviction of the oldest -// item if adding the new item would exceed the max limit. Adding an existing -// item makes it the most recently used item. -// -// This function is safe for concurrent access. -func (m *mruInventoryMap) Add(iv *wire.InvVect) { - m.invMtx.Lock() - defer m.invMtx.Unlock() - - // When the limit is zero, nothing can be added to the map, so just - // return. - if m.limit == 0 { - return - } - - // When the entry already exists move it to the front of the list - // thereby marking it most recently used. - if node, exists := m.invMap[*iv]; exists { - m.invList.MoveToFront(node) - return - } - - // Evict the least recently used entry (back of the list) if the the new - // entry would exceed the size limit for the map. Also reuse the list - // node so a new one doesn't have to be allocated. - if uint(len(m.invMap))+1 > m.limit { - node := m.invList.Back() - lru := node.Value.(*wire.InvVect) - - // Evict least recently used item. - delete(m.invMap, *lru) - - // Reuse the list node of the item that was just evicted for the - // new item. - node.Value = iv - m.invList.MoveToFront(node) - m.invMap[*iv] = node - return - } - - // The limit hasn't been reached yet, so just add the new item. - node := m.invList.PushFront(iv) - m.invMap[*iv] = node -} - -// Delete deletes the passed inventory item from the map (if it exists). -// -// This function is safe for concurrent access. -func (m *mruInventoryMap) Delete(iv *wire.InvVect) { - m.invMtx.Lock() - defer m.invMtx.Unlock() - if node, exists := m.invMap[*iv]; exists { - m.invList.Remove(node) - delete(m.invMap, *iv) - } -} - -// newMruInventoryMap returns a new inventory map that is limited to the number -// of entries specified by limit. When the number of entries exceeds the limit, -// the oldest (least recently used) entry will be removed to make room for the -// new entry. -func newMruInventoryMap(limit uint) *mruInventoryMap { - m := mruInventoryMap{ - invMap: make(map[wire.InvVect]*list.Element), - invList: list.New(), - limit: limit, - } - return &m -} diff --git a/peer/mrunoncemap.go b/peer/mrunoncemap.go deleted file mode 100644 index 2931fb3bb..000000000 --- a/peer/mrunoncemap.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package peer - -import ( - "bytes" - "container/list" - "fmt" - "sync" -) - -// mruNonceMap provides a concurrency safe map that is limited to a maximum -// number of items with eviction for the oldest entry when the limit is -// exceeded. -type mruNonceMap struct { - mtx sync.Mutex - nonceMap map[uint64]*list.Element // nearly O(1) lookups - nonceList *list.List // O(1) insert, update, delete - limit uint -} - -// String returns the map as a human-readable string. -// -// This function is safe for concurrent access. 
-func (m *mruNonceMap) String() string { - m.mtx.Lock() - defer m.mtx.Unlock() - - lastEntryNum := len(m.nonceMap) - 1 - curEntry := 0 - buf := bytes.NewBufferString("[") - for nonce := range m.nonceMap { - buf.WriteString(fmt.Sprintf("%d", nonce)) - if curEntry < lastEntryNum { - buf.WriteString(", ") - } - curEntry++ - } - buf.WriteString("]") - - return fmt.Sprintf("<%d>%s", m.limit, buf.String()) -} - -// Exists returns whether or not the passed nonce is in the map. -// -// This function is safe for concurrent access. -func (m *mruNonceMap) Exists(nonce uint64) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - _, exists := m.nonceMap[nonce] - - return exists -} - -// Add adds the passed nonce to the map and handles eviction of the oldest item -// if adding the new item would exceed the max limit. Adding an existing item -// makes it the most recently used item. -// -// This function is safe for concurrent access. -func (m *mruNonceMap) Add(nonce uint64) { - m.mtx.Lock() - defer m.mtx.Unlock() - - // When the limit is zero, nothing can be added to the map, so just - // return. - if m.limit == 0 { - return - } - - // When the entry already exists move it to the front of the list - // thereby marking it most recently used. - if node, exists := m.nonceMap[nonce]; exists { - m.nonceList.MoveToFront(node) - return - } - - // Evict the least recently used entry (back of the list) if the the new - // entry would exceed the size limit for the map. Also reuse the list - // node so a new one doesn't have to be allocated. - if uint(len(m.nonceMap))+1 > m.limit { - node := m.nonceList.Back() - lru := node.Value.(uint64) - - // Evict least recently used item. - delete(m.nonceMap, lru) - - // Reuse the list node of the item that was just evicted for the - // new item. - node.Value = nonce - m.nonceList.MoveToFront(node) - m.nonceMap[nonce] = node - return - } - - // The limit hasn't been reached yet, so just add the new item. - node := m.nonceList.PushFront(nonce) - m.nonceMap[nonce] = node -} - -// Delete deletes the passed nonce from the map (if it exists). -// -// This function is safe for concurrent access. -func (m *mruNonceMap) Delete(nonce uint64) { - m.mtx.Lock() - defer m.mtx.Unlock() - if node, exists := m.nonceMap[nonce]; exists { - m.nonceList.Remove(node) - delete(m.nonceMap, nonce) - } -} - -// newMruNonceMap returns a new nonce map that is limited to the number of -// entries specified by limit. When the number of entries exceeds the limit, -// the oldest (least recently used) entry will be removed to make room for the -// new entry. -func newMruNonceMap(limit uint) *mruNonceMap { - m := mruNonceMap{ - nonceMap: make(map[uint64]*list.Element), - nonceList: list.New(), - limit: limit, - } - return &m -} diff --git a/peer/peer.go b/peer/peer.go deleted file mode 100644 index b0d764151..000000000 --- a/peer/peer.go +++ /dev/null @@ -1,1979 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package peer - -import ( - "bytes" - "container/list" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/kaspanet/kaspad/config" - mathUtil "github.com/kaspanet/kaspad/util/math" - "github.com/kaspanet/kaspad/util/mstime" - - "github.com/pkg/errors" - - "github.com/kaspanet/kaspad/util/random" - "github.com/kaspanet/kaspad/util/subnetworkid" - - "github.com/btcsuite/go-socks/socks" - "github.com/davecgh/go-spew/spew" - "github.com/kaspanet/kaspad/blockdag" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -const ( - // MaxProtocolVersion is the max protocol version the peer supports. - MaxProtocolVersion = wire.ProtocolVersion - - // minAcceptableProtocolVersion is the lowest protocol version that a - // connected peer may support. - minAcceptableProtocolVersion = wire.ProtocolVersion - - // outputBufferSize is the number of elements the output channels use. - outputBufferSize = 50 - - // invTrickleSize is the maximum amount of inventory to send in a single - // message when trickling inventory to remote peers. - maxInvTrickleSize = 1000 - - // maxKnownInventory is the maximum number of items to keep in the known - // inventory cache. - maxKnownInventory = 1000 - - // pingInterval is the interval of time to wait in between sending ping - // messages. - pingInterval = 2 * time.Minute - - // negotiateTimeout is the duration of inactivity before we timeout a - // peer that hasn't completed the initial version negotiation. - negotiateTimeout = 30 * time.Second - - // idleTimeout is the duration of inactivity before we time out a peer. - idleTimeout = 5 * time.Minute - - // stallTickInterval is the interval of time between each check for - // stalled peers. - stallTickInterval = 15 * time.Second - - // stallResponseTimeout is the base maximum amount of time messages that - // expect a response will wait before disconnecting the peer for - // stalling. The deadlines are adjusted for callback running times and - // only checked on each stall tick interval. - stallResponseTimeout = 30 * time.Second - - // trickleTimeout is the duration of the ticker which trickles down the - // inventory to a peer. - trickleTimeout = 100 * time.Millisecond -) - -var ( - // nodeCount is the total number of peer connections made since startup - // and is used to assign an id to a peer. - nodeCount int32 - - // sentNonces houses the unique nonces that are generated when pushing - // version messages that are used to detect self connections. - sentNonces = newMruNonceMap(50) - - // allowSelfConns is only used to allow the tests to bypass the self - // connection detecting and disconnect logic since they intentionally - // do so for testing purposes. - allowSelfConns bool -) - -// MessageListeners defines callback function pointers to invoke with message -// listeners for a peer. Any listener which is not set to a concrete callback -// during peer initialization is ignored. Execution of multiple message -// listeners occurs serially, so one callback blocks the execution of the next. -// -// NOTE: Unless otherwise documented, these listeners must NOT directly call any -// blocking calls (such as WaitForShutdown) on the peer instance since the input -// handler goroutine blocks until the callback has completed. Doing so will -// result in a deadlock. -type MessageListeners struct { - // OnGetAddr is invoked when a peer receives a getaddr kaspa message. 
- OnGetAddr func(p *Peer, msg *wire.MsgGetAddresses) - - // OnAddr is invoked when a peer receives an addr kaspa message. - OnAddr func(p *Peer, msg *wire.MsgAddresses) - - // OnPing is invoked when a peer receives a ping kaspa message. - OnPing func(p *Peer, msg *wire.MsgPing) - - // OnPong is invoked when a peer receives a pong kaspa message. - OnPong func(p *Peer, msg *wire.MsgPong) - - // OnTx is invoked when a peer receives a tx kaspa message. - OnTx func(p *Peer, msg *wire.MsgTx) - - // OnBlock is invoked when a peer receives a block kaspa message. - OnBlock func(p *Peer, msg *wire.MsgBlock, buf []byte) - - // OnInv is invoked when a peer receives an inv kaspa message. - OnInv func(p *Peer, msg *wire.MsgInv) - - // OnGetBlockLocator is invoked when a peer receives a getlocator kaspa message. - OnGetBlockLocator func(p *Peer, msg *wire.MsgGetBlockLocator) - - // OnBlockLocator is invoked when a peer receives a locator kaspa message. - OnBlockLocator func(p *Peer, msg *wire.MsgBlockLocator) - - // OnNotFound is invoked when a peer receives a notfound kaspa - // message. - OnNotFound func(p *Peer, msg *wire.MsgNotFound) - - // OnGetData is invoked when a peer receives a getdata kaspa message. - OnGetData func(p *Peer, msg *wire.MsgGetData) - - // OnGetBlockInvs is invoked when a peer receives a getblockinvs kaspa - // message. - OnGetBlockInvs func(p *Peer, msg *wire.MsgGetBlocks) - - // OnFeeFilter is invoked when a peer receives a feefilter kaspa message. - OnFeeFilter func(p *Peer, msg *wire.MsgFeeFilter) - - // OnFilterAdd is invoked when a peer receives a filteradd kaspa message. - OnFilterAdd func(p *Peer, msg *wire.MsgFilterAdd) - - // OnFilterClear is invoked when a peer receives a filterclear kaspa - // message. - OnFilterClear func(p *Peer, msg *wire.MsgFilterClear) - - // OnFilterLoad is invoked when a peer receives a filterload kaspa - // message. - OnFilterLoad func(p *Peer, msg *wire.MsgFilterLoad) - - // OnMerkleBlock is invoked when a peer receives a merkleblock kaspa - // message. - OnMerkleBlock func(p *Peer, msg *wire.MsgMerkleBlock) - - // OnVersion is invoked when a peer receives a version kaspa message. - OnVersion func(p *Peer, msg *wire.MsgVersion) - - // OnVerAck is invoked when a peer receives a verack kaspa message. - OnVerAck func(p *Peer, msg *wire.MsgVerAck) - - // OnReject is invoked when a peer receives a reject kaspa message. - OnReject func(p *Peer, msg *wire.MsgReject) - - // OnGetSelectedTip is invoked when a peer receives a getSelectedTip kaspa - // message. - OnGetSelectedTip func() - - // OnSelectedTip is invoked when a peer receives a selectedTip kaspa - // message. - OnSelectedTip func(p *Peer, msg *wire.MsgSelectedTip) - - // OnRead is invoked when a peer receives a kaspa message. It - // consists of the number of bytes read, the message, and whether or not - // an error in the read occurred. Typically, callers will opt to use - // the callbacks for the specific message types, however this can be - // useful for circumstances such as keeping track of server-wide byte - // counts or working with custom message types for which the peer does - // not directly provide a callback. - OnRead func(p *Peer, bytesRead int, msg wire.Message, err error) - - // OnWrite is invoked when we write a kaspa message to a peer. It - // consists of the number of bytes written, the message, and whether or - // not an error in the write occurred. This can be useful for - // circumstances such as keeping track of server-wide byte counts. 
- OnWrite func(p *Peer, bytesWritten int, msg wire.Message, err error) -} - -// Config is the struct to hold configuration options useful to Peer. -type Config struct { - // SelectedTipHash specifies a callback which provides the selected tip - // to the peer as needed. - SelectedTipHash func() *daghash.Hash - - // IsInDAG determines whether a block with the given hash exists in - // the DAG. - IsInDAG func(*daghash.Hash) bool - - // AddBanScore increases the persistent and decaying ban score fields by the - // values passed as parameters. If the resulting score exceeds half of the ban - // threshold, a warning is logged including the reason provided. Further, if - // the score is above the ban threshold, the peer will be banned and - // disconnected. - AddBanScore func(persistent, transient uint32, reason string) - - // HostToNetAddress returns the netaddress for the given host. This can be - // nil in which case the host will be parsed as an IP address. - HostToNetAddress HostToNetAddrFunc - - // Proxy indicates a proxy is being used for connections. The only - // effect this has is to prevent leaking the tor proxy address, so it - // only needs to specified if using a tor proxy. - Proxy string - - // UserAgentName specifies the user agent name to advertise. It is - // highly recommended to specify this value. - UserAgentName string - - // UserAgentVersion specifies the user agent version to advertise. It - // is highly recommended to specify this value and that it follows the - // form "major.minor.revision" e.g. "2.6.41". - UserAgentVersion string - - // UserAgentComments specify the user agent comments to advertise. These - // values must not contain the illegal characters specified in BIP 14: - // '/', ':', '(', ')'. - UserAgentComments []string - - // DAGParams identifies which DAG parameters the peer is associated - // with. It is highly recommended to specify this field, however it can - // be omitted in which case the test network will be used. - DAGParams *dagconfig.Params - - // Services specifies which services to advertise as supported by the - // local peer. This field can be omitted in which case it will be 0 - // and therefore advertise no supported services. - Services wire.ServiceFlag - - // ProtocolVersion specifies the maximum protocol version to use and - // advertise. This field can be omitted in which case - // peer.MaxProtocolVersion will be used. - ProtocolVersion uint32 - - // DisableRelayTx specifies if the remote peer should be informed to - // not send inv messages for transactions. - DisableRelayTx bool - - // Listeners houses callback functions to be invoked on receiving peer - // messages. - Listeners MessageListeners - - // SubnetworkID specifies which subnetwork the peer is associated with. - // It is nil in full nodes. - SubnetworkID *subnetworkid.SubnetworkID -} - -// newNetAddress attempts to extract the IP address and port from the passed -// net.Addr interface and create a kaspa NetAddress structure using that -// information. -func newNetAddress(addr net.Addr, services wire.ServiceFlag) (*wire.NetAddress, error) { - // addr will be a net.TCPAddr when not using a proxy. - if tcpAddr, ok := addr.(*net.TCPAddr); ok { - ip := tcpAddr.IP - port := uint16(tcpAddr.Port) - na := wire.NewNetAddressIPPort(ip, port, services) - return na, nil - } - - // addr will be a socks.ProxiedAddr when using a proxy. 
- if proxiedAddr, ok := addr.(*socks.ProxiedAddr); ok { - ip := net.ParseIP(proxiedAddr.Host) - if ip == nil { - ip = net.ParseIP("0.0.0.0") - } - port := uint16(proxiedAddr.Port) - na := wire.NewNetAddressIPPort(ip, port, services) - return na, nil - } - - // For the most part, addr should be one of the two above cases, but - // to be safe, fall back to trying to parse the information from the - // address string as a last resort. - host, portStr, err := net.SplitHostPort(addr.String()) - if err != nil { - return nil, err - } - ip := net.ParseIP(host) - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return nil, err - } - na := wire.NewNetAddressIPPort(ip, uint16(port), services) - return na, nil -} - -// outMsg is used to house a message to be sent along with a channel to signal -// when the message has been sent (or won't be sent due to things such as -// shutdown) -type outMsg struct { - msg wire.Message - doneChan chan<- struct{} -} - -// stallControlCmd represents the command of a stall control message. -type stallControlCmd uint8 - -// Constants for the command of a stall control message. -const ( - // sccSendMessage indicates a message is being sent to the remote peer. - sccSendMessage stallControlCmd = iota - - // sccReceiveMessage indicates a message has been received from the - // remote peer. - sccReceiveMessage - - // sccHandlerStart indicates a callback handler is about to be invoked. - sccHandlerStart - - // sccHandlerStart indicates a callback handler has completed. - sccHandlerDone -) - -// stallControlMsg is used to signal the stall handler about specific events -// so it can properly detect and handle stalled remote peers. -type stallControlMsg struct { - command stallControlCmd - message wire.Message -} - -// StatsSnap is a snapshot of peer stats at a point in time. -type StatsSnap struct { - ID int32 - Addr string - Services wire.ServiceFlag - LastSend mstime.Time - LastRecv mstime.Time - BytesSent uint64 - BytesRecv uint64 - ConnTime mstime.Time - TimeOffset int64 - Version uint32 - UserAgent string - Inbound bool - SelectedTipHash *daghash.Hash - LastPingNonce uint64 - LastPingTime time.Time - LastPingMicros int64 -} - -// HostToNetAddrFunc is a func which takes a host, port, services and returns -// the netaddress. -type HostToNetAddrFunc func(host string, port uint16, - services wire.ServiceFlag) (*wire.NetAddress, error) - -// NOTE: The overall data flow of a peer is split into 3 goroutines. Inbound -// messages are read via the inHandler goroutine and generally dispatched to -// their own handler. For inbound data-related messages such as blocks, -// transactions, and inventory, the data is handled by the corresponding -// message handlers. The data flow for outbound messages is split into 2 -// goroutines, queueHandler and outHandler. The first, queueHandler, is used -// as a way for external entities to queue messages, by way of the QueueMessage -// function, quickly regardless of whether the peer is currently sending or not. -// It acts as the traffic cop between the external world and the actual -// goroutine which writes to the network socket. - -// Peer provides a basic concurrent safe kaspa peer for handling kaspa -// communications via the peer-to-peer protocol. 
It provides full duplex -// reading and writing, automatic handling of the initial handshake process, -// querying of usage statistics and other information about the remote peer such -// as its address, user agent, and protocol version, output message queuing, -// inventory trickling, and the ability to dynamically register and unregister -// callbacks for handling kaspa protocol messages. -// -// Outbound messages are typically queued via QueueMessage or QueueInventory. -// QueueMessage is intended for all messages, including responses to data such -// as blocks and transactions. QueueInventory, on the other hand, is only -// intended for relaying inventory as it employs a trickling mechanism to batch -// the inventory together. However, some helper functions for pushing messages -// of specific types that typically require common special handling are -// provided as a convenience. -type Peer struct { - // The following variables must only be used atomically. - bytesReceived uint64 - bytesSent uint64 - lastRecv int64 - lastSend int64 - connected int32 - disconnect int32 - - conn net.Conn - - // These fields are set at creation time and never modified, so they are - // safe to read from concurrently without a mutex. - addr string - cfg Config - AppCfg *config.Config // exported so that serverPeer can access as well - inbound bool - - flagsMtx sync.Mutex // protects the peer flags below - na *wire.NetAddress - id int32 - userAgent string - services wire.ServiceFlag - versionKnown bool - advertisedProtoVer uint32 // protocol version advertised by remote - protocolVersion uint32 // negotiated protocol version - verAckReceived bool - - knownInventory *mruInventoryMap - prevGetBlockInvsMtx sync.Mutex - prevGetBlockInvsLow *daghash.Hash - prevGetBlockInvsHigh *daghash.Hash - - wasBlockLocatorRequested bool - - // These fields keep track of statistics for the peer and are protected - // by the statsMtx mutex. - statsMtx sync.RWMutex - timeOffset int64 - timeConnected time.Time - selectedTipHash *daghash.Hash - lastPingNonce uint64 // Set to nonce if we have a pending ping. - lastPingTime time.Time // Time we sent last ping. - lastPingMicros int64 // Time for last ping to return. - - stallControl chan stallControlMsg - outputQueue chan outMsg - sendQueue chan outMsg - sendDoneQueue chan struct{} - outputInvChan chan *wire.InvVect - inQuit chan struct{} - queueQuit chan struct{} - outQuit chan struct{} - quit chan struct{} -} - -// WasBlockLocatorRequested returns whether the node -// is expecting to get a block locator from this -// peer. -func (p *Peer) WasBlockLocatorRequested() bool { - return p.wasBlockLocatorRequested -} - -// SetWasBlockLocatorRequested sets whether the node -// is expecting to get a block locator from this -// peer. -func (p *Peer) SetWasBlockLocatorRequested(wasBlockLocatorRequested bool) { - p.wasBlockLocatorRequested = wasBlockLocatorRequested -} - -// String returns the peer's address and directionality as a human-readable -// string. -// -// This function is safe for concurrent access. -func (p *Peer) String() string { - return fmt.Sprintf("%s (%s)", p.addr, logger.DirectionString(p.inbound)) -} - -// AddKnownInventory adds the passed inventory to the cache of known inventory -// for the peer. -// -// This function is safe for concurrent access. -func (p *Peer) AddKnownInventory(invVect *wire.InvVect) { - p.knownInventory.Add(invVect) -} - -// StatsSnapshot returns a snapshot of the current peer flags and statistics. 
-// -// This function is safe for concurrent access. -func (p *Peer) StatsSnapshot() *StatsSnap { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - - id := p.id - addr := p.addr - userAgent := p.userAgent - services := p.services - protocolVersion := p.advertisedProtoVer - - // Get a copy of all relevant flags and stats. - statsSnap := &StatsSnap{ - ID: id, - Addr: addr, - UserAgent: userAgent, - Services: services, - LastSend: p.LastSend(), - LastRecv: p.LastRecv(), - BytesSent: p.BytesSent(), - BytesRecv: p.BytesReceived(), - ConnTime: mstime.ToMSTime(p.timeConnected), - TimeOffset: p.timeOffset, - Version: protocolVersion, - Inbound: p.inbound, - SelectedTipHash: p.selectedTipHash, - LastPingNonce: p.lastPingNonce, - LastPingMicros: p.lastPingMicros, - LastPingTime: p.lastPingTime, - } - - return statsSnap -} - -// ID returns the peer id. -// -// This function is safe for concurrent access. -func (p *Peer) ID() int32 { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.id -} - -// NA returns the peer network address. -// -// This function is safe for concurrent access. -func (p *Peer) NA() *wire.NetAddress { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.na -} - -// Addr returns the peer address. -// -// This function is safe for concurrent access. -func (p *Peer) Addr() string { - // The address doesn't change after initialization, therefore it is not - // protected by a mutex. - return p.addr -} - -// Inbound returns whether the peer is inbound. -// -// This function is safe for concurrent access. -func (p *Peer) Inbound() bool { - return p.inbound -} - -// Services returns the services flag of the remote peer. -// -// This function is safe for concurrent access. -func (p *Peer) Services() wire.ServiceFlag { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.services -} - -// UserAgent returns the user agent of the remote peer. -// -// This function is safe for concurrent access. -func (p *Peer) UserAgent() string { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.userAgent -} - -// SubnetworkID returns peer subnetwork ID -func (p *Peer) SubnetworkID() *subnetworkid.SubnetworkID { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.cfg.SubnetworkID -} - -// LastPingNonce returns the last ping nonce of the remote peer. -// -// This function is safe for concurrent access. -func (p *Peer) LastPingNonce() uint64 { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - return p.lastPingNonce -} - -// LastPingTime returns the last ping time of the remote peer. -// -// This function is safe for concurrent access. -func (p *Peer) LastPingTime() time.Time { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - return p.lastPingTime -} - -// LastPingMicros returns the last ping micros of the remote peer. -// -// This function is safe for concurrent access. -func (p *Peer) LastPingMicros() int64 { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - return p.lastPingMicros -} - -// VersionKnown returns the whether or not the version of a peer is known -// locally. -// -// This function is safe for concurrent access. -func (p *Peer) VersionKnown() bool { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.versionKnown -} - -// VerAckReceived returns whether or not a verack message was received by the -// peer. -// -// This function is safe for concurrent access. 
-func (p *Peer) VerAckReceived() bool { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.verAckReceived -} - -// ProtocolVersion returns the negotiated peer protocol version. -// -// This function is safe for concurrent access. -func (p *Peer) ProtocolVersion() uint32 { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - return p.protocolVersion -} - -// SelectedTipHash returns the selected tip of the peer. -// -// This function is safe for concurrent access. -func (p *Peer) SelectedTipHash() *daghash.Hash { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - return p.selectedTipHash -} - -// SetSelectedTipHash sets the selected tip of the peer. -func (p *Peer) SetSelectedTipHash(selectedTipHash *daghash.Hash) { - p.statsMtx.Lock() - defer p.statsMtx.Unlock() - p.selectedTipHash = selectedTipHash -} - -// IsSelectedTipKnown returns whether or not this peer selected -// tip is a known block. -// -// This function is safe for concurrent access. -func (p *Peer) IsSelectedTipKnown() bool { - return !p.cfg.IsInDAG(p.selectedTipHash) -} - -// AddBanScore increases the persistent and decaying ban score fields by the -// values passed as parameters. If the resulting score exceeds half of the ban -// threshold, a warning is logged including the reason provided. Further, if -// the score is above the ban threshold, the peer will be banned and -// disconnected. -func (p *Peer) AddBanScore(persistent, transient uint32, reason string) { - p.cfg.AddBanScore(persistent, transient, reason) -} - -// AddBanScoreAndPushRejectMsg increases ban score and sends a -// reject message to the misbehaving peer. -func (p *Peer) AddBanScoreAndPushRejectMsg(command wire.MessageCommand, code wire.RejectCode, hash *daghash.Hash, persistent, transient uint32, reason string) { - p.PushRejectMsg(command, code, reason, hash, true) - p.cfg.AddBanScore(persistent, transient, reason) -} - -// LastSend returns the last send time of the peer. -// -// This function is safe for concurrent access. -func (p *Peer) LastSend() mstime.Time { - return mstime.UnixMilliseconds(atomic.LoadInt64(&p.lastSend)) -} - -// LastRecv returns the last recv time of the peer. -// -// This function is safe for concurrent access. -func (p *Peer) LastRecv() mstime.Time { - return mstime.UnixMilliseconds(atomic.LoadInt64(&p.lastRecv)) -} - -// BytesSent returns the total number of bytes sent by the peer. -// -// This function is safe for concurrent access. -func (p *Peer) BytesSent() uint64 { - return atomic.LoadUint64(&p.bytesSent) -} - -// BytesReceived returns the total number of bytes received by the peer. -// -// This function is safe for concurrent access. -func (p *Peer) BytesReceived() uint64 { - return atomic.LoadUint64(&p.bytesReceived) -} - -// TimeConnected returns the time at which the peer connected. -// -// This function is safe for concurrent access. -func (p *Peer) TimeConnected() time.Time { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - return p.timeConnected -} - -// TimeOffset returns the number of seconds the local time was offset from the -// time the peer reported during the initial negotiation phase. Negative values -// indicate the remote peer's time is before the local time. -// -// This function is safe for concurrent access. -func (p *Peer) TimeOffset() int64 { - p.statsMtx.RLock() - defer p.statsMtx.RUnlock() - return p.timeOffset -} - -// localVersionMsg creates a version message that can be used to send to the -// remote peer. 
-func (p *Peer) localVersionMsg() (*wire.MsgVersion, error) { - //TODO(libp2p) Remove this function - panic("not supported anymore") - //selectedTipHash := p.cfg.SelectedTipHash() - // - //// Generate a unique nonce for this peer so self connections can be - //// detected. This is accomplished by adding it to a size-limited map of - //// recently seen nonces. - //nonce := uint64(rand.Int63()) - //sentNonces.Add(nonce) - // - //subnetworkID := p.cfg.SubnetworkID - // - //// Version message. - //msg := wire.NewMsgVersion(nonce, selectedTipHash, subnetworkID) - //msg.AddUserAgent(p.cfg.UserAgentName, p.cfg.UserAgentVersion, - // p.cfg.UserAgentComments...) - // - //// Advertise the services flag - //msg.Services = p.cfg.Services - // - //// Advertise our max supported protocol version. - //msg.ProtocolVersion = p.cfg.ProtocolVersion - // - //// Advertise if inv messages for transactions are desired. - //msg.DisableRelayTx = p.cfg.DisableRelayTx - // - //return msg, nil -} - -// PushAddrMsg sends an addr message to the connected peer using the provided -// addresses. This function is useful over manually sending the message via -// QueueMessage since it automatically limits the addresses to the maximum -// number allowed by the message and randomizes the chosen addresses when there -// are too many. It returns the addresses that were actually sent. -// -// This function is safe for concurrent access. -func (p *Peer) PushAddrMsg(addresses []*wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) ([]*wire.NetAddress, error) { - addressCount := len(addresses) - - msg := wire.NewMsgAddresses(false, subnetworkID) - msg.AddrList = make([]*wire.NetAddress, addressCount) - copy(msg.AddrList, addresses) - - // Randomize the addresses sent if there are more than the maximum allowed. - if addressCount > wire.MaxAddressesPerMsg { - // Shuffle the address list. - for i := 0; i < wire.MaxAddressesPerMsg; i++ { - j := i + rand.Intn(addressCount-i) - msg.AddrList[i], msg.AddrList[j] = msg.AddrList[j], msg.AddrList[i] - } - - // Truncate it to the maximum size. - msg.AddrList = msg.AddrList[:wire.MaxAddressesPerMsg] - } - - p.QueueMessage(msg, nil) - return msg.AddrList, nil -} - -// PushGetBlockLocatorMsg sends a getlocator message for the provided high -// and low hash. -// -// This function is safe for concurrent access. -func (p *Peer) PushGetBlockLocatorMsg(highHash, lowHash *daghash.Hash) { - p.SetWasBlockLocatorRequested(true) - msg := wire.NewMsgGetBlockLocator(highHash, lowHash) - p.QueueMessage(msg, nil) -} - -func (p *Peer) isDuplicateGetBlockInvsMsg(lowHash, highHash *daghash.Hash) bool { - p.prevGetBlockInvsMtx.Lock() - defer p.prevGetBlockInvsMtx.Unlock() - return p.prevGetBlockInvsHigh != nil && p.prevGetBlockInvsLow != nil && - lowHash != nil && highHash.IsEqual(p.prevGetBlockInvsHigh) && - lowHash.IsEqual(p.prevGetBlockInvsLow) -} - -// PushGetBlockInvsMsg sends a getblockinvs message for the provided block locator -// and high hash. It will ignore back-to-back duplicate requests. -// -// This function is safe for concurrent access. -func (p *Peer) PushGetBlockInvsMsg(lowHash, highHash *daghash.Hash) error { - // Filter duplicate getblockinvs requests. - if p.isDuplicateGetBlockInvsMsg(lowHash, highHash) { - log.Tracef("Filtering duplicate [getblockinvs] with low "+ - "hash %s, high hash %s", lowHash, highHash) - return nil - } - - // Construct the getblockinvs request and queue it to be sent. 
- msg := wire.NewMsgGetBlocks(lowHash, highHash) - p.QueueMessage(msg, nil) - - // Update the previous getblockinvs request information for filtering - // duplicates. - p.prevGetBlockInvsMtx.Lock() - defer p.prevGetBlockInvsMtx.Unlock() - p.prevGetBlockInvsLow = lowHash - p.prevGetBlockInvsHigh = highHash - return nil -} - -// PushBlockLocatorMsg sends a locator message for the provided block locator. -// -// This function is safe for concurrent access. -func (p *Peer) PushBlockLocatorMsg(locator blockdag.BlockLocator) error { - // Construct the locator request and queue it to be sent. - msg := wire.NewMsgBlockLocator(locator) - p.QueueMessage(msg, nil) - return nil -} - -// PushRejectMsg sends a reject message for the provided command, reject code, -// reject reason, and hash. The hash will only be used when the command is a tx -// or block and should be nil in other cases. The wait parameter will cause the -// function to block until the reject message has actually been sent. -// -// This function is safe for concurrent access. -func (p *Peer) PushRejectMsg(command wire.MessageCommand, code wire.RejectCode, reason string, hash *daghash.Hash, wait bool) { - msg := wire.NewMsgReject(command, code, reason) - if command == wire.CmdTx || command == wire.CmdBlock { - if hash == nil { - log.Warnf("Sending a reject message for command "+ - "type %s which should have specified a hash "+ - "but does not", command) - hash = &daghash.ZeroHash - } - msg.Hash = hash - } - - // Send the message without waiting if the caller has not requested it. - if !wait { - p.QueueMessage(msg, nil) - return - } - - // Send the message and block until it has been sent before returning. - doneChan := make(chan struct{}, 1) - p.QueueMessage(msg, doneChan) - <-doneChan -} - -// handleRemoteVersionMsg is invoked when a version kaspa message is received -// from the remote peer. It will return an error if the remote peer's version -// is not compatible with ours. -func (p *Peer) handleRemoteVersionMsg(msg *wire.MsgVersion) error { - //TODO(libp2p) Remove this function - //// Detect self connections. - //if !allowSelfConns && sentNonces.Exists(msg.Nonce) { - // return errors.New("disconnecting peer connected to self") - //} - - // Notify and disconnect clients that have a protocol version that is - // too old. - // - // NOTE: If minAcceptableProtocolVersion is raised to be higher than - // wire.RejectVersion, this should send a reject packet before - // disconnecting. 
- if msg.ProtocolVersion < minAcceptableProtocolVersion { - reason := fmt.Sprintf("protocol version must be %d or greater", - minAcceptableProtocolVersion) - return errors.New(reason) - } - - // Disconnect from partial nodes in networks that don't allow them - if !p.cfg.DAGParams.EnableNonNativeSubnetworks && msg.SubnetworkID != nil { - return errors.New("partial nodes are not allowed") - } - - // Disconnect if: - // - we are a full node and the outbound connection we've initiated is a partial node - // - the remote node is partial and our subnetwork doesn't match their subnetwork - isLocalNodeFull := p.cfg.SubnetworkID == nil - isRemoteNodeFull := msg.SubnetworkID == nil - if (isLocalNodeFull && !isRemoteNodeFull && !p.inbound) || - (!isLocalNodeFull && !isRemoteNodeFull && !msg.SubnetworkID.IsEqual(p.cfg.SubnetworkID)) { - - return errors.New("incompatible subnetworks") - } - - p.updateStatsFromVersionMsg(msg) - p.updateFlagsFromVersionMsg(msg) - - return nil -} - -// updateStatsFromVersionMsg updates a bunch of stats including block based stats, and the -// peer's time offset. -func (p *Peer) updateStatsFromVersionMsg(msg *wire.MsgVersion) { - p.statsMtx.Lock() - defer p.statsMtx.Unlock() - p.selectedTipHash = msg.SelectedTipHash - p.timeOffset = msg.Timestamp.UnixMilliseconds() - mstime.Now().UnixMilliseconds() -} - -func (p *Peer) updateFlagsFromVersionMsg(msg *wire.MsgVersion) { - // Negotiate the protocol version. - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - - p.advertisedProtoVer = msg.ProtocolVersion - p.protocolVersion = mathUtil.MinUint32(p.protocolVersion, p.advertisedProtoVer) - p.versionKnown = true - log.Debugf("Negotiated protocol version %d for peer %s", - p.protocolVersion, p) - - // Set the peer's ID. - p.id = atomic.AddInt32(&nodeCount, 1) - - // Set the supported services for the peer to what the remote peer - // advertised. - p.services = msg.Services - - // Set the remote peer's user agent. - p.userAgent = msg.UserAgent -} - -// handlePingMsg is invoked when a peer receives a ping kaspa message. For -// recent clients (protocol version > BIP0031Version), it replies with a pong -// message. For older clients, it does nothing and anything other than failure -// is considered a successful ping. -func (p *Peer) handlePingMsg(msg *wire.MsgPing) { - // Include nonce from ping so pong can be identified. - p.QueueMessage(wire.NewMsgPong(msg.Nonce), nil) -} - -// handlePongMsg is invoked when a peer receives a pong kaspa message. It -// updates the ping statistics as required for recent clients. -func (p *Peer) handlePongMsg(msg *wire.MsgPong) { - // Arguably we could use a buffered channel here sending data - // in a fifo manner whenever we send a ping, or a list keeping track of - // the times of each ping. For now we just make a best effort and - // only record stats if it was for the last ping sent. Any preceding - // and overlapping pings will be ignored. It is unlikely to occur - // without large usage of the ping rpc call since we ping infrequently - // enough that if they overlap we would have timed out the peer. - p.statsMtx.Lock() - defer p.statsMtx.Unlock() - if p.lastPingNonce != 0 && msg.Nonce == p.lastPingNonce { - p.lastPingMicros = time.Since(p.lastPingTime).Nanoseconds() - p.lastPingMicros /= 1000 // convert to usec. - p.lastPingNonce = 0 - } -} - -// readMessage reads the next kaspa message from the peer with logging. 
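Besides the minimum-protocol-version check, handleRemoteVersionMsg above rejects partial nodes outright on networks that do not enable non-native subnetworks, and then applies two compatibility rules. A standalone restatement of those two rules, using plain strings (with "" meaning a full node) as a stand-in for *subnetworkid.SubnetworkID:

package main

import "fmt"

// shouldDisconnect restates the subnetwork rules from handleRemoteVersionMsg:
// a full node drops partial peers it dialed itself, and two partial nodes
// must be on the same subnetwork.
func shouldDisconnect(localSubnetwork, remoteSubnetwork string, inbound bool) bool {
	isLocalNodeFull := localSubnetwork == ""
	isRemoteNodeFull := remoteSubnetwork == ""

	if isLocalNodeFull && !isRemoteNodeFull && !inbound {
		return true // we are full, we initiated the connection, and the peer is partial
	}
	if !isLocalNodeFull && !isRemoteNodeFull && localSubnetwork != remoteSubnetwork {
		return true // both partial, but on different subnetworks
	}
	return false
}

func main() {
	fmt.Println(shouldDisconnect("", "subnet-a", false))        // true: outbound full node to a partial peer
	fmt.Println(shouldDisconnect("", "subnet-a", true))         // false: an inbound partial peer is tolerated by this check
	fmt.Println(shouldDisconnect("subnet-a", "subnet-b", true)) // true: mismatched partial nodes
}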
-func (p *Peer) readMessage() (wire.Message, []byte, error) { - n, msg, buf, err := wire.ReadMessageN(p.conn, - p.ProtocolVersion(), p.cfg.DAGParams.Net) - atomic.AddUint64(&p.bytesReceived, uint64(n)) - if p.cfg.Listeners.OnRead != nil { - p.cfg.Listeners.OnRead(p, n, msg, err) - } - if err != nil { - return nil, nil, err - } - - // Use closures to log expensive operations so they are only run when - // the logging level requires it. - logLevel := messageLogLevel(msg) - log.Writef(logLevel, "%s", logger.NewLogClosure(func() string { - // Debug summary of message. - summary := messageSummary(msg) - if len(summary) > 0 { - summary = " (" + summary + ")" - } - return fmt.Sprintf("Received %s%s from %s", - msg.Command(), summary, p) - })) - log.Tracef("%s", logger.NewLogClosure(func() string { - return spew.Sdump(msg) - })) - log.Tracef("%s", logger.NewLogClosure(func() string { - return spew.Sdump(buf) - })) - - return msg, buf, nil -} - -// writeMessage sends a kaspa message to the peer with logging. -func (p *Peer) writeMessage(msg wire.Message) error { - // Don't do anything if we're disconnecting. - if atomic.LoadInt32(&p.disconnect) != 0 { - return nil - } - - // Use closures to log expensive operations so they are only run when - // the logging level requires it. - logLevel := messageLogLevel(msg) - log.Writef(logLevel, "%s", logger.NewLogClosure(func() string { - // Debug summary of message. - summary := messageSummary(msg) - if len(summary) > 0 { - summary = " (" + summary + ")" - } - return fmt.Sprintf("Sending %s%s to %s", msg.Command(), - summary, p) - })) - log.Tracef("%s", logger.NewLogClosure(func() string { - return spew.Sdump(msg) - })) - log.Tracef("%s", logger.NewLogClosure(func() string { - var buf bytes.Buffer - _, err := wire.WriteMessageN(&buf, msg, p.ProtocolVersion(), - p.cfg.DAGParams.Net) - if err != nil { - return err.Error() - } - return spew.Sdump(buf.Bytes()) - })) - - // Write the message to the peer. - n, err := wire.WriteMessageN(p.conn, msg, - p.ProtocolVersion(), p.cfg.DAGParams.Net) - atomic.AddUint64(&p.bytesSent, uint64(n)) - if p.cfg.Listeners.OnWrite != nil { - p.cfg.Listeners.OnWrite(p, n, msg, err) - } - return err -} - -// isAllowedReadError returns whether or not the passed error is allowed without -// disconnecting the peer. In particular, regression tests need to be allowed -// to send malformed messages without the peer being disconnected. -func (p *Peer) isAllowedReadError(err error) bool { - // Only allow read errors in regression test mode. - if p.cfg.DAGParams.Net != wire.Regtest { - return false - } - - // Don't allow the error if it's not specifically a malformed message error. - if msgErr := &(wire.MessageError{}); !errors.As(err, &msgErr) { - return false - } - - // Don't allow the error if it's not coming from localhost or the - // hostname can't be determined for some reason. - host, _, err := net.SplitHostPort(p.addr) - if err != nil { - return false - } - - if host != "127.0.0.1" && host != "localhost" { - return false - } - - // Allowed if all checks passed. - return true -} - -// shouldHandleReadError returns whether or not the passed error, which is -// expected to have come from reading from the remote peer in the inHandler, -// should be logged and responded to with a reject message. -func (p *Peer) shouldHandleReadError(err error) bool { - // No logging or reject message when the peer is being forcibly - // disconnected. 
- if atomic.LoadInt32(&p.disconnect) != 0 { - return false - } - - // No logging or reject message when the remote peer has been - // disconnected. - if err == io.EOF { - return false - } - var opErr *net.OpError - if ok := errors.As(err, &opErr); ok && !opErr.Temporary() { - return false - } - - return true -} - -// maybeAddDeadline potentially adds a deadline for the appropriate expected -// response for the passed wire protocol command to the pending responses map. -func (p *Peer) maybeAddDeadline(pendingResponses map[wire.MessageCommand]time.Time, msgCmd wire.MessageCommand) { - // Setup a deadline for each message being sent that expects a response. - // - // NOTE: Pings are intentionally ignored here since they are typically - // sent asynchronously and as a result of a long backlock of messages, - // such as is typical in the case of initial block download, the - // response won't be received in time. - deadline := time.Now().Add(stallResponseTimeout) - switch msgCmd { - case wire.CmdVersion: - // Expects a verack message. - pendingResponses[wire.CmdVerAck] = deadline - - case wire.CmdGetBlocks: - // Expects an inv message. - pendingResponses[wire.CmdInv] = deadline - - case wire.CmdGetData: - // Expects a block, merkleblock, tx, or notfound message. - pendingResponses[wire.CmdBlock] = deadline - pendingResponses[wire.CmdMerkleBlock] = deadline - pendingResponses[wire.CmdTx] = deadline - pendingResponses[wire.CmdNotFound] = deadline - - case wire.CmdGetSelectedTip: - // Expects a selected tip message. - pendingResponses[wire.CmdSelectedTip] = deadline - } -} - -// stallHandler handles stall detection for the peer. This entails keeping -// track of expected responses and assigning them deadlines while accounting for -// the time spent in callbacks. It must be run as a goroutine. -func (p *Peer) stallHandler() { - // These variables are used to adjust the deadline times forward by the - // time it takes callbacks to execute. This is done because new - // messages aren't read until the previous one is finished processing - // (which includes callbacks), so the deadline for receiving a response - // for a given message must account for the processing time as well. - var handlerActive bool - var handlersStartTime time.Time - var deadlineOffset time.Duration - - // pendingResponses tracks the expected response deadline times. - pendingResponses := make(map[wire.MessageCommand]time.Time) - - // stallTicker is used to periodically check pending responses that have - // exceeded the expected deadline and disconnect the peer due to - // stalling. - stallTicker := time.NewTicker(stallTickInterval) - defer stallTicker.Stop() - - // ioStopped is used to detect when both the input and output handler - // goroutines are done. - var ioStopped bool -out: - for { - select { - case msg := <-p.stallControl: - switch msg.command { - case sccSendMessage: - // Add a deadline for the expected response - // message if needed. - p.maybeAddDeadline(pendingResponses, - msg.message.Command()) - - case sccReceiveMessage: - // Remove received messages from the expected - // response map. Since certain commands expect - // one of a group of responses, remove - // everything in the expected group accordingly. 
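maybeAddDeadline above encodes which responses clear the stall deadline for each request the peer sends. A compact standalone version of that mapping; the command names and the stallResponseTimeout value are stand-ins for the wire and peer constants:

package main

import (
	"fmt"
	"time"
)

const stallResponseTimeout = 30 * time.Second // stand-in for the package constant

// expectedResponses lists, per request command, the responses whose arrival
// satisfies the stall detector. Ping is deliberately absent, mirroring the
// note above.
var expectedResponses = map[string][]string{
	"version":        {"verack"},
	"getblocks":      {"inv"},
	"getdata":        {"block", "merkleblock", "tx", "notfound"},
	"getselectedtip": {"selectedtip"},
}

// addDeadlines registers one deadline per expected response, the same way
// maybeAddDeadline fills pendingResponses.
func addDeadlines(pending map[string]time.Time, request string, now time.Time) {
	for _, response := range expectedResponses[request] {
		pending[response] = now.Add(stallResponseTimeout)
	}
}

func main() {
	pending := make(map[string]time.Time)
	addDeadlines(pending, "getdata", time.Now())
	fmt.Println(len(pending)) // 4: any of block, merkleblock, tx or notfound clears the request
}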
- switch msgCmd := msg.message.Command(); msgCmd { - case wire.CmdBlock: - fallthrough - case wire.CmdMerkleBlock: - fallthrough - case wire.CmdTx: - fallthrough - case wire.CmdNotFound: - delete(pendingResponses, wire.CmdBlock) - delete(pendingResponses, wire.CmdMerkleBlock) - delete(pendingResponses, wire.CmdTx) - delete(pendingResponses, wire.CmdNotFound) - - default: - delete(pendingResponses, msgCmd) - } - - case sccHandlerStart: - // Warn on unbalanced callback signalling. - if handlerActive { - log.Warn("Received handler start " + - "control command while a " + - "handler is already active") - continue - } - - handlerActive = true - handlersStartTime = time.Now() - - case sccHandlerDone: - // Warn on unbalanced callback signalling. - if !handlerActive { - log.Warn("Received handler done " + - "control command when a " + - "handler is not already active") - continue - } - - // Extend active deadlines by the time it took - // to execute the callback. - duration := time.Since(handlersStartTime) - deadlineOffset += duration - handlerActive = false - - default: - log.Warnf("Unsupported message command %d", - msg.command) - } - - case <-stallTicker.C: - // Calculate the offset to apply to the deadline based - // on how long the handlers have taken to execute since - // the last tick. - now := time.Now() - offset := deadlineOffset - if handlerActive { - offset += now.Sub(handlersStartTime) - } - - // Disconnect the peer if any of the pending responses - // don't arrive by their adjusted deadline. - for command, deadline := range pendingResponses { - if now.Before(deadline.Add(offset)) { - continue - } - - p.AddBanScore(BanScoreStallTimeout, 0, fmt.Sprintf("got timeout for command %s", command)) - p.Disconnect() - break - } - - // Reset the deadline offset for the next tick. - deadlineOffset = 0 - - case <-p.inQuit: - // The stall handler can exit once both the input and - // output handler goroutines are done. - if ioStopped { - break out - } - ioStopped = true - - case <-p.outQuit: - // The stall handler can exit once both the input and - // output handler goroutines are done. - if ioStopped { - break out - } - ioStopped = true - } - } - - // Drain any wait channels before going away so there is nothing left - // waiting on this goroutine. -cleanup: - for { - select { - case <-p.stallControl: - default: - break cleanup - } - } - log.Tracef("Peer stall handler done for %s", p) -} - -// inHandler handles all incoming messages for the peer. It must be run as a -// goroutine. -func (p *Peer) inHandler() { - // The timer is stopped when a new message is received and reset after it - // is processed. - idleTimer := spawnAfter("Peer.inHandler-disconnectOnIdle", idleTimeout, func() { - log.Warnf("Peer %s no answer for %s -- disconnecting", p, idleTimeout) - p.Disconnect() - }) - -out: - for atomic.LoadInt32(&p.disconnect) == 0 { - // Read a message and stop the idle timer as soon as the read - // is done. The timer is reset below for the next iteration if - // needed. - rmsg, buf, err := p.readMessage() - idleTimer.Stop() - if err != nil { - // In order to allow regression tests with malformed messages, don't - // disconnect the peer when we're in regression test mode and the - // error is one of the allowed errors. - if p.isAllowedReadError(err) { - log.Errorf("Allowed test error from %s: %s", p, err) - idleTimer.Reset(idleTimeout) - continue - } - - // Only log the error and send reject message if the - // local peer is not forcibly disconnecting and the - // remote peer has not disconnected. 
- if p.shouldHandleReadError(err) { - errMsg := fmt.Sprintf("Can't read message from %s: %s", p, err) - if err != io.ErrUnexpectedEOF { - log.Errorf(errMsg) - } - - // Add ban score, push a reject message for the malformed message - // and wait for the message to be sent before disconnecting. - // - // NOTE: Ideally this would include the command in the header if - // at least that much of the message was valid, but that is not - // currently exposed by wire, so just used malformed for the - // command. - p.AddBanScoreAndPushRejectMsg(wire.CmdRejectMalformed, wire.RejectMalformed, nil, - BanScoreMalformedMessage, 0, errMsg) - } - break out - } - atomic.StoreInt64(&p.lastRecv, mstime.Now().UnixMilliseconds()) - p.stallControl <- stallControlMsg{sccReceiveMessage, rmsg} - - // Handle each supported message type. - p.stallControl <- stallControlMsg{sccHandlerStart, rmsg} - switch msg := rmsg.(type) { - case *wire.MsgVersion: - - reason := "duplicate version message" - p.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectDuplicate, nil, - BanScoreDuplicateVersion, 0, reason) - - case *wire.MsgVerAck: - - // No read lock is necessary because verAckReceived is not written - // to in any other goroutine. - if p.verAckReceived { - p.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectDuplicate, nil, - BanScoreDuplicateVerack, 0, "verack sent twice") - log.Warnf("Already received 'verack' from peer %s", p) - } - p.markVerAckReceived() - if p.cfg.Listeners.OnVerAck != nil { - p.cfg.Listeners.OnVerAck(p, msg) - } - - case *wire.MsgGetAddresses: - if p.cfg.Listeners.OnGetAddr != nil { - p.cfg.Listeners.OnGetAddr(p, msg) - } - - case *wire.MsgAddresses: - if p.cfg.Listeners.OnAddr != nil { - p.cfg.Listeners.OnAddr(p, msg) - } - - case *wire.MsgPing: - p.handlePingMsg(msg) - if p.cfg.Listeners.OnPing != nil { - p.cfg.Listeners.OnPing(p, msg) - } - - case *wire.MsgPong: - p.handlePongMsg(msg) - if p.cfg.Listeners.OnPong != nil { - p.cfg.Listeners.OnPong(p, msg) - } - - case *wire.MsgTx: - if p.cfg.Listeners.OnTx != nil { - p.cfg.Listeners.OnTx(p, msg) - } - - case *wire.MsgBlock: - if p.cfg.Listeners.OnBlock != nil { - p.cfg.Listeners.OnBlock(p, msg, buf) - } - - case *wire.MsgInv: - if p.cfg.Listeners.OnInv != nil { - p.cfg.Listeners.OnInv(p, msg) - } - - case *wire.MsgNotFound: - if p.cfg.Listeners.OnNotFound != nil { - p.cfg.Listeners.OnNotFound(p, msg) - } - - case *wire.MsgGetData: - if p.cfg.Listeners.OnGetData != nil { - p.cfg.Listeners.OnGetData(p, msg) - } - - case *wire.MsgGetBlockLocator: - if p.cfg.Listeners.OnGetBlockLocator != nil { - p.cfg.Listeners.OnGetBlockLocator(p, msg) - } - - case *wire.MsgBlockLocator: - if p.cfg.Listeners.OnBlockLocator != nil { - p.cfg.Listeners.OnBlockLocator(p, msg) - } - - case *wire.MsgGetBlocks: - if p.cfg.Listeners.OnGetBlockInvs != nil { - p.cfg.Listeners.OnGetBlockInvs(p, msg) - } - - case *wire.MsgFeeFilter: - if p.cfg.Listeners.OnFeeFilter != nil { - p.cfg.Listeners.OnFeeFilter(p, msg) - } - - case *wire.MsgFilterAdd: - if p.cfg.Listeners.OnFilterAdd != nil { - p.cfg.Listeners.OnFilterAdd(p, msg) - } - - case *wire.MsgFilterClear: - if p.cfg.Listeners.OnFilterClear != nil { - p.cfg.Listeners.OnFilterClear(p, msg) - } - - case *wire.MsgFilterLoad: - if p.cfg.Listeners.OnFilterLoad != nil { - p.cfg.Listeners.OnFilterLoad(p, msg) - } - - case *wire.MsgMerkleBlock: - if p.cfg.Listeners.OnMerkleBlock != nil { - p.cfg.Listeners.OnMerkleBlock(p, msg) - } - - case *wire.MsgReject: - if p.cfg.Listeners.OnReject != nil { - p.cfg.Listeners.OnReject(p, msg) - 
} - - case *wire.MsgGetSelectedTip: - if p.cfg.Listeners.OnGetSelectedTip != nil { - p.cfg.Listeners.OnGetSelectedTip() - } - - case *wire.MsgSelectedTip: - if p.cfg.Listeners.OnSelectedTip != nil { - p.cfg.Listeners.OnSelectedTip(p, msg) - } - - default: - log.Debugf("Received unhandled message of type %s "+ - "from %s", rmsg.Command(), p) - } - p.stallControl <- stallControlMsg{sccHandlerDone, rmsg} - - // A message was received so reset the idle timer. - idleTimer.Reset(idleTimeout) - } - - // Ensure the idle timer is stopped to avoid leaking the resource. - idleTimer.Stop() - - // Ensure connection is closed. - p.Disconnect() - - close(p.inQuit) - log.Tracef("Peer input handler done for %s", p) -} - -func (p *Peer) markVerAckReceived() { - p.flagsMtx.Lock() - defer p.flagsMtx.Unlock() - p.verAckReceived = true -} - -// queueHandler handles the queuing of outgoing data for the peer. This runs as -// a muxer for various sources of input so we can ensure that server and peer -// handlers will not block on us sending a message. That data is then passed on -// to outHandler to be actually written. -func (p *Peer) queueHandler() { - pendingMsgs := list.New() - invSendQueue := list.New() - trickleTicker := time.NewTicker(trickleTimeout) - defer trickleTicker.Stop() - - // We keep the waiting flag so that we know if we have a message queued - // to the outHandler or not. We could use the presence of a head of - // the list for this but then we have rather racy concerns about whether - // it has gotten it at cleanup time - and thus who sends on the - // message's done channel. To avoid such confusion we keep a different - // flag and pendingMsgs only contains messages that we have not yet - // passed to outHandler. - waiting := false - - // To avoid duplication below. - queuePacket := func(msg outMsg, list *list.List, waiting bool) bool { - if !waiting { - p.sendQueue <- msg - } else { - list.PushBack(msg) - } - // we are always waiting now. - return true - } -out: - for { - select { - case msg := <-p.outputQueue: - waiting = queuePacket(msg, pendingMsgs, waiting) - - // This channel is notified when a message has been sent across - // the network socket. - case <-p.sendDoneQueue: - // No longer waiting if there are no more messages - // in the pending messages queue. - next := pendingMsgs.Front() - if next == nil { - waiting = false - continue - } - - // Notify the outHandler about the next item to - // asynchronously send. - val := pendingMsgs.Remove(next) - p.sendQueue <- val.(outMsg) - - case iv := <-p.outputInvChan: - // No handshake? They'll find out soon enough. - if p.VersionKnown() { - // If this is a new block, then we'll blast it - // out immediately, skipping the inv trickle - // queue. - if iv.Type == wire.InvTypeBlock { - invMsg := wire.NewMsgInvSizeHint(1) - invMsg.AddInvVect(iv) - waiting = queuePacket(outMsg{msg: invMsg}, - pendingMsgs, waiting) - } else { - invSendQueue.PushBack(iv) - } - } - - case <-trickleTicker.C: - // Don't send anything if we're disconnecting or there - // is no queued inventory. - // version is known if send queue has any entries. - if atomic.LoadInt32(&p.disconnect) != 0 || - invSendQueue.Len() == 0 { - continue - } - - // Create and send as many inv messages as needed to - // drain the inventory send queue. 
- invMsg := wire.NewMsgInvSizeHint(uint(invSendQueue.Len())) - for e := invSendQueue.Front(); e != nil; e = invSendQueue.Front() { - iv := invSendQueue.Remove(e).(*wire.InvVect) - - // Don't send inventory that became known after - // the initial check. - if p.knownInventory.Exists(iv) { - continue - } - - invMsg.AddInvVect(iv) - if len(invMsg.InvList) >= maxInvTrickleSize { - waiting = queuePacket( - outMsg{msg: invMsg}, - pendingMsgs, waiting) - invMsg = wire.NewMsgInvSizeHint(uint(invSendQueue.Len())) - } - - // Add the inventory that is being relayed to - // the known inventory for the peer. - p.AddKnownInventory(iv) - } - if len(invMsg.InvList) > 0 { - waiting = queuePacket(outMsg{msg: invMsg}, - pendingMsgs, waiting) - } - - case <-p.quit: - break out - } - } - - // Drain any wait channels before we go away so we don't leave something - // waiting for us. - for e := pendingMsgs.Front(); e != nil; e = pendingMsgs.Front() { - val := pendingMsgs.Remove(e) - msg := val.(outMsg) - if msg.doneChan != nil { - msg.doneChan <- struct{}{} - } - } -cleanup: - for { - select { - case msg := <-p.outputQueue: - if msg.doneChan != nil { - msg.doneChan <- struct{}{} - } - case <-p.outputInvChan: - // Just drain channel - // sendDoneQueue is buffered so doesn't need draining. - default: - break cleanup - } - } - close(p.queueQuit) - log.Tracef("Peer queue handler done for %s", p) -} - -// outHandler handles all outgoing messages for the peer. It must be run as a -// goroutine. It uses a buffered channel to serialize output messages while -// allowing the sender to continue running asynchronously. -func (p *Peer) outHandler() { -out: - for { - select { - case msg := <-p.sendQueue: - switch m := msg.msg.(type) { - case *wire.MsgPing: - func() { - p.statsMtx.Lock() - defer p.statsMtx.Unlock() - p.lastPingNonce = m.Nonce - p.lastPingTime = time.Now() - }() - } - - p.stallControl <- stallControlMsg{sccSendMessage, msg.msg} - - err := p.writeMessage(msg.msg) - if err != nil { - p.Disconnect() - log.Errorf("Failed to send message to "+ - "%s: %s", p, err) - if msg.doneChan != nil { - msg.doneChan <- struct{}{} - } - continue - } - - // At this point, the message was successfully sent, so - // update the last send time, signal the sender of the - // message that it has been sent (if requested), and - // signal the send queue to the deliver the next queued - // message. - atomic.StoreInt64(&p.lastSend, mstime.Now().UnixMilliseconds()) - if msg.doneChan != nil { - msg.doneChan <- struct{}{} - } - p.sendDoneQueue <- struct{}{} - - case <-p.quit: - break out - } - } - - <-p.queueQuit - - // Drain any wait channels before we go away so we don't leave something - // waiting for us. We have waited on queueQuit and thus we can be sure - // that we will not miss anything sent on sendQueue. -cleanup: - for { - select { - case msg := <-p.sendQueue: - if msg.doneChan != nil { - msg.doneChan <- struct{}{} - } - // no need to send on sendDoneQueue since queueHandler - // has been waited on and already exited. - default: - break cleanup - } - } - close(p.outQuit) - log.Tracef("Peer output handler done for %s", p) -} - -// pingHandler periodically pings the peer. It must be run as a goroutine. 
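The queue handler above keeps a single waiting flag so that exactly one message at a time is handed to the output handler, with everything else parked in pendingMsgs until sendDoneQueue signals completion. A reduced sketch of that hand-off; Message is a stand-in for outMsg:

package main

import (
	"container/list"
	"fmt"
)

// Message stands in for the outMsg type used above.
type Message string

// queueNext reproduces the hand-off in the queue handler: if the writer is
// idle, send directly; otherwise park the message until the writer reports
// back on its done channel.
func queueNext(msg Message, pending *list.List, sendQueue chan Message, waiting bool) bool {
	if !waiting {
		sendQueue <- msg
	} else {
		pending.PushBack(msg)
	}
	return true // the writer is now (still) busy
}

func main() {
	pending := list.New()
	sendQueue := make(chan Message, 1)

	waiting := queueNext("ping", pending, sendQueue, false) // goes straight out
	queueNext("addr", pending, sendQueue, waiting)          // parked until the writer is done

	fmt.Println(<-sendQueue, pending.Len()) // ping 1
}

When the writer reports back, the front of the pending list (if any) is popped and pushed into sendQueue, which is exactly what the sendDoneQueue case above does.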
-func (p *Peer) pingHandler() { - pingTicker := time.NewTicker(pingInterval) - defer pingTicker.Stop() - -out: - for { - select { - case <-pingTicker.C: - nonce, err := random.Uint64() - if err != nil { - log.Errorf("Not sending ping to %s: %s", p, err) - continue - } - p.QueueMessage(wire.NewMsgPing(nonce), nil) - - case <-p.quit: - break out - } - } -} - -// QueueMessage adds the passed kaspa message to the peer send queue. -// -// This function is safe for concurrent access. -func (p *Peer) QueueMessage(msg wire.Message, doneChan chan<- struct{}) { - // Avoid risk of deadlock if goroutine already exited. The goroutine - // we will be sending to hangs around until it knows for a fact that - // it is marked as disconnected and *then* it drains the channels. - if !p.Connected() { - if doneChan != nil { - spawn("Peer.QueueMessage-sendToDoneChan", func() { - doneChan <- struct{}{} - }) - } - return - } - p.outputQueue <- outMsg{msg: msg, doneChan: doneChan} -} - -// QueueInventory adds the passed inventory to the inventory send queue which -// might not be sent right away, rather it is trickled to the peer in batches. -// Inventory that the peer is already known to have is ignored. -// -// This function is safe for concurrent access. -func (p *Peer) QueueInventory(invVect *wire.InvVect) { - // Don't add the inventory to the send queue if the peer is already - // known to have it. - if p.knownInventory.Exists(invVect) { - return - } - - // Avoid risk of deadlock if goroutine already exited. The goroutine - // we will be sending to hangs around until it knows for a fact that - // it is marked as disconnected and *then* it drains the channels. - if !p.Connected() { - return - } - - p.outputInvChan <- invVect -} - -// AssociateConnection associates the given conn to the peer. Calling this -// function when the peer is already connected will have no effect. -func (p *Peer) AssociateConnection(conn net.Conn) error { - // Already connected? - if !atomic.CompareAndSwapInt32(&p.connected, 0, 1) { - return nil - } - - p.conn = conn - p.timeConnected = time.Now() - - if p.inbound { - p.addr = p.conn.RemoteAddr().String() - - // Set up a NetAddress for the peer to be used with AddrManager. We - // only do this inbound because outbound set this up at connection time - // and no point recomputing. - na, err := newNetAddress(p.conn.RemoteAddr(), p.services) - if err != nil { - p.Disconnect() - return errors.Wrap(err, "Cannot create remote net address") - } - p.na = na - } - - if err := p.start(); err != nil { - p.Disconnect() - return errors.Wrapf(err, "Cannot start peer %s", p) - } - - return nil -} - -// Connected returns whether or not the peer is currently connected. -// -// This function is safe for concurrent access. -func (p *Peer) Connected() bool { - return atomic.LoadInt32(&p.connected) != 0 && - atomic.LoadInt32(&p.disconnect) == 0 -} - -// Disconnect disconnects the peer by closing the connection. Calling this -// function when the peer is already disconnected or in the process of -// disconnecting will have no effect. -func (p *Peer) Disconnect() { - if atomic.AddInt32(&p.disconnect, 1) != 1 { - return - } - - log.Tracef("Disconnecting %s", p) - if atomic.LoadInt32(&p.connected) != 0 { - p.conn.Close() - } - close(p.quit) -} - -// start begins processing input and output messages. 
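Disconnect above relies on a single atomic increment to make shutdown idempotent: only the caller that moves the counter from 0 to 1 closes the quit channel, so concurrent callers can never close it twice. A self-contained sketch of that guard:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// closer mimics the shutdown guard in Disconnect: the first caller wins the
// atomic increment and closes quit; later callers return immediately.
type closer struct {
	disconnect int32
	quit       chan struct{}
}

func (c *closer) Disconnect() {
	if atomic.AddInt32(&c.disconnect, 1) != 1 {
		return
	}
	close(c.quit)
}

func main() {
	c := &closer{quit: make(chan struct{})}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Disconnect() // safe to race: only one close happens
		}()
	}
	wg.Wait()

	_, open := <-c.quit
	fmt.Println("quit closed:", !open) // quit closed: true
}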
-func (p *Peer) start() error { - log.Tracef("Starting peer %s", p) - - negotiateErr := make(chan error, 1) - spawn("Peer.start-negotiateProtocol", func() { - if p.inbound { - negotiateErr <- p.negotiateInboundProtocol() - } else { - negotiateErr <- p.negotiateOutboundProtocol() - } - }) - - // Negotiate the protocol within the specified negotiateTimeout. - select { - case err := <-negotiateErr: - if err != nil { - return err - } - case <-time.After(negotiateTimeout): - return errors.New("protocol negotiation timeout") - } - log.Debugf("Connected to %s", p.Addr()) - - // The protocol has been negotiated successfully so start processing input - // and output messages. - spawn("Peer.stallHandler", p.stallHandler) - spawn("Peer.inHandler", p.inHandler) - spawn("Peer.queueHandler", p.queueHandler) - spawn("Peer.outHandler", p.outHandler) - spawn("Peer.pingHandler", p.pingHandler) - - // Send our verack message now that the IO processing machinery has started. - p.QueueMessage(wire.NewMsgVerAck(), nil) - - return nil -} - -// WaitForDisconnect waits until the peer has completely disconnected and all -// resources are cleaned up. This will happen if either the local or remote -// side has been disconnected or the peer is forcibly disconnected via -// Disconnect. -func (p *Peer) WaitForDisconnect() { - <-p.quit -} - -// readRemoteVersionMsg waits for the next message to arrive from the remote -// peer. If the next message is not a version message or the version is not -// acceptable then return an error. -func (p *Peer) readRemoteVersionMsg() error { - // Read their version message. - msg, _, err := p.readMessage() - if err != nil { - return err - } - - remoteVerMsg, ok := msg.(*wire.MsgVersion) - if !ok { - errStr := "A version message must precede all others" - log.Errorf(errStr) - - p.AddBanScore(BanScoreNonVersionFirstMessage, 0, errStr) - - rejectMsg := wire.NewMsgReject(msg.Command(), wire.RejectMalformed, - errStr) - return p.writeMessage(rejectMsg) - } - - if err := p.handleRemoteVersionMsg(remoteVerMsg); err != nil { - return err - } - - if p.cfg.Listeners.OnVersion != nil { - p.cfg.Listeners.OnVersion(p, remoteVerMsg) - } - return nil -} - -// writeLocalVersionMsg writes our version message to the remote peer. -func (p *Peer) writeLocalVersionMsg() error { - localVerMsg, err := p.localVersionMsg() - if err != nil { - return err - } - - return p.writeMessage(localVerMsg) -} - -// negotiateInboundProtocol waits to receive a version message from the peer -// then sends our version message. If the events do not occur in that order then -// it returns an error. -func (p *Peer) negotiateInboundProtocol() error { - if err := p.readRemoteVersionMsg(); err != nil { - return err - } - - return p.writeLocalVersionMsg() -} - -// negotiateOutboundProtocol sends our version message then waits to receive a -// version message from the peer. If the events do not occur in that order then -// it returns an error. -func (p *Peer) negotiateOutboundProtocol() error { - if err := p.writeLocalVersionMsg(); err != nil { - return err - } - - return p.readRemoteVersionMsg() -} - -// newPeerBase returns a new base kaspa peer based on the inbound flag. This -// is used by the NewInboundPeer and NewOutboundPeer functions to perform base -// setup needed by both types of peers. -func newPeerBase(origCfg *Config, appCfg *config.Config, inbound bool) *Peer { - // Default to the max supported protocol version if not specified by the - // caller. - cfg := *origCfg // Copy to avoid mutating caller. 
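start() above bounds the version handshake by racing the negotiation goroutine against negotiateTimeout. A reduced sketch of that shape with nothing kaspad-specific: a plain goroutine stands in for spawn, and the timeout is whatever the caller passes.

package example

import (
	"time"

	"github.com/pkg/errors"
)

// runWithTimeout runs work in its own goroutine and gives up after timeout
// instead of blocking forever on a silent peer, mirroring the select in
// start() above.
func runWithTimeout(work func() error, timeout time.Duration) error {
	errChan := make(chan error, 1) // buffered so the goroutine never leaks on timeout
	go func() {
		errChan <- work()
	}()

	select {
	case err := <-errChan:
		return err
	case <-time.After(timeout):
		return errors.New("protocol negotiation timeout")
	}
}

The one-slot buffer on errChan lets the worker finish and exit even when the timeout branch has already won the select.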
- if cfg.ProtocolVersion == 0 { - cfg.ProtocolVersion = MaxProtocolVersion - } - - // Set the DAG parameters to testnet if the caller did not specify any. - if cfg.DAGParams == nil { - cfg.DAGParams = &dagconfig.TestnetParams - } - - p := Peer{ - inbound: inbound, - knownInventory: newMruInventoryMap(maxKnownInventory), - stallControl: make(chan stallControlMsg, 1), // nonblocking sync - outputQueue: make(chan outMsg, outputBufferSize), - sendQueue: make(chan outMsg, 1), // nonblocking sync - sendDoneQueue: make(chan struct{}, 1), // nonblocking sync - outputInvChan: make(chan *wire.InvVect, outputBufferSize), - inQuit: make(chan struct{}), - queueQuit: make(chan struct{}), - outQuit: make(chan struct{}), - quit: make(chan struct{}), - cfg: cfg, // Copy so caller can't mutate. - AppCfg: appCfg, - services: cfg.Services, - protocolVersion: cfg.ProtocolVersion, - } - return &p -} - -// NewInboundPeer returns a new inbound kaspa peer. Use Start to begin -// processing incoming and outgoing messages. -func NewInboundPeer(cfg *Config, appCfg *config.Config) *Peer { - return newPeerBase(cfg, appCfg, true) -} - -// NewOutboundPeer returns a new outbound kaspa peer. -func NewOutboundPeer(cfg *Config, appCfg *config.Config, addr string) (*Peer, error) { - p := newPeerBase(cfg, appCfg, false) - p.addr = addr - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return nil, err - } - - if cfg.HostToNetAddress != nil { - na, err := cfg.HostToNetAddress(host, uint16(port), cfg.Services) - if err != nil { - return nil, err - } - p.na = na - } else { - p.na = wire.NewNetAddressIPPort(net.ParseIP(host), uint16(port), - cfg.Services) - } - - return p, nil -} - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/protocol/flowcontext/blocks.go b/protocol/flowcontext/blocks.go index 4c2f26222..4b1e68fcd 100644 --- a/protocol/flowcontext/blocks.go +++ b/protocol/flowcontext/blocks.go @@ -46,3 +46,9 @@ func (f *FlowContext) OnNewBlock(block *util.Block) error { func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks { return f.sharedRequestedBlocks } + +// AddBlock adds the given block to the DAG and propagates it. +func (f *FlowContext) AddBlock(block *util.Block) error { + // TODO(libp2p): unimplemented + panic("unimplemented") +} diff --git a/protocol/flowcontext/flow_context.go b/protocol/flowcontext/flow_context.go index a7cdf31e2..99f7cc904 100644 --- a/protocol/flowcontext/flow_context.go +++ b/protocol/flowcontext/flow_context.go @@ -35,6 +35,7 @@ type FlowContext struct { isInIBD uint32 startIBDMutex sync.Mutex + ibdPeer *peerpkg.Peer peers map[*id.ID]*peerpkg.Peer peersMutex sync.RWMutex diff --git a/protocol/flowcontext/ibd.go b/protocol/flowcontext/ibd.go index ef1b3e056..068427572 100644 --- a/protocol/flowcontext/ibd.go +++ b/protocol/flowcontext/ibd.go @@ -24,6 +24,7 @@ func (f *FlowContext) StartIBDIfRequired() { } atomic.StoreUint32(&f.isInIBD, 1) + f.ibdPeer = peer peer.StartIBD() } @@ -64,7 +65,18 @@ func (f *FlowContext) requestSelectedTips() { // FinishIBD finishes the current IBD flow and starts a new one if required. func (f *FlowContext) FinishIBD() { + f.ibdPeer = nil + atomic.StoreUint32(&f.isInIBD, 0) f.StartIBDIfRequired() } + +// IBDPeer returns the currently active IBD peer. 
+// Returns nil if we aren't currently in IBD +func (f *FlowContext) IBDPeer() *peerpkg.Peer { + if !f.IsInIBD() { + return nil + } + return f.ibdPeer +} diff --git a/protocol/flowcontext/network.go b/protocol/flowcontext/network.go index 252d58a52..2b8ed900e 100644 --- a/protocol/flowcontext/network.go +++ b/protocol/flowcontext/network.go @@ -44,3 +44,17 @@ func (f *FlowContext) readyPeerIDs() []*id.ID { func (f *FlowContext) Broadcast(message wire.Message) error { return f.netAdapter.Broadcast(f.readyPeerIDs(), message) } + +// Peers returns the currently active peers +func (f *FlowContext) Peers() []*peerpkg.Peer { + f.peersMutex.RLock() + defer f.peersMutex.RUnlock() + + peers := make([]*peerpkg.Peer, len(f.peers)) + i := 0 + for _, peer := range f.peers { + peers[i] = peer + i++ + } + return peers +} diff --git a/protocol/flows/handshake/handshake.go b/protocol/flows/handshake/handshake.go index eab28a907..67131ec73 100644 --- a/protocol/flows/handshake/handshake.go +++ b/protocol/flows/handshake/handshake.go @@ -28,7 +28,8 @@ type HandleHandshakeContext interface { // HandleHandshake sets up the handshake protocol - It sends a version message and waits for an incoming // version message, as well as a verack for the sent version -func HandleHandshake(context HandleHandshakeContext, router *routerpkg.Router) (peer *peerpkg.Peer, closed bool, err error) { +func HandleHandshake(context HandleHandshakeContext, router *routerpkg.Router, + netConnection *netadapter.NetConnection) (peer *peerpkg.Peer, closed bool, err error) { receiveVersionRoute, err := router.AddIncomingRoute([]wire.MessageCommand{wire.CmdVersion}) if err != nil { @@ -49,7 +50,7 @@ func HandleHandshake(context HandleHandshakeContext, router *routerpkg.Router) ( errChanUsed := uint32(0) errChan := make(chan error) - peer = peerpkg.New() + peer = peerpkg.New(netConnection) var peerAddress *wire.NetAddress spawn("HandleHandshake-ReceiveVersion", func() { diff --git a/protocol/manager.go b/protocol/manager.go index 6a32a2e9b..4a5b5fbbe 100644 --- a/protocol/manager.go +++ b/protocol/manager.go @@ -7,6 +7,8 @@ import ( "github.com/kaspanet/kaspad/mempool" "github.com/kaspanet/kaspad/netadapter" "github.com/kaspanet/kaspad/protocol/flowcontext" + peerpkg "github.com/kaspanet/kaspad/protocol/peer" + "github.com/kaspanet/kaspad/util" ) // Manager manages the p2p protocol @@ -39,3 +41,24 @@ func (m *Manager) Start() error { func (m *Manager) Stop() error { return m.context.NetAdapter().Stop() } + +// Peers returns the currently active peers +func (m *Manager) Peers() []*peerpkg.Peer { + return m.context.Peers() +} + +// IBDPeer returns the currently active IBD peer. +// Returns nil if we aren't currently in IBD +func (m *Manager) IBDPeer() *peerpkg.Peer { + return m.context.IBDPeer() +} + +// AddTransaction adds transaction to the mempool and propagates it. +func (m *Manager) AddTransaction(tx *util.Tx) error { + return m.context.AddTransaction(tx) +} + +// AddBlock adds the given block to the DAG and propagates it. +func (m *Manager) AddBlock(block *util.Block) error { + return m.context.AddBlock(block) +} diff --git a/protocol/peer/peer.go b/protocol/peer/peer.go index d7669c17a..216db306a 100644 --- a/protocol/peer/peer.go +++ b/protocol/peer/peer.go @@ -1,6 +1,7 @@ package peer import ( + "github.com/kaspanet/kaspad/netadapter" "sync" "sync/atomic" "time" @@ -15,6 +16,8 @@ import ( // Peer holds data about a peer. 
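The new FlowContext.Peers accessor shown above copies the peer map into a slice while holding the read lock, so callers iterate over a stable snapshot and the live map never escapes the lock. A standalone sketch of that pattern, with peerInfo and string keys standing in for *peerpkg.Peer and *id.ID:

package example

import "sync"

// peerInfo stands in for *peerpkg.Peer; only the snapshot mechanics matter here.
type peerInfo struct{ address string }

type peerRegistry struct {
	peersMutex sync.RWMutex
	peers      map[string]*peerInfo
}

// Peers returns the current peers as a slice; later additions or removals
// cannot race with the caller's iteration because the map itself never
// leaves the lock's protection.
func (r *peerRegistry) Peers() []*peerInfo {
	r.peersMutex.RLock()
	defer r.peersMutex.RUnlock()

	peers := make([]*peerInfo, 0, len(r.peers))
	for _, peer := range r.peers {
		peers = append(peers, peer)
	}
	return peers
}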
type Peer struct { + connection *netadapter.NetConnection + selectedTipHashMtx sync.RWMutex selectedTipHash *daghash.Hash @@ -39,8 +42,9 @@ type Peer struct { } // New returns a new Peer -func New() *Peer { +func New(connection *netadapter.NetConnection) *Peer { return &Peer{ + connection: connection, selectedTipRequestChan: make(chan struct{}), ibdStartChan: make(chan struct{}), } @@ -113,8 +117,7 @@ func (p *Peer) SetPingIdle() { } func (p *Peer) String() string { - //TODO(libp2p) - panic("unimplemented") + return p.connection.String() } // RequestSelectedTipIfRequired notifies the peer that requesting @@ -156,3 +159,17 @@ func (p *Peer) StartIBD() { func (p *Peer) WaitForIBDStart() { <-p.ibdStartChan } + +// Address returns the address associated with this connection +func (p *Peer) Address() string { + return p.connection.Address() +} + +// LastPingDuration returns the duration of the last ping to +// this peer +func (p *Peer) LastPingDuration() time.Duration { + p.pingLock.Lock() + defer p.pingLock.Unlock() + + return p.lastPingDuration +} diff --git a/protocol/protocol.go b/protocol/protocol.go index 68c0443e7..901613ccc 100644 --- a/protocol/protocol.go +++ b/protocol/protocol.go @@ -2,6 +2,7 @@ package protocol import ( "fmt" + "github.com/kaspanet/kaspad/netadapter" "sync/atomic" "github.com/kaspanet/kaspad/protocol/flows/handshake" @@ -19,11 +20,10 @@ import ( "github.com/pkg/errors" ) -func (m *Manager) routerInitializer() (*routerpkg.Router, error) { - +func (m *Manager) routerInitializer(netConnection *netadapter.NetConnection) (*routerpkg.Router, error) { router := routerpkg.NewRouter() spawn("newRouterInitializer-startFlows", func() { - err := m.startFlows(router) + err := m.startFlows(netConnection, router) if err != nil { if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) { if protocolErr.ShouldBan { @@ -50,14 +50,13 @@ func (m *Manager) routerInitializer() (*routerpkg.Router, error) { } }) return router, nil - } -func (m *Manager) startFlows(router *routerpkg.Router) error { +func (m *Manager) startFlows(netConnection *netadapter.NetConnection, router *routerpkg.Router) error { stop := make(chan error) stopped := uint32(0) - peer, closed, err := handshake.HandleHandshake(m.context, router) + peer, closed, err := handshake.HandleHandshake(m.context, router, netConnection) if err != nil { return err } diff --git a/rpcclient/CONTRIBUTORS b/rpc/client/CONTRIBUTORS similarity index 100% rename from rpcclient/CONTRIBUTORS rename to rpc/client/CONTRIBUTORS diff --git a/rpcclient/README.md b/rpc/client/README.md similarity index 100% rename from rpcclient/README.md rename to rpc/client/README.md diff --git a/rpcclient/dag.go b/rpc/client/dag.go similarity index 87% rename from rpcclient/dag.go rename to rpc/client/dag.go index 9e5bbc72b..32d09858d 100644 --- a/rpcclient/dag.go +++ b/rpc/client/dag.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcclient +package client import ( "bytes" @@ -13,7 +13,7 @@ import ( "github.com/pkg/errors" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/wire" ) @@ -45,7 +45,7 @@ func (r FutureGetSelectedTipHashResult) Receive() (*daghash.Hash, error) { // // See GetSelectedTipHash for the blocking version and more details. 
func (c *Client) GetSelectedTipHashAsync() FutureGetSelectedTipHashResult { - cmd := rpcmodel.NewGetSelectedTipHashCmd() + cmd := model.NewGetSelectedTipHashCmd() return c.sendCmd(cmd) } @@ -100,7 +100,7 @@ func (c *Client) GetBlockAsync(blockHash *daghash.Hash, subnetworkID *string) Fu hash = blockHash.String() } - cmd := rpcmodel.NewGetBlockCmd(hash, pointers.Bool(false), pointers.Bool(false), subnetworkID) + cmd := model.NewGetBlockCmd(hash, pointers.Bool(false), pointers.Bool(false), subnetworkID) return c.sendCmd(cmd) } @@ -118,13 +118,13 @@ type FutureGetBlocksResult chan *response // Receive waits for the response promised by the future and returns the blocks // starting from lowHash up to the virtual ordered by blue score. -func (r FutureGetBlocksResult) Receive() (*rpcmodel.GetBlocksResult, error) { +func (r FutureGetBlocksResult) Receive() (*model.GetBlocksResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } - var result rpcmodel.GetBlocksResult + var result model.GetBlocksResult if err := json.Unmarshal(res, &result); err != nil { return nil, errors.Wrap(err, string(res)) } @@ -137,13 +137,13 @@ func (r FutureGetBlocksResult) Receive() (*rpcmodel.GetBlocksResult, error) { // // See GetBlocks for the blocking version and more details. func (c *Client) GetBlocksAsync(includeRawBlockData bool, IncludeVerboseBlockData bool, lowHash *string) FutureGetBlocksResult { - cmd := rpcmodel.NewGetBlocksCmd(includeRawBlockData, IncludeVerboseBlockData, lowHash) + cmd := model.NewGetBlocksCmd(includeRawBlockData, IncludeVerboseBlockData, lowHash) return c.sendCmd(cmd) } // GetBlocks returns the blocks starting from lowHash up to the virtual ordered // by blue score. -func (c *Client) GetBlocks(includeRawBlockData bool, includeVerboseBlockData bool, lowHash *string) (*rpcmodel.GetBlocksResult, error) { +func (c *Client) GetBlocks(includeRawBlockData bool, includeVerboseBlockData bool, lowHash *string) (*model.GetBlocksResult, error) { return c.GetBlocksAsync(includeRawBlockData, includeVerboseBlockData, lowHash).Receive() } @@ -153,14 +153,14 @@ type FutureGetBlockVerboseResult chan *response // Receive waits for the response promised by the future and returns the data // structure from the server with information about the requested block. -func (r FutureGetBlockVerboseResult) Receive() (*rpcmodel.GetBlockVerboseResult, error) { +func (r FutureGetBlockVerboseResult) Receive() (*model.GetBlockVerboseResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal the raw result into a BlockResult. - var blockResult rpcmodel.GetBlockVerboseResult + var blockResult model.GetBlockVerboseResult err = json.Unmarshal(res, &blockResult) if err != nil { return nil, errors.Wrap(err, "couldn't decode getBlock response") @@ -179,7 +179,7 @@ func (c *Client) GetBlockVerboseAsync(blockHash *daghash.Hash, subnetworkID *str hash = blockHash.String() } - cmd := rpcmodel.NewGetBlockCmd(hash, pointers.Bool(true), pointers.Bool(false), subnetworkID) + cmd := model.NewGetBlockCmd(hash, pointers.Bool(true), pointers.Bool(false), subnetworkID) return c.sendCmd(cmd) } @@ -188,7 +188,7 @@ func (c *Client) GetBlockVerboseAsync(blockHash *daghash.Hash, subnetworkID *str // // See GetBlockVerboseTx to retrieve transaction data structures as well. // See GetBlock to retrieve a raw block instead. 
-func (c *Client) GetBlockVerbose(blockHash *daghash.Hash, subnetworkID *string) (*rpcmodel.GetBlockVerboseResult, error) { +func (c *Client) GetBlockVerbose(blockHash *daghash.Hash, subnetworkID *string) (*model.GetBlockVerboseResult, error) { return c.GetBlockVerboseAsync(blockHash, subnetworkID).Receive() } @@ -203,7 +203,7 @@ func (c *Client) GetBlockVerboseTxAsync(blockHash *daghash.Hash, subnetworkID *s hash = blockHash.String() } - cmd := rpcmodel.NewGetBlockCmd(hash, pointers.Bool(true), pointers.Bool(true), subnetworkID) + cmd := model.NewGetBlockCmd(hash, pointers.Bool(true), pointers.Bool(true), subnetworkID) return c.sendCmd(cmd) } @@ -212,7 +212,7 @@ func (c *Client) GetBlockVerboseTxAsync(blockHash *daghash.Hash, subnetworkID *s // // See GetBlockVerbose if only transaction hashes are preferred. // See GetBlock to retrieve a raw block instead. -func (c *Client) GetBlockVerboseTx(blockHash *daghash.Hash, subnetworkID *string) (*rpcmodel.GetBlockVerboseResult, error) { +func (c *Client) GetBlockVerboseTx(blockHash *daghash.Hash, subnetworkID *string) (*model.GetBlockVerboseResult, error) { return c.GetBlockVerboseTxAsync(blockHash, subnetworkID).Receive() } @@ -243,7 +243,7 @@ func (r FutureGetBlockCountResult) Receive() (int64, error) { // // See GetBlockCount for the blocking version and more details. func (c *Client) GetBlockCountAsync() FutureGetBlockCountResult { - cmd := rpcmodel.NewGetBlockCountCmd() + cmd := model.NewGetBlockCountCmd() return c.sendCmd(cmd) } @@ -260,13 +260,13 @@ type FutureGetChainFromBlockResult chan *response // parent chain starting from startHash up to the virtual. If startHash is not in // the selected parent chain, it goes down the DAG until it does reach a hash in // the selected parent chain while collecting hashes into RemovedChainBlockHashes. -func (r FutureGetChainFromBlockResult) Receive() (*rpcmodel.GetChainFromBlockResult, error) { +func (r FutureGetChainFromBlockResult) Receive() (*model.GetChainFromBlockResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } - var result rpcmodel.GetChainFromBlockResult + var result model.GetChainFromBlockResult if err := json.Unmarshal(res, &result); err != nil { return nil, errors.Wrap(err, "couldn't decode getChainFromBlock response") } @@ -279,7 +279,7 @@ func (r FutureGetChainFromBlockResult) Receive() (*rpcmodel.GetChainFromBlockRes // // See GetChainFromBlock for the blocking version and more details. func (c *Client) GetChainFromBlockAsync(includeBlocks bool, startHash *string) FutureGetChainFromBlockResult { - cmd := rpcmodel.NewGetChainFromBlockCmd(includeBlocks, startHash) + cmd := model.NewGetChainFromBlockCmd(includeBlocks, startHash) return c.sendCmd(cmd) } @@ -287,7 +287,7 @@ func (c *Client) GetChainFromBlockAsync(includeBlocks bool, startHash *string) F // up to the virtual. If startHash is not in the selected parent chain, it goes // down the DAG until it does reach a hash in the selected parent chain while // collecting hashes into RemovedChainBlockHashes. -func (c *Client) GetChainFromBlock(includeBlocks bool, startHash *string) (*rpcmodel.GetChainFromBlockResult, error) { +func (c *Client) GetChainFromBlock(includeBlocks bool, startHash *string) (*model.GetChainFromBlockResult, error) { return c.GetChainFromBlockAsync(includeBlocks, startHash).Receive() } @@ -318,7 +318,7 @@ func (r FutureGetDifficultyResult) Receive() (float64, error) { // // See GetDifficulty for the blocking version and more details. 
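The dag.go methods above come in Async/blocking pairs: the Async variant queues the command and returns a future, and Receive blocks until the reply arrives. A small usage sketch under the renamed rpc/client import path, assuming an already connected *client.Client (dialing the server is outside the sketch):

package example

import (
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/rpc/client"
)

// queryDAG fires two requests back to back and only then blocks on the
// results, so both are sent to the server without waiting on each other.
func queryDAG(c *client.Client) {
	selectedTipFuture := c.GetSelectedTipHashAsync()
	blockCountFuture := c.GetBlockCountAsync()

	selectedTip, err := selectedTipFuture.Receive()
	if err != nil {
		log.Fatal(err)
	}
	blockCount, err := blockCountFuture.Receive()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("selected tip %s, block count %d\n", selectedTip, blockCount)
}

Because both commands are queued before either Receive call, the server can process them back to back instead of paying one full round trip per request.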
func (c *Client) GetDifficultyAsync() FutureGetDifficultyResult { - cmd := rpcmodel.NewGetDifficultyCmd() + cmd := model.NewGetDifficultyCmd() return c.sendCmd(cmd) } @@ -334,13 +334,13 @@ type FutureGetBlockDAGInfoResult chan *response // Receive waits for the response promised by the future and returns dag info // result provided by the server. -func (r FutureGetBlockDAGInfoResult) Receive() (*rpcmodel.GetBlockDAGInfoResult, error) { +func (r FutureGetBlockDAGInfoResult) Receive() (*model.GetBlockDAGInfoResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } - var dagInfo rpcmodel.GetBlockDAGInfoResult + var dagInfo model.GetBlockDAGInfoResult if err := json.Unmarshal(res, &dagInfo); err != nil { return nil, errors.Wrap(err, "couldn't decode getBlockDagInfo response") } @@ -353,14 +353,14 @@ func (r FutureGetBlockDAGInfoResult) Receive() (*rpcmodel.GetBlockDAGInfoResult, // // See GetBlockDAGInfo for the blocking version and more details. func (c *Client) GetBlockDAGInfoAsync() FutureGetBlockDAGInfoResult { - cmd := rpcmodel.NewGetBlockDAGInfoCmd() + cmd := model.NewGetBlockDAGInfoCmd() return c.sendCmd(cmd) } // GetBlockDAGInfo returns information related to the processing state of // various dag-specific details such as the current difficulty from the tip // of the main dag. -func (c *Client) GetBlockDAGInfo() (*rpcmodel.GetBlockDAGInfoResult, error) { +func (c *Client) GetBlockDAGInfo() (*model.GetBlockDAGInfoResult, error) { return c.GetBlockDAGInfoAsync().Receive() } @@ -430,7 +430,7 @@ func (c *Client) GetBlockHeaderAsync(blockHash *daghash.Hash) FutureGetBlockHead hash = blockHash.String() } - cmd := rpcmodel.NewGetBlockHeaderCmd(hash, pointers.Bool(false)) + cmd := model.NewGetBlockHeaderCmd(hash, pointers.Bool(false)) return c.sendCmd(cmd) } @@ -448,14 +448,14 @@ type FutureGetBlockHeaderVerboseResult chan *response // Receive waits for the response promised by the future and returns the // data structure of the blockheader requested from the server given its hash. -func (r FutureGetBlockHeaderVerboseResult) Receive() (*rpcmodel.GetBlockHeaderVerboseResult, error) { +func (r FutureGetBlockHeaderVerboseResult) Receive() (*model.GetBlockHeaderVerboseResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal result as a string. - var bh rpcmodel.GetBlockHeaderVerboseResult + var bh model.GetBlockHeaderVerboseResult err = json.Unmarshal(res, &bh) if err != nil { return nil, errors.Wrap(err, "couldn't decode getBlockHeader response") @@ -475,7 +475,7 @@ func (c *Client) GetBlockHeaderVerboseAsync(blockHash *daghash.Hash) FutureGetBl hash = blockHash.String() } - cmd := rpcmodel.NewGetBlockHeaderCmd(hash, pointers.Bool(true)) + cmd := model.NewGetBlockHeaderCmd(hash, pointers.Bool(true)) return c.sendCmd(cmd) } @@ -483,7 +483,7 @@ func (c *Client) GetBlockHeaderVerboseAsync(blockHash *daghash.Hash) FutureGetBl // blockheader from the server given its hash. // // See GetBlockHeader to retrieve a blockheader instead. -func (c *Client) GetBlockHeaderVerbose(blockHash *daghash.Hash) (*rpcmodel.GetBlockHeaderVerboseResult, error) { +func (c *Client) GetBlockHeaderVerbose(blockHash *daghash.Hash) (*model.GetBlockHeaderVerboseResult, error) { return c.GetBlockHeaderVerboseAsync(blockHash).Receive() } @@ -494,14 +494,14 @@ type FutureGetMempoolEntryResult chan *response // Receive waits for the response promised by the future and returns a data // structure with information about the transaction in the memory pool given // its hash. 
-func (r FutureGetMempoolEntryResult) Receive() (*rpcmodel.GetMempoolEntryResult, error) { +func (r FutureGetMempoolEntryResult) Receive() (*model.GetMempoolEntryResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal the result as an array of strings. - var mempoolEntryResult rpcmodel.GetMempoolEntryResult + var mempoolEntryResult model.GetMempoolEntryResult err = json.Unmarshal(res, &mempoolEntryResult) if err != nil { return nil, errors.Wrap(err, "couldn't decode getMempoolEntry response") @@ -516,13 +516,13 @@ func (r FutureGetMempoolEntryResult) Receive() (*rpcmodel.GetMempoolEntryResult, // // See GetMempoolEntry for the blocking version and more details. func (c *Client) GetMempoolEntryAsync(txHash string) FutureGetMempoolEntryResult { - cmd := rpcmodel.NewGetMempoolEntryCmd(txHash) + cmd := model.NewGetMempoolEntryCmd(txHash) return c.sendCmd(cmd) } // GetMempoolEntry returns a data structure with information about the // transaction in the memory pool given its hash. -func (c *Client) GetMempoolEntry(txHash string) (*rpcmodel.GetMempoolEntryResult, error) { +func (c *Client) GetMempoolEntry(txHash string) (*model.GetMempoolEntryResult, error) { return c.GetMempoolEntryAsync(txHash).Receive() } @@ -564,7 +564,7 @@ func (r FutureGetRawMempoolResult) Receive() ([]*daghash.Hash, error) { // // See GetRawMempool for the blocking version and more details. func (c *Client) GetRawMempoolAsync() FutureGetRawMempoolResult { - cmd := rpcmodel.NewGetRawMempoolCmd(pointers.Bool(false)) + cmd := model.NewGetRawMempoolCmd(pointers.Bool(false)) return c.sendCmd(cmd) } @@ -583,7 +583,7 @@ type FutureGetRawMempoolVerboseResult chan *response // Receive waits for the response promised by the future and returns a map of // transaction hashes to an associated data structure with information about the // transaction for all transactions in the memory pool. -func (r FutureGetRawMempoolVerboseResult) Receive() (map[string]rpcmodel.GetRawMempoolVerboseResult, error) { +func (r FutureGetRawMempoolVerboseResult) Receive() (map[string]model.GetRawMempoolVerboseResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err @@ -591,7 +591,7 @@ func (r FutureGetRawMempoolVerboseResult) Receive() (map[string]rpcmodel.GetRawM // Unmarshal the result as a map of strings (tx shas) to their detailed // results. - var mempoolItems map[string]rpcmodel.GetRawMempoolVerboseResult + var mempoolItems map[string]model.GetRawMempoolVerboseResult err = json.Unmarshal(res, &mempoolItems) if err != nil { return nil, errors.Wrap(err, "couldn't decode getRawMempool response") @@ -605,7 +605,7 @@ func (r FutureGetRawMempoolVerboseResult) Receive() (map[string]rpcmodel.GetRawM // // See GetRawMempoolVerbose for the blocking version and more details. func (c *Client) GetRawMempoolVerboseAsync() FutureGetRawMempoolVerboseResult { - cmd := rpcmodel.NewGetRawMempoolCmd(pointers.Bool(true)) + cmd := model.NewGetRawMempoolCmd(pointers.Bool(true)) return c.sendCmd(cmd) } @@ -614,7 +614,7 @@ func (c *Client) GetRawMempoolVerboseAsync() FutureGetRawMempoolVerboseResult { // the memory pool. // // See GetRawMempool to retrieve only the transaction hashes instead. 
-func (c *Client) GetRawMempoolVerbose() (map[string]rpcmodel.GetRawMempoolVerboseResult, error) {
+func (c *Client) GetRawMempoolVerbose() (map[string]model.GetRawMempoolVerboseResult, error) {
 	return c.GetRawMempoolVerboseAsync().Receive()
 }
 
@@ -624,14 +624,14 @@ type FutureGetSubnetworkResult chan *response
 
 // Receive waits for the response promised by the future and returns information
 // regarding the requested subnetwork
-func (r FutureGetSubnetworkResult) Receive() (*rpcmodel.GetSubnetworkResult, error) {
+func (r FutureGetSubnetworkResult) Receive() (*model.GetSubnetworkResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
 	// Unmarshal result as a getSubnetwork result object.
-	var getSubnetworkResult *rpcmodel.GetSubnetworkResult
+	var getSubnetworkResult *model.GetSubnetworkResult
 	err = json.Unmarshal(res, &getSubnetworkResult)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't decode getSubnetwork response")
@@ -646,12 +646,12 @@ func (r FutureGetSubnetworkResult) Receive() (*rpcmodel.GetSubnetworkResult, err
 //
 // See GetSubnetwork for the blocking version and more details.
 func (c *Client) GetSubnetworkAsync(subnetworkID string) FutureGetSubnetworkResult {
-	cmd := rpcmodel.NewGetSubnetworkCmd(subnetworkID)
+	cmd := model.NewGetSubnetworkCmd(subnetworkID)
 	return c.sendCmd(cmd)
 }
 
 // GetSubnetwork provides information about a subnetwork given its ID.
-func (c *Client) GetSubnetwork(subnetworkID string) (*rpcmodel.GetSubnetworkResult, error) {
+func (c *Client) GetSubnetwork(subnetworkID string) (*model.GetSubnetworkResult, error) {
 	return c.GetSubnetworkAsync(subnetworkID).Receive()
 }
 
@@ -661,7 +661,7 @@ type FutureGetTxOutResult chan *response
 
 // Receive waits for the response promised by the future and returns a
 // transaction given its hash.
-func (r FutureGetTxOutResult) Receive() (*rpcmodel.GetTxOutResult, error) {
+func (r FutureGetTxOutResult) Receive() (*model.GetTxOutResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
@@ -674,7 +674,7 @@ func (r FutureGetTxOutResult) Receive() (*rpcmodel.GetTxOutResult, error) {
 	}
 
 	// Unmarshal result as an gettxout result object.
-	var txOutInfo *rpcmodel.GetTxOutResult
+	var txOutInfo *model.GetTxOutResult
 	err = json.Unmarshal(res, &txOutInfo)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't decode getTxOut response")
@@ -694,13 +694,13 @@ func (c *Client) GetTxOutAsync(txHash *daghash.Hash, index uint32, mempool bool)
 		hash = txHash.String()
 	}
 
-	cmd := rpcmodel.NewGetTxOutCmd(hash, index, &mempool)
+	cmd := model.NewGetTxOutCmd(hash, index, &mempool)
 	return c.sendCmd(cmd)
 }
 
 // GetTxOut returns the transaction output info if it's unspent and
 // nil, otherwise.
-func (c *Client) GetTxOut(txHash *daghash.Hash, index uint32, mempool bool) (*rpcmodel.GetTxOutResult, error) {
+func (c *Client) GetTxOut(txHash *daghash.Hash, index uint32, mempool bool) (*model.GetTxOutResult, error) {
 	return c.GetTxOutAsync(txHash, index, mempool).Receive()
 }
 
@@ -710,13 +710,13 @@ type FutureRescanBlocksResult chan *response
 
 // Receive waits for the response promised by the future and returns the
 // discovered rescanblocks data.
-func (r FutureRescanBlocksResult) Receive() ([]rpcmodel.RescannedBlock, error) {
+func (r FutureRescanBlocksResult) Receive() ([]model.RescannedBlock, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
-	var rescanBlocksResult []rpcmodel.RescannedBlock
+	var rescanBlocksResult []model.RescannedBlock
 	err = json.Unmarshal(res, &rescanBlocksResult)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't decode rescanBlocks response")
@@ -736,13 +736,13 @@ func (c *Client) RescanBlocksAsync(blockHashes []*daghash.Hash) FutureRescanBloc
 		strBlockHashes[i] = blockHashes[i].String()
 	}
 
-	cmd := rpcmodel.NewRescanBlocksCmd(strBlockHashes)
+	cmd := model.NewRescanBlocksCmd(strBlockHashes)
 	return c.sendCmd(cmd)
 }
 
 // RescanBlocks rescans the blocks identified by blockHashes, in order, using
 // the client's loaded transaction filter. The blocks do not need to be on the
 // main dag, but they do need to be adjacent to each other.
-func (c *Client) RescanBlocks(blockHashes []*daghash.Hash) ([]rpcmodel.RescannedBlock, error) {
+func (c *Client) RescanBlocks(blockHashes []*daghash.Hash) ([]model.RescannedBlock, error) {
 	return c.RescanBlocksAsync(blockHashes).Receive()
 }
diff --git a/rpcclient/doc.go b/rpc/client/doc.go
similarity index 96%
rename from rpcclient/doc.go
rename to rpc/client/doc.go
index 671a88786..f54cee068 100644
--- a/rpcclient/doc.go
+++ b/rpc/client/doc.go
@@ -1,5 +1,5 @@
 /*
-Package rpcclient implements a websocket-enabled kaspa JSON-RPC client.
+Package client implements a websocket-enabled kaspa JSON-RPC client.
 
 Overview
 
@@ -116,15 +116,15 @@ the type can vary, but usually will be best handled by simply showing/logging
 it.
 
 The third category of errors, that is errors returned by the server, can be
-detected by type asserting the error in a *rpcmodel.RPCError. For example, to
+detected by type asserting the error in a *model.RPCError. For example, to
 detect if a command is unimplemented by the remote RPC server:
 
 	netTotals, err := client.GetNetTotals()
 	if err != nil {
-		var jErr *rpcmodel.RPCError
+		var jErr *model.RPCError
 		if errors.As(err, jErr) {
 			switch jErr.Code {
-			case rpcmodel.ErrRPCUnimplemented:
+			case model.ErrRPCUnimplemented:
 				// Handle not implemented error
 
 			// Handle other specific errors you care about
@@ -146,4 +146,4 @@ The following full-blown client examples are in the examples directory:
 	Connects to a kaspad RPC server using TLS-secured websockets, registers for
 	block added notifications, and gets the current block count
 */
-package rpcclient
+package client
diff --git a/rpcclient/examples/httppost/README.md b/rpc/client/examples/httppost/README.md
similarity index 100%
rename from rpcclient/examples/httppost/README.md
rename to rpc/client/examples/httppost/README.md
diff --git a/rpcclient/examples/httppost/main.go b/rpc/client/examples/httppost/main.go
similarity index 86%
rename from rpcclient/examples/httppost/main.go
rename to rpc/client/examples/httppost/main.go
index adc190959..ee50e7ef2 100644
--- a/rpcclient/examples/httppost/main.go
+++ b/rpc/client/examples/httppost/main.go
@@ -7,12 +7,12 @@ package main
 import (
 	"log"
 
-	"github.com/kaspanet/kaspad/rpcclient"
+	"github.com/kaspanet/kaspad/rpc/client"
 )
 
 func main() {
 	// Connect to a local kaspa RPC server using HTTP POST mode.
-	connCfg := &rpcclient.ConnConfig{
+	connCfg := &client.ConnConfig{
 		Host: "localhost:8332",
 		User: "yourrpcuser",
 		Pass: "yourrpcpass",
@@ -21,7 +21,7 @@ func main() {
 	}
 	// Notice the notification parameter is nil since notifications are
 	// not supported in HTTP POST mode.
-	client, err := rpcclient.New(connCfg, nil)
+	client, err := client.New(connCfg, nil)
 	if err != nil {
 		log.Fatal(err)
 	}
diff --git a/rpcclient/examples/websockets/README.md b/rpc/client/examples/websockets/README.md
similarity index 100%
rename from rpcclient/examples/websockets/README.md
rename to rpc/client/examples/websockets/README.md
diff --git a/rpcclient/examples/websockets/main.go b/rpc/client/examples/websockets/main.go
similarity index 91%
rename from rpcclient/examples/websockets/main.go
rename to rpc/client/examples/websockets/main.go
index 0273861ac..1dcd30251 100644
--- a/rpcclient/examples/websockets/main.go
+++ b/rpc/client/examples/websockets/main.go
@@ -10,7 +10,7 @@ import (
 	"path/filepath"
 	"time"
 
-	"github.com/kaspanet/kaspad/rpcclient"
+	"github.com/kaspanet/kaspad/rpc/client"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/wire"
 )
@@ -20,7 +20,7 @@ func main() {
 	// Also note most of these handlers will only be called if you register
 	// for notifications. See the documentation of the rpcclient
 	// NotificationHandlers type for more details about each handler.
-	ntfnHandlers := rpcclient.NotificationHandlers{
+	ntfnHandlers := client.NotificationHandlers{
 		OnFilteredBlockAdded: func(blueScore uint64, header *wire.BlockHeader, txns []*util.Tx) {
 			log.Printf("Block added: %s (%d) %s",
 				header.BlockHash(), blueScore, header.Timestamp)
@@ -33,14 +33,14 @@ func main() {
 	if err != nil {
 		log.Fatal(err)
 	}
-	connCfg := &rpcclient.ConnConfig{
+	connCfg := &client.ConnConfig{
 		Host:         "localhost:16110",
 		Endpoint:     "ws",
 		User:         "yourrpcuser",
 		Pass:         "yourrpcpass",
 		Certificates: certs,
 	}
-	client, err := rpcclient.New(connCfg, &ntfnHandlers)
+	client, err := client.New(connCfg, &ntfnHandlers)
 	if err != nil {
 		log.Fatal(err)
 	}
diff --git a/rpcclient/infrastructure.go b/rpc/client/infrastructure.go
similarity index 98%
rename from rpcclient/infrastructure.go
rename to rpc/client/infrastructure.go
index 457932d88..c5b8b7e3a 100644
--- a/rpcclient/infrastructure.go
+++ b/rpc/client/infrastructure.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package rpcclient
+package client
 
 import (
 	"bytes"
@@ -26,7 +26,7 @@ import (
 	"github.com/btcsuite/go-socks/socks"
 	"github.com/btcsuite/websocket"
 
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 )
 
 var (
@@ -252,13 +252,13 @@ func (c *Client) trackRegisteredNtfns(cmd interface{}) {
 	defer c.ntfnStateLock.Unlock()
 
 	switch bcmd := cmd.(type) {
-	case *rpcmodel.NotifyBlocksCmd:
+	case *model.NotifyBlocksCmd:
 		c.ntfnState.notifyBlocks = true
 
-	case *rpcmodel.NotifyChainChangesCmd:
+	case *model.NotifyChainChangesCmd:
 		c.ntfnState.notifyChainChanges = true
 
-	case *rpcmodel.NotifyNewTransactionsCmd:
+	case *model.NotifyNewTransactionsCmd:
 		if bcmd.Verbose != nil && *bcmd.Verbose {
 			c.ntfnState.notifyNewTxVerbose = true
 		} else {
@@ -289,8 +289,8 @@ type (
 	// rawResponse is a partially-unmarshaled JSON-RPC response. For this
 	// to be valid (according to JSON-RPC 1.0 spec), ID may not be nil.
 	rawResponse struct {
-		Result json.RawMessage    `json:"result"`
-		Error  *rpcmodel.RPCError `json:"error"`
+		Result json.RawMessage `json:"result"`
+		Error  *model.RPCError `json:"error"`
 	}
 )
@@ -302,7 +302,7 @@ type response struct {
 }
 
 // result checks whether the unmarshaled response contains a non-nil error,
-// returning an unmarshaled rpcmodel.RPCError (or an unmarshaling error) if so.
+// returning an unmarshaled model.RPCError (or an unmarshaling error) if so.
 // If the response is not an error, the raw bytes of the request are
 // returned for further unmashaling into specific result types.
 func (r rawResponse) result() (result []byte, err error) {
@@ -896,14 +896,14 @@ func (c *Client) sendRequest(data *jsonRequestData) chan *response {
 // configuration of the client.
 func (c *Client) sendCmd(cmd interface{}) chan *response {
 	// Get the method associated with the command.
-	method, err := rpcmodel.CommandMethod(cmd)
+	method, err := model.CommandMethod(cmd)
 	if err != nil {
 		return newFutureError(err)
 	}
 
 	// Marshal the command.
 	id := c.NextID()
-	marshalledJSON, err := rpcmodel.MarshalCommand(id, cmd)
+	marshalledJSON, err := model.MarshalCommand(id, cmd)
 	if err != nil {
 		return newFutureError(err)
 	}
diff --git a/rpcclient/log.go b/rpc/client/log.go
similarity index 98%
rename from rpcclient/log.go
rename to rpc/client/log.go
index 8d0c9b98f..f8a6d80a1 100644
--- a/rpcclient/log.go
+++ b/rpc/client/log.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package rpcclient
+package client
 
 import (
 	"github.com/kaspanet/kaspad/logs"
diff --git a/rpcclient/mining.go b/rpc/client/mining.go
similarity index 86%
rename from rpcclient/mining.go
rename to rpc/client/mining.go
index e19556cc4..a063875aa 100644
--- a/rpcclient/mining.go
+++ b/rpc/client/mining.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package rpcclient
+package client
 
 import (
 	"encoding/hex"
@@ -10,7 +10,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/util/daghash"
 	"github.com/kaspanet/kaspad/wire"
@@ -47,7 +47,7 @@ func (r FutureSubmitBlockResult) Receive() error {
 // returned instance.
 //
 // See SubmitBlock for the blocking version and more details.
-func (c *Client) SubmitBlockAsync(block *util.Block, options *rpcmodel.SubmitBlockOptions) FutureSubmitBlockResult {
+func (c *Client) SubmitBlockAsync(block *util.Block, options *model.SubmitBlockOptions) FutureSubmitBlockResult {
 	blockHex := ""
 	if block != nil {
 		blockBytes, err := block.Bytes()
@@ -58,12 +58,12 @@ func (c *Client) SubmitBlockAsync(block *util.Block, options *rpcmodel.SubmitBlo
 		blockHex = hex.EncodeToString(blockBytes)
 	}
 
-	cmd := rpcmodel.NewSubmitBlockCmd(blockHex, options)
+	cmd := model.NewSubmitBlockCmd(blockHex, options)
 	return c.sendCmd(cmd)
 }
 
 // SubmitBlock attempts to submit a new block into the kaspa network.
-func (c *Client) SubmitBlock(block *util.Block, options *rpcmodel.SubmitBlockOptions) error {
+func (c *Client) SubmitBlock(block *util.Block, options *model.SubmitBlockOptions) error {
 	return c.SubmitBlockAsync(block, options).Receive()
 }
 
@@ -77,24 +77,24 @@ type FutureGetBlockTemplateResult chan *response
 //
 // See GetBlockTemplate for the blocking version and more details
 func (c *Client) GetBlockTemplateAsync(payAddress string, longPollID string) FutureGetBlockTemplateResult {
-	request := &rpcmodel.TemplateRequest{
+	request := &model.TemplateRequest{
 		Mode:       "template",
 		LongPollID: longPollID,
 		PayAddress: payAddress,
 	}
 
-	cmd := rpcmodel.NewGetBlockTemplateCmd(request)
+	cmd := model.NewGetBlockTemplateCmd(request)
 	return c.sendCmd(cmd)
 }
 
 // Receive waits for the response promised by the future and returns an error if
 // any occurred when submitting the block.
-func (r FutureGetBlockTemplateResult) Receive() (*rpcmodel.GetBlockTemplateResult, error) {
+func (r FutureGetBlockTemplateResult) Receive() (*model.GetBlockTemplateResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
-	var result rpcmodel.GetBlockTemplateResult
+	var result model.GetBlockTemplateResult
 	if err := json.Unmarshal(res, &result); err != nil {
 		return nil, err
 	}
@@ -102,12 +102,12 @@ func (r FutureGetBlockTemplateResult) Receive() (*rpcmodel.GetBlockTemplateResul
 }
 
 // GetBlockTemplate request a block template from the server, to mine upon
-func (c *Client) GetBlockTemplate(payAddress string, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
+func (c *Client) GetBlockTemplate(payAddress string, longPollID string) (*model.GetBlockTemplateResult, error) {
 	return c.GetBlockTemplateAsync(payAddress, longPollID).Receive()
 }
 
 // ConvertGetBlockTemplateResultToBlock Accepts a GetBlockTemplateResult and parses it into a Block
-func ConvertGetBlockTemplateResultToBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) {
+func ConvertGetBlockTemplateResultToBlock(template *model.GetBlockTemplateResult) (*util.Block, error) {
 	// parse parent hashes
 	parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
 	for i, parentHash := range template.ParentHashes {
diff --git a/rpcclient/net.go b/rpc/client/net.go
similarity index 76%
rename from rpcclient/net.go
rename to rpc/client/net.go
index dbd126358..d02416fa3 100644
--- a/rpcclient/net.go
+++ b/rpc/client/net.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package rpcclient
+package client
 
 import (
 	"bytes"
@@ -12,7 +12,7 @@ import (
 	"github.com/kaspanet/kaspad/util/pointers"
 	"github.com/kaspanet/kaspad/wire"
 
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 )
 
 // FutureAddNodeResult is a future promise to deliver the result of an
 // AddNodeAsync RPC invocation (or an applicable error).
@@ -32,7 +32,7 @@ func (r FutureAddNodeResult) Receive() error {
 //
 // See AddNode for the blocking version and more details.
 func (c *Client) AddManualNodeAsync(host string) FutureAddNodeResult {
-	cmd := rpcmodel.NewAddManualNodeCmd(host, pointers.Bool(false))
+	cmd := model.NewConnectCmd(host, pointers.Bool(false))
 	return c.sendCmd(cmd)
 }
 
@@ -45,87 +45,6 @@ func (c *Client) AddManualNode(host string) error {
 	return c.AddManualNodeAsync(host).Receive()
 }
 
-// FutureGetManualNodeInfoResult is a future promise to deliver the result of a
-// GetManualNodeInfoAsync RPC invocation (or an applicable error).
-type FutureGetManualNodeInfoResult chan *response
-
-// Receive waits for the response promised by the future and returns information
-// about manually added (persistent) peers.
-func (r FutureGetManualNodeInfoResult) Receive() ([]rpcmodel.GetManualNodeInfoResult, error) {
-	res, err := receiveFuture(r)
-	if err != nil {
-		return nil, err
-	}
-
-	// Unmarshal as an array of getmanualnodeinfo result objects.
-	var nodeInfo []rpcmodel.GetManualNodeInfoResult
-	err = json.Unmarshal(res, &nodeInfo)
-	if err != nil {
-		return nil, err
-	}
-
-	return nodeInfo, nil
-}
-
-// GetManualNodeInfoAsync returns an instance of a type that can be used to get
-// the result of the RPC at some future time by invoking the Receive function on
-// the returned instance.
-//
-// See GetManualNodeInfo for the blocking version and more details.
-func (c *Client) GetManualNodeInfoAsync(peer string) FutureGetManualNodeInfoResult {
-	cmd := rpcmodel.NewGetManualNodeInfoCmd(peer, nil)
-	return c.sendCmd(cmd)
-}
-
-// GetManualNodeInfo returns information about manually added (persistent) peers.
-//
-// See GetManualNodeInfoNoDNS to retrieve only a list of the added (persistent)
-// peers.
-func (c *Client) GetManualNodeInfo(peer string) ([]rpcmodel.GetManualNodeInfoResult, error) {
-	return c.GetManualNodeInfoAsync(peer).Receive()
-}
-
-// FutureGetManualNodeInfoNoDNSResult is a future promise to deliver the result
-// of a GetManualNodeInfoNoDNSAsync RPC invocation (or an applicable error).
-type FutureGetManualNodeInfoNoDNSResult chan *response
-
-// Receive waits for the response promised by the future and returns a list of
-// manually added (persistent) peers.
-func (r FutureGetManualNodeInfoNoDNSResult) Receive() ([]string, error) {
-	res, err := receiveFuture(r)
-	if err != nil {
-		return nil, err
-	}
-
-	// Unmarshal result as an array of strings.
-	var nodes []string
-	err = json.Unmarshal(res, &nodes)
-	if err != nil {
-		return nil, err
-	}
-
-	return nodes, nil
-}
-
-// GetManualNodeInfoNoDNSAsync returns an instance of a type that can be used to
-// get the result of the RPC at some future time by invoking the Receive
-// function on the returned instance.
-//
-// See GetManualNodeInfoNoDNS for the blocking version and more details.
-func (c *Client) GetManualNodeInfoNoDNSAsync(peer string) FutureGetManualNodeInfoNoDNSResult {
-	cmd := rpcmodel.NewGetManualNodeInfoCmd(peer, pointers.Bool(false))
-	return c.sendCmd(cmd)
-}
-
-// GetManualNodeInfoNoDNS returns a list of manually added (persistent) peers.
-// This works by setting the dns flag to false in the underlying RPC.
-//
-// See GetManualNodeInfo to obtain more information about each added (persistent)
-// peer.
-func (c *Client) GetManualNodeInfoNoDNS(peer string) ([]string, error) {
-	return c.GetManualNodeInfoNoDNSAsync(peer).Receive()
-}
-
 // FutureGetConnectionCountResult is a future promise to deliver the result
 // of a GetConnectionCountAsync RPC invocation (or an applicable error).
 type FutureGetConnectionCountResult chan *response
@@ -154,7 +73,7 @@ func (r FutureGetConnectionCountResult) Receive() (int64, error) {
 //
 // See GetConnectionCount for the blocking version and more details.
 func (c *Client) GetConnectionCountAsync() FutureGetConnectionCountResult {
-	cmd := rpcmodel.NewGetConnectionCountCmd()
+	cmd := model.NewGetConnectionCountCmd()
 	return c.sendCmd(cmd)
 }
 
@@ -180,7 +99,7 @@ func (r FuturePingResult) Receive() error {
 //
 // See Ping for the blocking version and more details.
 func (c *Client) PingAsync() FuturePingResult {
-	cmd := rpcmodel.NewPingCmd()
+	cmd := model.NewPingCmd()
 	return c.sendCmd(cmd)
 }
 
@@ -198,14 +117,14 @@ type FutureGetConnectedPeerInfo chan *response
 
 // Receive waits for the response promised by the future and returns data about
 // each connected network peer.
-func (r FutureGetConnectedPeerInfo) Receive() ([]rpcmodel.GetConnectedPeerInfoResult, error) {
+func (r FutureGetConnectedPeerInfo) Receive() ([]model.GetConnectedPeerInfoResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
 	// Unmarshal result as an array of getConnectedPeerInfo result objects.
-	var peerInfo []rpcmodel.GetConnectedPeerInfoResult
+	var peerInfo []model.GetConnectedPeerInfoResult
 	err = json.Unmarshal(res, &peerInfo)
 	if err != nil {
 		return nil, err
@@ -220,12 +139,12 @@ func (r FutureGetConnectedPeerInfo) Receive() ([]rpcmodel.GetConnectedPeerInfoRe
 //
 // See GetConnectedPeerInfo for the blocking version and more details.
 func (c *Client) GetConnectedPeerInfoAsync() FutureGetConnectedPeerInfo {
-	cmd := rpcmodel.NewGetConnectedPeerInfoCmd()
+	cmd := model.NewGetConnectedPeerInfoCmd()
 	return c.sendCmd(cmd)
 }
 
 // GetConnectedPeerInfo returns data about each connected network peer.
-func (c *Client) GetConnectedPeerInfo() ([]rpcmodel.GetConnectedPeerInfoResult, error) {
+func (c *Client) GetConnectedPeerInfo() ([]model.GetConnectedPeerInfoResult, error) {
 	return c.GetConnectedPeerInfoAsync().Receive()
 }
 
@@ -235,14 +154,14 @@ type FutureGetNetTotalsResult chan *response
 
 // Receive waits for the response promised by the future and returns network
 // traffic statistics.
-func (r FutureGetNetTotalsResult) Receive() (*rpcmodel.GetNetTotalsResult, error) {
+func (r FutureGetNetTotalsResult) Receive() (*model.GetNetTotalsResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
 	// Unmarshal result as a getnettotals result object.
-	var totals rpcmodel.GetNetTotalsResult
+	var totals model.GetNetTotalsResult
 	err = json.Unmarshal(res, &totals)
 	if err != nil {
 		return nil, err
@@ -257,12 +176,12 @@ func (r FutureGetNetTotalsResult) Receive() (*rpcmodel.GetNetTotalsResult, error
 //
 // See GetNetTotals for the blocking version and more details.
 func (c *Client) GetNetTotalsAsync() FutureGetNetTotalsResult {
-	cmd := rpcmodel.NewGetNetTotalsCmd()
+	cmd := model.NewGetNetTotalsCmd()
 	return c.sendCmd(cmd)
 }
 
 // GetNetTotals returns network traffic statistics.
-func (c *Client) GetNetTotals() (*rpcmodel.GetNetTotalsResult, error) {
+func (c *Client) GetNetTotals() (*model.GetNetTotalsResult, error) {
 	return c.GetNetTotalsAsync().Receive()
 }
 
@@ -294,7 +213,7 @@ func (r FutureDebugLevelResult) Receive() (string, error) {
 //
 // See DebugLevel for the blocking version and more details.
 func (c *Client) DebugLevelAsync(levelSpec string) FutureDebugLevelResult {
-	cmd := rpcmodel.NewDebugLevelCmd(levelSpec)
+	cmd := model.NewDebugLevelCmd(levelSpec)
 	return c.sendCmd(cmd)
 }
 
@@ -350,12 +269,12 @@ func (r FutureGetSelectedTipResult) Receive() (*wire.MsgBlock, error) {
 //
 // See GetSelectedTip for the blocking version and more details.
 func (c *Client) GetSelectedTipAsync() FutureGetSelectedTipResult {
-	cmd := rpcmodel.NewGetSelectedTipCmd(pointers.Bool(false), pointers.Bool(false))
+	cmd := model.NewGetSelectedTipCmd(pointers.Bool(false), pointers.Bool(false))
 	return c.sendCmd(cmd)
 }
 
 // GetSelectedTip returns the block of the selected DAG tip
-func (c *Client) GetSelectedTip() (*rpcmodel.GetBlockVerboseResult, error) {
+func (c *Client) GetSelectedTip() (*model.GetBlockVerboseResult, error) {
 	return c.GetSelectedTipVerboseAsync().Receive()
 }
 
@@ -365,14 +284,14 @@ type FutureGetSelectedTipVerboseResult chan *response
 
 // Receive waits for the response promised by the future and returns the data
 // structure from the server with information about the requested block.
-func (r FutureGetSelectedTipVerboseResult) Receive() (*rpcmodel.GetBlockVerboseResult, error) {
+func (r FutureGetSelectedTipVerboseResult) Receive() (*model.GetBlockVerboseResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
 	// Unmarshal the raw result into a BlockResult.
-	var blockResult rpcmodel.GetBlockVerboseResult
+	var blockResult model.GetBlockVerboseResult
 	err = json.Unmarshal(res, &blockResult)
 	if err != nil {
 		return nil, err
@@ -386,7 +305,7 @@ func (r FutureGetSelectedTipVerboseResult) Receive() (*rpcmodel.GetBlockVerboseR
 //
 // See GeSelectedTipBlockVerbose for the blocking version and more details.
 func (c *Client) GetSelectedTipVerboseAsync() FutureGetSelectedTipVerboseResult {
-	cmd := rpcmodel.NewGetSelectedTipCmd(pointers.Bool(true), pointers.Bool(false))
+	cmd := model.NewGetSelectedTipCmd(pointers.Bool(true), pointers.Bool(false))
 	return c.sendCmd(cmd)
 }
 
@@ -418,7 +337,7 @@ func (r FutureGetCurrentNetResult) Receive() (wire.KaspaNet, error) {
 //
 // See GetCurrentNet for the blocking version and more details.
 func (c *Client) GetCurrentNetAsync() FutureGetCurrentNetResult {
-	cmd := rpcmodel.NewGetCurrentNetCmd()
+	cmd := model.NewGetCurrentNetCmd()
 	return c.sendCmd(cmd)
 }
 
@@ -470,7 +389,7 @@ func (c *Client) GetTopHeadersAsync(highHash *daghash.Hash) FutureGetHeadersResu
 	if highHash != nil {
 		hash = pointers.String(highHash.String())
 	}
-	cmd := rpcmodel.NewGetTopHeadersCmd(hash)
+	cmd := model.NewGetTopHeadersCmd(hash)
 	return c.sendCmd(cmd)
 }
 
@@ -492,7 +411,7 @@ func (c *Client) GetHeadersAsync(lowHash, highHash *daghash.Hash) FutureGetHeade
 	if highHash != nil {
 		highHashStr = highHash.String()
 	}
-	cmd := rpcmodel.NewGetHeadersCmd(lowHashStr, highHashStr)
+	cmd := model.NewGetHeadersCmd(lowHashStr, highHashStr)
 	return c.sendCmd(cmd)
 }
 
@@ -509,14 +428,14 @@ type FutureSessionResult chan *response
 
 // Receive waits for the response promised by the future and returns the
 // session result.
-func (r FutureSessionResult) Receive() (*rpcmodel.SessionResult, error) {
+func (r FutureSessionResult) Receive() (*model.SessionResult, error) {
 	res, err := receiveFuture(r)
 	if err != nil {
 		return nil, err
 	}
 
 	// Unmarshal result as a session result object.
-	var session rpcmodel.SessionResult
+	var session model.SessionResult
 	err = json.Unmarshal(res, &session)
 	if err != nil {
 		return nil, err
@@ -536,14 +455,14 @@ func (c *Client) SessionAsync() FutureSessionResult {
 		return newFutureError(ErrWebsocketsRequired)
 	}
 
-	cmd := rpcmodel.NewSessionCmd()
+	cmd := model.NewSessionCmd()
 	return c.sendCmd(cmd)
 }
 
 // Session returns details regarding a websocket client's current connection.
 //
 // This RPC requires the client to be running in websocket mode.
-func (c *Client) Session() (*rpcmodel.SessionResult, error) {
+func (c *Client) Session() (*model.SessionResult, error) {
 	return c.SessionAsync().Receive()
 }
 
@@ -553,7 +472,7 @@ type FutureVersionResult chan *response
 
 // Receive waits for the response promised by the future and returns the version
 // result.
-func (r FutureVersionResult) Receive() (map[string]rpcmodel.VersionResult,
+func (r FutureVersionResult) Receive() (map[string]model.VersionResult,
 	error) {
 	res, err := receiveFuture(r)
 	if err != nil {
@@ -561,7 +480,7 @@ func (r FutureVersionResult) Receive() (map[string]rpcmodel.VersionResult,
 	}
 
 	// Unmarshal result as a version result object.
-	var vr map[string]rpcmodel.VersionResult
+	var vr map[string]model.VersionResult
 	err = json.Unmarshal(res, &vr)
 	if err != nil {
 		return nil, err
@@ -576,11 +495,11 @@ func (r FutureVersionResult) Receive() (map[string]rpcmodel.VersionResult,
 //
 // See Version for the blocking version and more details.
 func (c *Client) VersionAsync() FutureVersionResult {
-	cmd := rpcmodel.NewVersionCmd()
+	cmd := model.NewVersionCmd()
 	return c.sendCmd(cmd)
 }
 
 // Version returns information about the server's JSON-RPC API versions.
-func (c *Client) Version() (map[string]rpcmodel.VersionResult, error) {
+func (c *Client) Version() (map[string]model.VersionResult, error) {
 	return c.VersionAsync().Receive()
 }
diff --git a/rpcclient/notify.go b/rpc/client/notify.go
similarity index 96%
rename from rpcclient/notify.go
rename to rpc/client/notify.go
index 0a0346b0e..bc483cf51 100644
--- a/rpcclient/notify.go
+++ b/rpc/client/notify.go
@@ -3,7 +3,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package rpcclient
+package client
 
 import (
 	"bytes"
@@ -13,7 +13,7 @@ import (
 	"github.com/kaspanet/kaspad/util/mstime"
 	"github.com/pkg/errors"
 
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/util/daghash"
 	"github.com/kaspanet/kaspad/wire"
@@ -112,7 +112,7 @@ type NotificationHandlers struct {
 	// memory pool. It will only be invoked if a preceding call to
 	// NotifyNewTransactions with the verbose flag set to true has been
 	// made to register for the notification and the function is non-nil.
-	OnTxAcceptedVerbose func(txDetails *rpcmodel.TxRawResult)
+	OnTxAcceptedVerbose func(txDetails *model.TxRawResult)
 
 	// OnUnknownNotification is invoked when an unrecognized notification
 	// is received. This typically means the notification handling code
@@ -136,7 +136,7 @@ func (c *Client) handleNotification(ntfn *rawNotification) {
 	switch ntfn.Method {
 
 	// ChainChangedNtfnMethod
-	case rpcmodel.ChainChangedNtfnMethod:
+	case model.ChainChangedNtfnMethod:
 		// Ignore the notification if the client is not interested in
 		// it.
 		if c.ntfnHandlers.OnChainChanged == nil {
@@ -153,7 +153,7 @@ func (c *Client) handleNotification(ntfn *rawNotification) {
 		c.ntfnHandlers.OnChainChanged(removedChainBlockHashes, addedChainBlocks)
 
 	// OnFilteredBlockAdded
-	case rpcmodel.FilteredBlockAddedNtfnMethod:
+	case model.FilteredBlockAddedNtfnMethod:
 		// Ignore the notification if the client is not interested in
 		// it.
 		if c.ntfnHandlers.OnFilteredBlockAdded == nil {
@@ -172,7 +172,7 @@ func (c *Client) handleNotification(ntfn *rawNotification) {
 			blockHeader, transactions)
 
 	// OnRelevantTxAccepted
-	case rpcmodel.RelevantTxAcceptedNtfnMethod:
+	case model.RelevantTxAcceptedNtfnMethod:
 		// Ignore the notification if the client is not interested in
 		// it.
 		if c.ntfnHandlers.OnRelevantTxAccepted == nil {
@@ -189,7 +189,7 @@ func (c *Client) handleNotification(ntfn *rawNotification) {
 		c.ntfnHandlers.OnRelevantTxAccepted(transaction)
 
 	// OnTxAccepted
-	case rpcmodel.TxAcceptedNtfnMethod:
+	case model.TxAcceptedNtfnMethod:
 		// Ignore the notification if the client is not interested in
 		// it.
 		if c.ntfnHandlers.OnTxAccepted == nil {
@@ -206,7 +206,7 @@ func (c *Client) handleNotification(ntfn *rawNotification) {
 		c.ntfnHandlers.OnTxAccepted(hash, amt)
 
 	// OnTxAcceptedVerbose
-	case rpcmodel.TxAcceptedVerboseNtfnMethod:
+	case model.TxAcceptedVerboseNtfnMethod:
 		// Ignore the notification if the client is not interested in
 		// it.
 		if c.ntfnHandlers.OnTxAcceptedVerbose == nil {
@@ -264,7 +264,7 @@ func parseChainChangedParams(params []json.RawMessage) (removedChainBlockHashes
 	}
 
 	// Unmarshal first parameter as a raw transaction result object.
-	var rawParam rpcmodel.ChainChangedRawParam
+	var rawParam model.ChainChangedRawParam
 	err = json.Unmarshal(params[0], &rawParam)
 	if err != nil {
 		return nil, nil, err
@@ -425,7 +425,7 @@ func parseTxAcceptedNtfnParams(params []json.RawMessage) (*daghash.Hash,
 
 // parseTxAcceptedVerboseNtfnParams parses out details about a raw transaction
 // from the parameters of a txacceptedverbose notification.
-func parseTxAcceptedVerboseNtfnParams(params []json.RawMessage) (*rpcmodel.TxRawResult,
+func parseTxAcceptedVerboseNtfnParams(params []json.RawMessage) (*model.TxRawResult,
 	error) {
 
 	if len(params) != 1 {
@@ -433,7 +433,7 @@ func parseTxAcceptedVerboseNtfnParams(params []json.RawMessage) (*rpcmodel.TxRaw
 	}
 
 	// Unmarshal first parameter as a raw transaction result object.
-	var rawTx rpcmodel.TxRawResult
+	var rawTx model.TxRawResult
 	err := json.Unmarshal(params[0], &rawTx)
 	if err != nil {
 		return nil, err
@@ -473,7 +473,7 @@ func (c *Client) NotifyBlocksAsync() FutureNotifyBlocksResult {
 		return newNilFutureResult()
 	}
 
-	cmd := rpcmodel.NewNotifyBlocksCmd()
+	cmd := model.NewNotifyBlocksCmd()
 	return c.sendCmd(cmd)
 }
 
@@ -516,7 +516,7 @@ func (c *Client) NotifyChainChangesAsync() FutureNotifyBlocksResult {
 		return newNilFutureResult()
 	}
 
-	cmd := rpcmodel.NewNotifyChainChangesCmd()
+	cmd := model.NewNotifyChainChangesCmd()
 	return c.sendCmd(cmd)
 }
 
@@ -559,7 +559,7 @@ func (c *Client) NotifyNewTransactionsAsync(verbose bool, subnetworkID *string)
 		return newNilFutureResult()
 	}
 
-	cmd := rpcmodel.NewNotifyNewTransactionsCmd(&verbose, subnetworkID)
+	cmd := model.NewNotifyNewTransactionsCmd(&verbose, subnetworkID)
 	return c.sendCmd(cmd)
 }
 
@@ -599,15 +599,15 @@ func (c *Client) LoadTxFilterAsync(reload bool, addresses []util.Address,
 	for i, a := range addresses {
 		addrStrs[i] = a.EncodeAddress()
 	}
-	outpointObjects := make([]rpcmodel.Outpoint, len(outpoints))
+	outpointObjects := make([]model.Outpoint, len(outpoints))
 	for i := range outpoints {
-		outpointObjects[i] = rpcmodel.Outpoint{
+		outpointObjects[i] = model.Outpoint{
 			TxID:  outpoints[i].TxID.String(),
 			Index: outpoints[i].Index,
 		}
 	}
 
-	cmd := rpcmodel.NewLoadTxFilterCmd(reload, addrStrs, outpointObjects)
+	cmd := model.NewLoadTxFilterCmd(reload, addrStrs, outpointObjects)
 	return c.sendCmd(cmd)
 }
diff --git a/rpcclient/rawrequest.go b/rpc/client/rawrequest.go
similarity index 96%
rename from rpcclient/rawrequest.go
rename to rpc/client/rawrequest.go
index 102da8908..053196401 100644
--- a/rpcclient/rawrequest.go
+++ b/rpc/client/rawrequest.go
@@ -2,11 +2,11 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package rpcclient
+package client
 
 import (
 	"encoding/json"
 
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/pkg/errors"
 )
 
@@ -42,7 +42,7 @@ func (c *Client) RawRequestAsync(method string, params []json.RawMessage) Future
 	// since that relies on marshalling registered jsonrpc commands rather
 	// than custom commands.
 	id := c.NextID()
-	rawRequest := &rpcmodel.Request{
+	rawRequest := &model.Request{
 		JSONRPC: "1.0",
 		ID:      id,
 		Method:  method,
diff --git a/rpc/client/rawtransactions.go b/rpc/client/rawtransactions.go
new file mode 100644
index 000000000..0ceaef015
--- /dev/null
+++ b/rpc/client/rawtransactions.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2014-2017 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package client
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"github.com/kaspanet/kaspad/rpc/model"
+	"github.com/kaspanet/kaspad/util/daghash"
+	"github.com/kaspanet/kaspad/wire"
+)
+
+// FutureSendRawTransactionResult is a future promise to deliver the result
+// of a SendRawTransactionAsync RPC invocation (or an applicable error).
+type FutureSendRawTransactionResult chan *response
+
+// Receive waits for the response promised by the future and returns the result
+// of submitting the encoded transaction to the server which then relays it to
+// the network.
+func (r FutureSendRawTransactionResult) Receive() (*daghash.TxID, error) {
+	res, err := receiveFuture(r)
+	if err != nil {
+		return nil, err
+	}
+
+	// Unmarshal result as a string.
+	var txIDStr string
+	err = json.Unmarshal(res, &txIDStr)
+	if err != nil {
+		return nil, err
+	}
+
+	return daghash.NewTxIDFromStr(txIDStr)
+}
+
+// SendRawTransactionAsync returns an instance of a type that can be used to get
+// the result of the RPC at some future time by invoking the Receive function on
+// the returned instance.
+//
+// See SendRawTransaction for the blocking version and more details.
+func (c *Client) SendRawTransactionAsync(tx *wire.MsgTx, allowHighFees bool) FutureSendRawTransactionResult {
+	txHex := ""
+	if tx != nil {
+		// Serialize the transaction and convert to hex string.
+		buf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
+		if err := tx.Serialize(buf); err != nil {
+			return newFutureError(err)
+		}
+		txHex = hex.EncodeToString(buf.Bytes())
+	}
+
+	cmd := model.NewSendRawTransactionCmd(txHex, &allowHighFees)
+	return c.sendCmd(cmd)
+}
+
+// SendRawTransaction submits the encoded transaction to the server which will
+// then relay it to the network.
+func (c *Client) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (*daghash.TxID, error) {
+	return c.SendRawTransactionAsync(tx, allowHighFees).Receive()
+}
diff --git a/server/rpc/common.go b/rpc/common.go
similarity index 80%
rename from server/rpc/common.go
rename to rpc/common.go
index 2ad15d4bd..cfc20ac68 100644
--- a/server/rpc/common.go
+++ b/rpc/common.go
@@ -5,7 +5,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"github.com/kaspanet/kaspad/dagconfig"
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/kaspanet/kaspad/txscript"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/util/daghash"
@@ -18,8 +18,8 @@ import (
 var (
 	// ErrRPCUnimplemented is an error returned to RPC clients when the
 	// provided command is recognized, but not implemented.
-	ErrRPCUnimplemented = &rpcmodel.RPCError{
-		Code:    rpcmodel.ErrRPCUnimplemented,
+	ErrRPCUnimplemented = &model.RPCError{
+		Code:    model.ErrRPCUnimplemented,
 		Message: "Command unimplemented",
 	}
 )
@@ -29,19 +29,19 @@ var (
 // RPC server subsystem since internal errors really should not occur. The
 // context parameter is only used in the log message and may be empty if it's
 // not needed.
-func internalRPCError(errStr, context string) *rpcmodel.RPCError {
+func internalRPCError(errStr, context string) *model.RPCError {
 	logStr := errStr
 	if context != "" {
 		logStr = context + ": " + errStr
 	}
 	log.Error(logStr)
-	return rpcmodel.NewRPCError(rpcmodel.ErrRPCInternal.Code, errStr)
+	return model.NewRPCError(model.ErrRPCInternal.Code, errStr)
 }
 
 // rpcDecodeHexError is a convenience function for returning a nicely formatted
 // RPC error which indicates the provided hex string failed to decode.
-func rpcDecodeHexError(gotHex string) *rpcmodel.RPCError {
-	return rpcmodel.NewRPCError(rpcmodel.ErrRPCDecodeHexString,
+func rpcDecodeHexError(gotHex string) *model.RPCError {
+	return model.NewRPCError(model.ErrRPCDecodeHexString,
 		fmt.Sprintf("Argument must be hexadecimal string (not %q)", gotHex))
 }
 
@@ -49,8 +49,8 @@ func rpcDecodeHexError(gotHex string) *rpcmodel.RPCError {
 // rpcNoTxInfoError is a convenience function for returning a nicely formatted
 // RPC error which indicates there is no information available for the provided
 // transaction hash.
-func rpcNoTxInfoError(txID *daghash.TxID) *rpcmodel.RPCError {
-	return rpcmodel.NewRPCError(rpcmodel.ErrRPCNoTxInfo,
+func rpcNoTxInfoError(txID *daghash.TxID) *model.RPCError {
+	return model.NewRPCError(model.ErrRPCNoTxInfo,
 		fmt.Sprintf("No information available about transaction %s", txID))
 }
 
@@ -69,8 +69,8 @@ func messageToHex(msg wire.Message) (string, error) {
 
 // createVinList returns a slice of JSON objects for the inputs of the passed
 // transaction.
-func createVinList(mtx *wire.MsgTx) []rpcmodel.Vin {
-	vinList := make([]rpcmodel.Vin, len(mtx.TxIn))
+func createVinList(mtx *wire.MsgTx) []model.Vin {
+	vinList := make([]model.Vin, len(mtx.TxIn))
 	for i, txIn := range mtx.TxIn {
 		// The disassembled string will contain [error] inline
 		// if the script doesn't fully parse, so ignore the
@@ -81,7 +81,7 @@ func createVinList(mtx *wire.MsgTx) []rpcmodel.Vin {
 		vinEntry.TxID = txIn.PreviousOutpoint.TxID.String()
 		vinEntry.Vout = txIn.PreviousOutpoint.Index
 		vinEntry.Sequence = txIn.Sequence
-		vinEntry.ScriptSig = &rpcmodel.ScriptSig{
+		vinEntry.ScriptSig = &model.ScriptSig{
 			Asm: disbuf,
 			Hex: hex.EncodeToString(txIn.SignatureScript),
 		}
@@ -92,8 +92,8 @@
 
 // createVoutList returns a slice of JSON objects for the outputs of the passed
 // transaction.
-func createVoutList(mtx *wire.MsgTx, dagParams *dagconfig.Params, filterAddrMap map[string]struct{}) []rpcmodel.Vout {
-	voutList := make([]rpcmodel.Vout, 0, len(mtx.TxOut))
+func createVoutList(mtx *wire.MsgTx, dagParams *dagconfig.Params, filterAddrMap map[string]struct{}) []model.Vout {
+	voutList := make([]model.Vout, 0, len(mtx.TxOut))
 	for i, v := range mtx.TxOut {
 		// The disassembled string will contain [error] inline if the
 		// script doesn't fully parse, so ignore the error here.
@@ -123,7 +123,7 @@ func createVoutList(mtx *wire.MsgTx, dagParams *dagconfig.Params, filterAddrMap
 			continue
 		}
 
-		var vout rpcmodel.Vout
+		var vout model.Vout
 		vout.N = uint32(i)
 		vout.Value = v.Value
 		vout.ScriptPubKey.Address = encodedAddr
@@ -141,7 +141,7 @@ func createVoutList(mtx *wire.MsgTx, dagParams *dagconfig.Params, filterAddrMap
 // to a raw transaction JSON object.
 func createTxRawResult(dagParams *dagconfig.Params, mtx *wire.MsgTx,
 	txID string, blkHeader *wire.BlockHeader, blkHash string,
-	acceptingBlock *daghash.Hash, isInMempool bool) (*rpcmodel.TxRawResult, error) {
+	acceptingBlock *daghash.Hash, isInMempool bool) (*model.TxRawResult, error) {
 
 	mtxHex, err := messageToHex(mtx)
 	if err != nil {
@@ -153,7 +153,7 @@ func createTxRawResult(dagParams *dagconfig.Params, mtx *wire.MsgTx,
 		payloadHash = mtx.PayloadHash.String()
 	}
 
-	txReply := &rpcmodel.TxRawResult{
+	txReply := &model.TxRawResult{
 		Hex:  mtxHex,
 		TxID: txID,
 		Hash: mtx.TxHash().String(),
@@ -201,34 +201,34 @@ func getDifficultyRatio(bits uint32, params *dagconfig.Params) float64 {
 	return diff
 }
 
-// buildGetBlockVerboseResult takes a block and convert it to rpcmodel.GetBlockVerboseResult
+// buildGetBlockVerboseResult takes a block and convert it to model.GetBlockVerboseResult
 //
 // This function MUST be called with the DAG state lock held (for reads).
-func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool) (*rpcmodel.GetBlockVerboseResult, error) {
+func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool) (*model.GetBlockVerboseResult, error) {
 	hash := block.Hash()
-	params := s.cfg.DAGParams
+	params := s.dag.Params
 	blockHeader := block.MsgBlock().Header
 
-	blockBlueScore, err := s.cfg.DAG.BlueScoreByBlockHash(hash)
+	blockBlueScore, err := s.dag.BlueScoreByBlockHash(hash)
 	if err != nil {
 		context := "Could not get block blue score"
 		return nil, internalRPCError(err.Error(), context)
 	}
 
 	// Get the hashes for the next blocks unless there are none.
-	childHashes, err := s.cfg.DAG.ChildHashesByHash(hash)
+	childHashes, err := s.dag.ChildHashesByHash(hash)
 	if err != nil {
 		context := "No next block"
 		return nil, internalRPCError(err.Error(), context)
 	}
 
-	blockConfirmations, err := s.cfg.DAG.BlockConfirmationsByHashNoLock(hash)
+	blockConfirmations, err := s.dag.BlockConfirmationsByHashNoLock(hash)
 	if err != nil {
 		context := "Could not get block confirmations"
 		return nil, internalRPCError(err.Error(), context)
 	}
 
-	selectedParentHash, err := s.cfg.DAG.SelectedParentHash(hash)
+	selectedParentHash, err := s.dag.SelectedParentHash(hash)
 	if err != nil {
 		context := "Could not get block selected parent"
 		return nil, internalRPCError(err.Error(), context)
@@ -238,19 +238,19 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
 		selectedParentHashStr = selectedParentHash.String()
 	}
 
-	isChainBlock, err := s.cfg.DAG.IsInSelectedParentChain(hash)
+	isChainBlock, err := s.dag.IsInSelectedParentChain(hash)
 	if err != nil {
 		context := "Could not get whether block is in the selected parent chain"
 		return nil, internalRPCError(err.Error(), context)
 	}
 
-	acceptedBlockHashes, err := s.cfg.DAG.BluesByBlockHash(hash)
+	acceptedBlockHashes, err := s.dag.BluesByBlockHash(hash)
 	if err != nil {
 		context := fmt.Sprintf("Could not get block accepted blocks for block %s", hash)
 		return nil, internalRPCError(err.Error(), context)
 	}
 
-	result := &rpcmodel.GetBlockVerboseResult{
+	result := &model.GetBlockVerboseResult{
 		Hash:       hash.String(),
 		Version:    blockHeader.Version,
 		VersionHex: fmt.Sprintf("%08x", blockHeader.Version),
@@ -281,7 +281,7 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
 		result.Tx = txNames
 	} else {
 		txns := block.Transactions()
-		rawTxns := make([]rpcmodel.TxRawResult, len(txns))
+		rawTxns := make([]model.TxRawResult, len(txns))
 		for i, tx := range txns {
 			rawTxn, err := createTxRawResult(params, tx.MsgTx(), tx.ID().String(),
 				&blockHeader, hash.String(), nil, false)
@@ -296,18 +296,18 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
 	return result, nil
 }
 
-func collectChainBlocks(s *Server, hashes []*daghash.Hash) ([]rpcmodel.ChainBlock, error) {
-	chainBlocks := make([]rpcmodel.ChainBlock, 0, len(hashes))
+func collectChainBlocks(s *Server, hashes []*daghash.Hash) ([]model.ChainBlock, error) {
+	chainBlocks := make([]model.ChainBlock, 0, len(hashes))
 	for _, hash := range hashes {
-		acceptanceData, err := s.cfg.AcceptanceIndex.TxsAcceptanceData(hash)
+		acceptanceData, err := s.acceptanceIndex.TxsAcceptanceData(hash)
 		if err != nil {
-			return nil, &rpcmodel.RPCError{
-				Code:    rpcmodel.ErrRPCInternal.Code,
+			return nil, &model.RPCError{
+				Code:    model.ErrRPCInternal.Code,
 				Message: fmt.Sprintf("could not retrieve acceptance data for block %s", hash),
 			}
 		}
 
-		acceptedBlocks := make([]rpcmodel.AcceptedBlock, 0, len(acceptanceData))
+		acceptedBlocks := make([]model.AcceptedBlock, 0, len(acceptanceData))
 		for _, blockAcceptanceData := range acceptanceData {
 			acceptedTxIds := make([]string, 0, len(blockAcceptanceData.TxAcceptanceData))
 			for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData {
@@ -315,14 +315,14 @@ func collectChainBlocks(s *Server, hashes []*daghash.Hash) ([]rpcmodel.ChainBloc
 					acceptedTxIds = append(acceptedTxIds, txAcceptanceData.Tx.ID().String())
 				}
 			}
-			acceptedBlock := rpcmodel.AcceptedBlock{
+			acceptedBlock := model.AcceptedBlock{
 				Hash:          blockAcceptanceData.BlockHash.String(),
 				AcceptedTxIDs: acceptedTxIds,
 			}
 			acceptedBlocks = append(acceptedBlocks,
 				acceptedBlock)
 		}
 
-		chainBlock := rpcmodel.ChainBlock{
+		chainBlock := model.ChainBlock{
 			Hash:           hash.String(),
 			AcceptedBlocks: acceptedBlocks,
 		}
@@ -335,20 +335,20 @@ func collectChainBlocks(s *Server, hashes []*daghash.Hash) ([]rpcmodel.ChainBloc
 // correspondent block verbose.
 //
 // This function MUST be called with the DAG state lock held (for reads).
-func hashesToGetBlockVerboseResults(s *Server, hashes []*daghash.Hash) ([]rpcmodel.GetBlockVerboseResult, error) {
-	getBlockVerboseResults := make([]rpcmodel.GetBlockVerboseResult, 0, len(hashes))
+func hashesToGetBlockVerboseResults(s *Server, hashes []*daghash.Hash) ([]model.GetBlockVerboseResult, error) {
+	getBlockVerboseResults := make([]model.GetBlockVerboseResult, 0, len(hashes))
 	for _, blockHash := range hashes {
-		block, err := s.cfg.DAG.BlockByHash(blockHash)
+		block, err := s.dag.BlockByHash(blockHash)
 		if err != nil {
-			return nil, &rpcmodel.RPCError{
-				Code:    rpcmodel.ErrRPCInternal.Code,
+			return nil, &model.RPCError{
+				Code:    model.ErrRPCInternal.Code,
 				Message: fmt.Sprintf("could not retrieve block %s.", blockHash),
 			}
 		}
 		getBlockVerboseResult, err := buildGetBlockVerboseResult(s, block, false)
 		if err != nil {
-			return nil, &rpcmodel.RPCError{
-				Code:    rpcmodel.ErrRPCInternal.Code,
+			return nil, &model.RPCError{
+				Code:    model.ErrRPCInternal.Code,
 				Message: fmt.Sprintf("could not build getBlockVerboseResult for block %s: %s", blockHash, err),
 			}
 		}
diff --git a/rpc/handle_connect.go b/rpc/handle_connect.go
new file mode 100644
index 000000000..c275ae105
--- /dev/null
+++ b/rpc/handle_connect.go
@@ -0,0 +1,24 @@
+package rpc
+
+import (
+	"github.com/kaspanet/kaspad/rpc/model"
+	"github.com/kaspanet/kaspad/util/network"
+)
+
+// handleConnect handles connect commands.
+func handleConnect(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
+	c := cmd.(*model.ConnectCmd)
+
+	isPermanent := c.IsPermanent != nil && *c.IsPermanent
+
+	address, err := network.NormalizeAddress(c.Address, s.dag.Params.DefaultPort)
+	if err != nil {
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCInvalidParameter,
+			Message: err.Error(),
+		}
+	}
+
+	s.connectionManager.AddConnectionRequest(address, isPermanent)
+	return nil, nil
+}
diff --git a/server/rpc/handle_debug_level.go b/rpc/handle_debug_level.go
similarity index 76%
rename from server/rpc/handle_debug_level.go
rename to rpc/handle_debug_level.go
index 5ba7cfd7d..bd70352fd 100644
--- a/server/rpc/handle_debug_level.go
+++ b/rpc/handle_debug_level.go
@@ -3,12 +3,12 @@ package rpc
 import (
 	"fmt"
 	"github.com/kaspanet/kaspad/logger"
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 )
 
 // handleDebugLevel handles debugLevel commands.
 func handleDebugLevel(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
-	c := cmd.(*rpcmodel.DebugLevelCmd)
+	c := cmd.(*model.DebugLevelCmd)
 
 	// Special show command to list supported subsystems.
 	if c.LevelSpec == "show" {
@@ -18,8 +18,8 @@ func handleDebugLevel(s *Server, cmd interface{}, closeChan <-chan struct{}) (in
 
 	err := logger.ParseAndSetDebugLevels(c.LevelSpec)
 	if err != nil {
-		return nil, &rpcmodel.RPCError{
-			Code:    rpcmodel.ErrRPCInvalidParams.Code,
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCInvalidParams.Code,
 			Message: err.Error(),
 		}
 	}
diff --git a/rpc/handle_disconnect.go b/rpc/handle_disconnect.go
new file mode 100644
index 000000000..751cbb85d
--- /dev/null
+++ b/rpc/handle_disconnect.go
@@ -0,0 +1,22 @@
+package rpc
+
+import (
+	"github.com/kaspanet/kaspad/rpc/model"
+	"github.com/kaspanet/kaspad/util/network"
+)
+
+// handleDisconnect handles disconnect commands.
+func handleDisconnect(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
+	c := cmd.(*model.DisconnectCmd)
+
+	address, err := network.NormalizeAddress(c.Address, s.dag.Params.DefaultPort)
+	if err != nil {
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCInvalidParameter,
+			Message: err.Error(),
+		}
+	}
+
+	s.connectionManager.RemoveConnection(address)
+	return nil, nil
+}
diff --git a/server/rpc/handle_get_block.go b/rpc/handle_get_block.go
similarity index 74%
rename from server/rpc/handle_get_block.go
rename to rpc/handle_get_block.go
index 17ce5a550..b23de9871 100644
--- a/server/rpc/handle_get_block.go
+++ b/rpc/handle_get_block.go
@@ -5,7 +5,7 @@ import (
 	"bytes"
 	"encoding/hex"
 
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/util/daghash"
 	"github.com/kaspanet/kaspad/util/subnetworkid"
@@ -13,7 +13,7 @@ import (
 )
 
 // handleGetBlock implements the getBlock command.
 func handleGetBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
-	c := cmd.(*rpcmodel.GetBlockCmd)
+	c := cmd.(*model.GetBlockCmd)
 
 	// Load the raw block bytes from the database.
 	hash, err := daghash.NewHashFromStr(c.Hash)
@@ -22,32 +22,32 @@ func handleGetBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
 	}
 
 	// Return an appropriate error if the block is known to be invalid
-	if s.cfg.DAG.IsKnownInvalid(hash) {
-		return nil, &rpcmodel.RPCError{
-			Code:    rpcmodel.ErrRPCBlockInvalid,
+	if s.dag.IsKnownInvalid(hash) {
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCBlockInvalid,
 			Message: "Block is known to be invalid",
 		}
 	}
 
 	// Return an appropriate error if the block is an orphan
-	if s.cfg.DAG.IsKnownOrphan(hash) {
-		return nil, &rpcmodel.RPCError{
-			Code:    rpcmodel.ErrRPCOrphanBlock,
+	if s.dag.IsKnownOrphan(hash) {
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCOrphanBlock,
 			Message: "Block is an orphan",
 		}
 	}
 
-	block, err := s.cfg.DAG.BlockByHash(hash)
+	block, err := s.dag.BlockByHash(hash)
 	if err != nil {
-		return nil, &rpcmodel.RPCError{
-			Code:    rpcmodel.ErrRPCBlockNotFound,
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCBlockNotFound,
 			Message: "Block not found",
 		}
 	}
 	blockBytes, err := block.Bytes()
 	if err != nil {
-		return nil, &rpcmodel.RPCError{
-			Code:    rpcmodel.ErrRPCBlockInvalid,
+		return nil, &model.RPCError{
+			Code:    model.ErrRPCBlockInvalid,
 			Message: "Cannot serialize block",
 		}
 	}
@@ -56,18 +56,18 @@ func handleGetBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
 	if c.Subnetwork != nil {
 		requestSubnetworkID, err := subnetworkid.NewFromStr(*c.Subnetwork)
 		if err != nil {
-			return nil, &rpcmodel.RPCError{
-				Code:    rpcmodel.ErrRPCInvalidRequest.Code,
+			return nil, &model.RPCError{
+				Code:    model.ErrRPCInvalidRequest.Code,
 				Message: "invalid subnetwork string",
 			}
 		}
-		nodeSubnetworkID := s.appCfg.SubnetworkID
+		nodeSubnetworkID := s.cfg.SubnetworkID
 
 		if requestSubnetworkID != nil {
 			if nodeSubnetworkID != nil {
 				if !nodeSubnetworkID.IsEqual(requestSubnetworkID) {
-					return nil, &rpcmodel.RPCError{
-						Code:    rpcmodel.ErrRPCInvalidRequest.Code,
+					return nil, &model.RPCError{
+						Code:    model.ErrRPCInvalidRequest.Code,
 						Message: "subnetwork does not match this partial node",
 					}
 				}
@@ -98,8 +98,8 @@ func handleGetBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
 		return nil, internalRPCError(err.Error(), context)
 	}
 
-	s.cfg.DAG.RLock()
-	defer s.cfg.DAG.RUnlock()
+	s.dag.RLock()
+	defer s.dag.RUnlock()
 	blockReply, err := buildGetBlockVerboseResult(s, block, c.VerboseTx == nil || !*c.VerboseTx)
 	if err != nil {
 		return nil, err
diff --git a/server/rpc/handle_get_block_count.go b/rpc/handle_get_block_count.go
similarity index 83%
rename from server/rpc/handle_get_block_count.go
rename to rpc/handle_get_block_count.go
index 6f0461b34..f9ca4a1ee 100644
--- a/server/rpc/handle_get_block_count.go
+++ b/rpc/handle_get_block_count.go
@@ -2,5 +2,5 @@ package rpc
 
 // handleGetBlockCount implements the getBlockCount command.
 func handleGetBlockCount(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
-	return s.cfg.DAG.BlockCount(), nil
+	return s.dag.BlockCount(), nil
 }
diff --git a/server/rpc/handle_get_block_dag_info.go b/rpc/handle_get_block_dag_info.go
similarity index 86%
rename from server/rpc/handle_get_block_dag_info.go
rename to rpc/handle_get_block_dag_info.go
index f41fa8ab0..90c5b3761 100644
--- a/server/rpc/handle_get_block_dag_info.go
+++ b/rpc/handle_get_block_dag_info.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"github.com/kaspanet/kaspad/blockdag"
 	"github.com/kaspanet/kaspad/dagconfig"
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/kaspanet/kaspad/util/daghash"
 	"github.com/pkg/errors"
 	"strings"
@@ -14,10 +14,10 @@ import (
 func handleGetBlockDAGInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 	// Obtain a snapshot of the current best known DAG state. We'll
 	// populate the response to this call primarily from this snapshot.
-	params := s.cfg.DAGParams
-	dag := s.cfg.DAG
+	params := s.dag.Params
+	dag := s.dag
 
-	dagInfo := &rpcmodel.GetBlockDAGInfoResult{
+	dagInfo := &model.GetBlockDAGInfoResult{
 		DAG:     params.Name,
 		Blocks:  dag.BlockCount(),
 		Headers: dag.BlockCount(),
@@ -25,7 +25,7 @@ func handleGetBlockDAGInfo(s *Server, cmd interface{}, closeChan <-chan struct{}
 		Difficulty:    getDifficultyRatio(dag.CurrentBits(), params),
 		MedianTime:    dag.CalcPastMedianTime().UnixMilliseconds(),
 		Pruned:        false,
-		Bip9SoftForks: make(map[string]*rpcmodel.Bip9SoftForkDescription),
+		Bip9SoftForks: make(map[string]*model.Bip9SoftForkDescription),
 	}
 
 	// Finally, query the BIP0009 version bits state for all currently
@@ -39,8 +39,8 @@ func handleGetBlockDAGInfo(s *Server, cmd interface{}, closeChan <-chan struct{}
 			forkName = "dummy"
 
 		default:
-			return nil, &rpcmodel.RPCError{
-				Code: rpcmodel.ErrRPCInternal.Code,
+			return nil, &model.RPCError{
+				Code: model.ErrRPCInternal.Code,
 				Message: fmt.Sprintf("Unknown deployment %d "+
 					"detected", deployment),
 			}
@@ -59,8 +59,8 @@ func handleGetBlockDAGInfo(s *Server, cmd interface{}, closeChan <-chan struct{}
 		// non-nil error is returned.
 		statusString, err := softForkStatus(deploymentStatus)
 		if err != nil {
-			return nil, &rpcmodel.RPCError{
-				Code: rpcmodel.ErrRPCInternal.Code,
+			return nil, &model.RPCError{
+				Code: model.ErrRPCInternal.Code,
 				Message: fmt.Sprintf("unknown deployment status: %d",
 					deploymentStatus),
 			}
@@ -68,7 +68,7 @@ func handleGetBlockDAGInfo(s *Server, cmd interface{}, closeChan <-chan struct{}
 
 		// Finally, populate the soft-fork description with all the
 		// information gathered above.
-		dagInfo.Bip9SoftForks[forkName] = &rpcmodel.Bip9SoftForkDescription{
+		dagInfo.Bip9SoftForks[forkName] = &model.Bip9SoftForkDescription{
 			Status:    strings.ToLower(statusString),
 			Bit:       deploymentDetails.BitNumber,
 			StartTime: int64(deploymentDetails.StartTime),
diff --git a/server/rpc/handle_get_block_header.go b/rpc/handle_get_block_header.go
similarity index 81%
rename from server/rpc/handle_get_block_header.go
rename to rpc/handle_get_block_header.go
index cd554d8d9..1ddc9fca5 100644
--- a/server/rpc/handle_get_block_header.go
+++ b/rpc/handle_get_block_header.go
@@ -4,24 +4,24 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
-	"github.com/kaspanet/kaspad/rpcmodel"
+	"github.com/kaspanet/kaspad/rpc/model"
 	"github.com/kaspanet/kaspad/util/daghash"
 	"strconv"
 )
 
 // handleGetBlockHeader implements the getBlockHeader command.
func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetBlockHeaderCmd) + c := cmd.(*model.GetBlockHeaderCmd) // Fetch the header from DAG. hash, err := daghash.NewHashFromStr(c.Hash) if err != nil { return nil, rpcDecodeHexError(c.Hash) } - blockHeader, err := s.cfg.DAG.HeaderByHash(hash) + blockHeader, err := s.dag.HeaderByHash(hash) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCBlockNotFound, + return nil, &model.RPCError{ + Code: model.ErrRPCBlockNotFound, Message: "Block not found", } } @@ -41,27 +41,27 @@ func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{}) // The verbose flag is set, so generate the JSON object and return it. // Get the hashes for the next blocks unless there are none. - childHashes, err := s.cfg.DAG.ChildHashesByHash(hash) + childHashes, err := s.dag.ChildHashesByHash(hash) if err != nil { context := "No next block" return nil, internalRPCError(err.Error(), context) } childHashStrings := daghash.Strings(childHashes) - blockConfirmations, err := s.cfg.DAG.BlockConfirmationsByHash(hash) + blockConfirmations, err := s.dag.BlockConfirmationsByHash(hash) if err != nil { context := "Could not get block confirmations" return nil, internalRPCError(err.Error(), context) } - selectedParentHash, err := s.cfg.DAG.SelectedParentHash(hash) + selectedParentHash, err := s.dag.SelectedParentHash(hash) if err != nil { context := "Could not get block selected parent" return nil, internalRPCError(err.Error(), context) } - params := s.cfg.DAGParams - blockHeaderReply := rpcmodel.GetBlockHeaderVerboseResult{ + params := s.dag.Params + blockHeaderReply := model.GetBlockHeaderVerboseResult{ Hash: c.Hash, Confirmations: blockConfirmations, Version: blockHeader.Version, diff --git a/server/rpc/handle_get_block_template.go b/rpc/handle_get_block_template.go similarity index 93% rename from server/rpc/handle_get_block_template.go rename to rpc/handle_get_block_template.go index 0a96b2bc8..43ec6e4fd 100644 --- a/server/rpc/handle_get_block_template.go +++ b/rpc/handle_get_block_template.go @@ -13,7 +13,7 @@ import ( "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/mining" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" @@ -86,7 +86,7 @@ func builderScript(builder *txscript.ScriptBuilder) []byte { // handleGetBlockTemplate implements the getBlockTemplate command. func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetBlockTemplateCmd) + c := cmd.(*model.GetBlockTemplateCmd) request := c.Request // Set the default mode and override it if supplied. @@ -102,8 +102,8 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{ return handleGetBlockTemplateProposal(s, request) } - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Invalid mode", } } @@ -112,21 +112,21 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{ // deals with generating and returning block templates to the caller. It // handles both long poll requests as specified by BIP 0022 as well as regular // requests. 
-func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) { +func handleGetBlockTemplateRequest(s *Server, request *model.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) { // Return an error if there are no peers connected since there is no // way to relay a found block or receive transactions to work on. // However, allow this state when running in the regression test or // simulation test mode. - if !(s.appCfg.RegressionTest || s.appCfg.Simnet) && - s.cfg.ConnMgr.ConnectedCount() == 0 { + if !(s.cfg.RegressionTest || s.cfg.Simnet) && + s.connectionManager.ConnectionCount() == 0 { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCClientNotConnected, + return nil, &model.RPCError{ + Code: model.ErrRPCClientNotConnected, Message: "Kaspa is not connected", } } - payAddr, err := util.DecodeAddress(request.PayAddress, s.cfg.DAGParams.Prefix) + payAddr, err := util.DecodeAddress(request.PayAddress, s.dag.Params.Prefix) if err != nil { return nil, err } @@ -209,7 +209,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, payAddr util.A // template identified by the provided long poll ID is stale or // invalid. Otherwise, it returns a channel that will notify // when there's a more current template. -func blockTemplateOrLongPollChan(s *Server, longPollID string, payAddr util.Address) (*rpcmodel.GetBlockTemplateResult, chan struct{}, error) { +func blockTemplateOrLongPollChan(s *Server, longPollID string, payAddr util.Address) (*model.GetBlockTemplateResult, chan struct{}, error) { state := s.gbtWorkState state.Lock() @@ -262,11 +262,11 @@ func blockTemplateOrLongPollChan(s *Server, longPollID string, payAddr util.Addr // handleGetBlockTemplateProposal is a helper for handleGetBlockTemplate which // deals with block proposals. -func handleGetBlockTemplateProposal(s *Server, request *rpcmodel.TemplateRequest) (interface{}, error) { +func handleGetBlockTemplateProposal(s *Server, request *model.TemplateRequest) (interface{}, error) { hexData := request.Data if hexData == "" { - return false, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCType, + return false, &model.RPCError{ + Code: model.ErrRPCType, Message: fmt.Sprintf("Data must contain the " + "hex-encoded serialized block that is being " + "proposed"), @@ -279,34 +279,34 @@ func handleGetBlockTemplateProposal(s *Server, request *rpcmodel.TemplateRequest } dataBytes, err := hex.DecodeString(hexData) if err != nil { - return false, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCDeserialization, + return false, &model.RPCError{ + Code: model.ErrRPCDeserialization, Message: fmt.Sprintf("Data must be "+ "hexadecimal string (not %q)", hexData), } } var msgBlock wire.MsgBlock if err := msgBlock.Deserialize(bytes.NewReader(dataBytes)); err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCDeserialization, + return nil, &model.RPCError{ + Code: model.ErrRPCDeserialization, Message: "Block decode failed: " + err.Error(), } } block := util.NewBlock(&msgBlock) // Ensure the block is building from the expected parent blocks. 
- expectedParentHashes := s.cfg.DAG.TipHashes() + expectedParentHashes := s.dag.TipHashes() parentHashes := block.MsgBlock().Header.ParentHashes if !daghash.AreEqual(expectedParentHashes, parentHashes) { return "bad-parentblk", nil } - if err := s.cfg.DAG.CheckConnectBlockTemplate(block); err != nil { + if err := s.dag.CheckConnectBlockTemplate(block); err != nil { if !errors.As(err, &blockdag.RuleError{}) { errStr := fmt.Sprintf("Failed to process block proposal: %s", err) log.Error(errStr) - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCVerify, + return nil, &model.RPCError{ + Code: model.ErrRPCVerify, Message: errStr, } } @@ -524,7 +524,7 @@ func (state *gbtWorkState) templateUpdateChan(tipHashes []*daghash.Hash, lastGen // // This function MUST be called with the state locked. func (state *gbtWorkState) updateBlockTemplate(s *Server, payAddr util.Address) error { - generator := s.cfg.Generator + generator := s.blockTemplateGenerator lastTxUpdate := generator.TxSource().LastUpdated() if lastTxUpdate.IsZero() { lastTxUpdate = mstime.Now() @@ -536,7 +536,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, payAddr util.Address) // generated. var msgBlock *wire.MsgBlock var targetDifficulty string - tipHashes := s.cfg.DAG.TipHashes() + tipHashes := s.dag.TipHashes() template := state.template if template == nil || state.tipHashes == nil || !daghash.AreEqual(state.tipHashes, tipHashes) || @@ -575,7 +575,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, payAddr util.Address) // Get the minimum allowed timestamp for the block based on the // median timestamp of the last several blocks per the DAG // consensus rules. - minTimestamp := s.cfg.DAG.NextBlockMinimumTime() + minTimestamp := s.dag.NextBlockMinimumTime() // Update work state to ensure another block template isn't // generated until needed. @@ -621,12 +621,12 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, payAddr util.Address) } // blockTemplateResult returns the current block template associated with the -// state as a rpcmodel.GetBlockTemplateResult that is ready to be encoded to JSON +// state as a model.GetBlockTemplateResult that is ready to be encoded to JSON // and returned to the caller. // // This function MUST be called with the state locked. -func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTemplateResult, error) { - dag := s.cfg.DAG +func (state *gbtWorkState) blockTemplateResult(s *Server) (*model.GetBlockTemplateResult, error) { + dag := s.dag // Ensure the timestamps are still in valid range for the template. // This should really only ever happen if the local clock is changed // after the template is generated, but it's important to avoid serving @@ -637,8 +637,8 @@ func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTem adjustedTime := dag.Now() maxTime := adjustedTime.Add(time.Millisecond * time.Duration(dag.TimestampDeviationTolerance)) if header.Timestamp.After(maxTime) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCOutOfRange, + return nil, &model.RPCError{ + Code: model.ErrRPCOutOfRange, Message: fmt.Sprintf("The template time is after the "+ "maximum allowed time for a block - template "+ "time %s, maximum time %s", adjustedTime, @@ -650,7 +650,7 @@ func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTem // transaction. The result does not include the coinbase, so notice // the adjustments to the various lengths and indices. 
numTx := len(msgBlock.Transactions) - transactions := make([]rpcmodel.GetBlockTemplateResultTx, 0, numTx-1) + transactions := make([]model.GetBlockTemplateResultTx, 0, numTx-1) txIndex := make(map[daghash.TxID]int64, numTx) for i, tx := range msgBlock.Transactions { txID := tx.TxID() @@ -680,7 +680,7 @@ func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTem return nil, internalRPCError(err.Error(), context) } - resultTx := rpcmodel.GetBlockTemplateResultTx{ + resultTx := model.GetBlockTemplateResultTx{ Data: hex.EncodeToString(txBuf.Bytes()), ID: txID.String(), Depends: depends, @@ -704,9 +704,9 @@ func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTem // This is not a straight-up error because the choice of whether // to mine or not is the responsibility of the miner rather // than the node's. - isSynced := s.cfg.SyncMgr.IsSynced() + isSynced := s.blockTemplateGenerator.IsSynced() - reply := rpcmodel.GetBlockTemplateResult{ + reply := model.GetBlockTemplateResult{ Bits: strconv.FormatInt(int64(header.Bits), 16), CurTime: header.Timestamp.UnixMilliseconds(), Height: template.Height, diff --git a/server/rpc/handle_get_blocks.go b/rpc/handle_get_blocks.go similarity index 81% rename from server/rpc/handle_get_blocks.go rename to rpc/handle_get_blocks.go index de837cd42..08445e1e6 100644 --- a/server/rpc/handle_get_blocks.go +++ b/rpc/handle_get_blocks.go @@ -2,7 +2,7 @@ package rpc import ( "encoding/hex" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" ) @@ -14,7 +14,7 @@ const ( ) func handleGetBlocks(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetBlocksCmd) + c := cmd.(*model.GetBlocksCmd) var lowHash *daghash.Hash if c.LowHash != nil { lowHash = &daghash.Hash{} @@ -24,19 +24,19 @@ func handleGetBlocks(s *Server, cmd interface{}, closeChan <-chan struct{}) (int } } - s.cfg.DAG.RLock() - defer s.cfg.DAG.RUnlock() + s.dag.RLock() + defer s.dag.RUnlock() // If lowHash is not in the DAG, there's nothing to do; return an error. - if lowHash != nil && !s.cfg.DAG.IsKnownBlock(lowHash) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCBlockNotFound, + if lowHash != nil && !s.dag.IsKnownBlock(lowHash) { + return nil, &model.RPCError{ + Code: model.ErrRPCBlockNotFound, Message: "Block not found", } } // Retrieve the block hashes. 
- blockHashes, err := s.cfg.DAG.BlockHashesFrom(lowHash, maxBlocksInGetBlocksResult) + blockHashes, err := s.dag.BlockHashesFrom(lowHash, maxBlocksInGetBlocksResult) if err != nil { return nil, err } @@ -47,7 +47,7 @@ func handleGetBlocks(s *Server, cmd interface{}, closeChan <-chan struct{}) (int hashes[i] = blockHash.String() } - result := &rpcmodel.GetBlocksResult{ + result := &model.GetBlocksResult{ Hashes: hashes, RawBlocks: nil, VerboseBlocks: nil, @@ -77,7 +77,7 @@ func handleGetBlocks(s *Server, cmd interface{}, closeChan <-chan struct{}) (int func hashesToBlockBytes(s *Server, hashes []*daghash.Hash) ([][]byte, error) { blocks := make([][]byte, len(hashes)) for i, hash := range hashes { - block, err := s.cfg.DAG.BlockByHash(hash) + block, err := s.dag.BlockByHash(hash) if err != nil { return nil, err } @@ -98,8 +98,8 @@ func blockBytesToStrings(blockBytesSlice [][]byte) []string { return rawBlocks } -func blockBytesToBlockVerboseResults(s *Server, blockBytesSlice [][]byte) ([]rpcmodel.GetBlockVerboseResult, error) { - verboseBlocks := make([]rpcmodel.GetBlockVerboseResult, len(blockBytesSlice)) +func blockBytesToBlockVerboseResults(s *Server, blockBytesSlice [][]byte) ([]model.GetBlockVerboseResult, error) { + verboseBlocks := make([]model.GetBlockVerboseResult, len(blockBytesSlice)) for i, blockBytes := range blockBytesSlice { block, err := util.NewBlockFromBytes(blockBytes) if err != nil { diff --git a/server/rpc/handle_get_chain_from_block.go b/rpc/handle_get_chain_from_block.go similarity index 77% rename from server/rpc/handle_get_chain_from_block.go rename to rpc/handle_get_chain_from_block.go index be69502f2..da34d95c1 100644 --- a/server/rpc/handle_get_chain_from_block.go +++ b/rpc/handle_get_chain_from_block.go @@ -2,7 +2,7 @@ package rpc import ( "fmt" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" ) @@ -14,16 +14,16 @@ const ( // handleGetChainFromBlock implements the getChainFromBlock command. func handleGetChainFromBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - if s.cfg.AcceptanceIndex == nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCNoAcceptanceIndex, + if s.acceptanceIndex == nil { + return nil, &model.RPCError{ + Code: model.ErrRPCNoAcceptanceIndex, Message: "The acceptance index must be " + "enabled to get the selected parent chain " + "(specify --acceptanceindex)", } } - c := cmd.(*rpcmodel.GetChainFromBlockCmd) + c := cmd.(*model.GetChainFromBlockCmd) var startHash *daghash.Hash if c.StartHash != nil { startHash = &daghash.Hash{} @@ -33,20 +33,20 @@ func handleGetChainFromBlock(s *Server, cmd interface{}, closeChan <-chan struct } } - s.cfg.DAG.RLock() - defer s.cfg.DAG.RUnlock() + s.dag.RLock() + defer s.dag.RUnlock() // If startHash is not in the selected parent chain, there's nothing // to do; return an error. - if startHash != nil && !s.cfg.DAG.IsInDAG(startHash) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCBlockNotFound, + if startHash != nil && !s.dag.IsInDAG(startHash) { + return nil, &model.RPCError{ + Code: model.ErrRPCBlockNotFound, Message: "Block not found in the DAG", } } // Retrieve the selected parent chain. 
- removedChainHashes, addedChainHashes, err := s.cfg.DAG.SelectedParentChain(startHash) + removedChainHashes, addedChainHashes, err := s.dag.SelectedParentChain(startHash) if err != nil { return nil, err } @@ -59,8 +59,8 @@ func handleGetChainFromBlock(s *Server, cmd interface{}, closeChan <-chan struct // Collect addedChainBlocks. addedChainBlocks, err := collectChainBlocks(s, addedChainHashes) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInternal.Code, + return nil, &model.RPCError{ + Code: model.ErrRPCInternal.Code, Message: fmt.Sprintf("could not collect chain blocks: %s", err), } } @@ -71,7 +71,7 @@ func handleGetChainFromBlock(s *Server, cmd interface{}, closeChan <-chan struct removedHashes[i] = hash.String() } - result := &rpcmodel.GetChainFromBlockResult{ + result := &model.GetChainFromBlockResult{ RemovedChainBlockHashes: removedHashes, AddedChainBlocks: addedChainBlocks, Blocks: nil, diff --git a/rpc/handle_get_connected_peer_info.go b/rpc/handle_get_connected_peer_info.go new file mode 100644 index 000000000..cfd436bc6 --- /dev/null +++ b/rpc/handle_get_connected_peer_info.go @@ -0,0 +1,30 @@ +package rpc + +import ( + "github.com/kaspanet/kaspad/rpc/model" +) + +// handleGetConnectedPeerInfo implements the getConnectedPeerInfo command. +func handleGetConnectedPeerInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + peers := s.protocolManager.Peers() + infos := make([]*model.GetConnectedPeerInfoResult, 0, len(peers)) + for _, peer := range peers { + info := &model.GetConnectedPeerInfoResult{ + ID: peer.ID().String(), + Address: peer.Address(), + LastPingDuration: peer.LastPingDuration().Milliseconds(), + SelectedTipHash: peer.SelectedTipHash().String(), + IsSyncNode: peer == s.protocolManager.IBDPeer(), + + // TODO(libp2p): populate the following with real values + IsInbound: false, + BanScore: 0, + TimeOffset: 0, + UserAgent: "", + ProtocolVersion: 0, + TimeConnected: 0, + } + infos = append(infos, info) + } + return infos, nil +} diff --git a/server/rpc/handle_get_connection_count.go b/rpc/handle_get_connection_count.go similarity index 79% rename from server/rpc/handle_get_connection_count.go rename to rpc/handle_get_connection_count.go index 1335b83ed..d10f6cdf5 100644 --- a/server/rpc/handle_get_connection_count.go +++ b/rpc/handle_get_connection_count.go @@ -2,5 +2,5 @@ package rpc // handleGetConnectionCount implements the getConnectionCount command. func handleGetConnectionCount(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - return s.cfg.ConnMgr.ConnectedCount(), nil + return s.connectionManager.ConnectionCount(), nil } diff --git a/server/rpc/handle_get_current_net.go b/rpc/handle_get_current_net.go similarity index 84% rename from server/rpc/handle_get_current_net.go rename to rpc/handle_get_current_net.go index 492dd1699..0c853206e 100644 --- a/server/rpc/handle_get_current_net.go +++ b/rpc/handle_get_current_net.go @@ -2,5 +2,5 @@ package rpc // handleGetCurrentNet implements the getCurrentNet command. 
func handleGetCurrentNet(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - return s.cfg.DAGParams.Net, nil + return s.dag.Params.Net, nil } diff --git a/server/rpc/handle_get_difficulty.go b/rpc/handle_get_difficulty.go similarity index 67% rename from server/rpc/handle_get_difficulty.go rename to rpc/handle_get_difficulty.go index 3d22d9150..c43bd0842 100644 --- a/server/rpc/handle_get_difficulty.go +++ b/rpc/handle_get_difficulty.go @@ -2,5 +2,5 @@ package rpc // handleGetDifficulty implements the getDifficulty command. func handleGetDifficulty(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - return getDifficultyRatio(s.cfg.DAG.SelectedTipHeader().Bits, s.cfg.DAGParams), nil + return getDifficultyRatio(s.dag.SelectedTipHeader().Bits, s.dag.Params), nil } diff --git a/server/rpc/handle_get_headers.go b/rpc/handle_get_headers.go similarity index 81% rename from server/rpc/handle_get_headers.go rename to rpc/handle_get_headers.go index 3bca75a9c..41818eb80 100644 --- a/server/rpc/handle_get_headers.go +++ b/rpc/handle_get_headers.go @@ -3,7 +3,7 @@ package rpc import ( "bytes" "encoding/hex" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" ) @@ -11,7 +11,7 @@ const getHeadersMaxHeaders = 2000 // handleGetHeaders implements the getHeaders command. func handleGetHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetHeadersCmd) + c := cmd.(*model.GetHeadersCmd) lowHash := &daghash.ZeroHash if c.LowHash != "" { @@ -27,10 +27,10 @@ func handleGetHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (in return nil, rpcDecodeHexError(c.HighHash) } } - headers, err := s.cfg.SyncMgr.AntiPastHeadersBetween(lowHash, highHash, getHeadersMaxHeaders) + headers, err := s.dag.AntiPastHeadersBetween(lowHash, highHash, getHeadersMaxHeaders) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCMisc, + return nil, &model.RPCError{ + Code: model.ErrRPCMisc, Message: err.Error(), } } diff --git a/rpc/handle_get_info.go b/rpc/handle_get_info.go new file mode 100644 index 000000000..253425810 --- /dev/null +++ b/rpc/handle_get_info.go @@ -0,0 +1,24 @@ +package rpc + +import ( + "github.com/kaspanet/kaspad/rpc/model" + "github.com/kaspanet/kaspad/version" +) + +// handleGetInfo implements the getInfo command. We only return the fields +// that are not related to wallet functionality. 
+func handleGetInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + ret := &model.InfoDAGResult{ + Version: version.Version(), + ProtocolVersion: int32(maxProtocolVersion), + Blocks: s.dag.BlockCount(), + Connections: int32(s.connectionManager.ConnectionCount()), + Proxy: s.cfg.Proxy, + Difficulty: getDifficultyRatio(s.dag.CurrentBits(), s.dag.Params), + Testnet: s.cfg.Testnet, + Devnet: s.cfg.Devnet, + RelayFee: s.cfg.MinRelayTxFee.ToKAS(), + } + + return ret, nil +} diff --git a/server/rpc/handle_get_mempool_entry.go b/rpc/handle_get_mempool_entry.go similarity index 64% rename from server/rpc/handle_get_mempool_entry.go rename to rpc/handle_get_mempool_entry.go index 9c5401ff3..f87b32331 100644 --- a/server/rpc/handle_get_mempool_entry.go +++ b/rpc/handle_get_mempool_entry.go @@ -1,30 +1,30 @@ package rpc import ( - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" ) func handleGetMempoolEntry(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetMempoolEntryCmd) + c := cmd.(*model.GetMempoolEntryCmd) txID, err := daghash.NewTxIDFromStr(c.TxID) if err != nil { return nil, err } - txDesc, err := s.cfg.TxMemPool.FetchTxDesc(txID) + txDesc, err := s.txMempool.FetchTxDesc(txID) if err != nil { return nil, err } tx := txDesc.Tx - rawTx, err := createTxRawResult(s.cfg.DAGParams, tx.MsgTx(), tx.ID().String(), + rawTx, err := createTxRawResult(s.dag.Params, tx.MsgTx(), tx.ID().String(), nil, "", nil, true) if err != nil { return nil, err } - return &rpcmodel.GetMempoolEntryResult{ + return &model.GetMempoolEntryResult{ Fee: txDesc.Fee, Time: txDesc.Added.UnixMilliseconds(), RawTx: *rawTx, diff --git a/server/rpc/handle_get_mempool_info.go b/rpc/handle_get_mempool_info.go similarity index 74% rename from server/rpc/handle_get_mempool_info.go rename to rpc/handle_get_mempool_info.go index c3e7e3df2..ba58da2b2 100644 --- a/server/rpc/handle_get_mempool_info.go +++ b/rpc/handle_get_mempool_info.go @@ -1,17 +1,17 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // handleGetMempoolInfo implements the getMempoolInfo command. func handleGetMempoolInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - mempoolTxns := s.cfg.TxMemPool.TxDescs() + mempoolTxns := s.txMempool.TxDescs() var numBytes int64 for _, txD := range mempoolTxns { numBytes += int64(txD.Tx.MsgTx().SerializeSize()) } - ret := &rpcmodel.GetMempoolInfoResult{ + ret := &model.GetMempoolInfoResult{ Size: int64(len(mempoolTxns)), Bytes: numBytes, } diff --git a/server/rpc/handle_get_net_totals.go b/rpc/handle_get_net_totals.go similarity index 58% rename from server/rpc/handle_get_net_totals.go rename to rpc/handle_get_net_totals.go index 84f8c076f..b3d7f19af 100644 --- a/server/rpc/handle_get_net_totals.go +++ b/rpc/handle_get_net_totals.go @@ -1,16 +1,16 @@ package rpc import ( - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "time" ) // handleGetNetTotals implements the getNetTotals command. 
func handleGetNetTotals(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - totalBytesRecv, totalBytesSent := s.cfg.ConnMgr.NetTotals() - reply := &rpcmodel.GetNetTotalsResult{ - TotalBytesRecv: totalBytesRecv, - TotalBytesSent: totalBytesSent, + // TODO(libp2p): fill this up with real values + reply := &model.GetNetTotalsResult{ + TotalBytesRecv: 0, + TotalBytesSent: 0, TimeMillis: time.Now().UTC().UnixNano() / int64(time.Millisecond), } return reply, nil diff --git a/server/rpc/handle_get_peer_addresses.go b/rpc/handle_get_peer_addresses.go similarity index 57% rename from server/rpc/handle_get_peer_addresses.go rename to rpc/handle_get_peer_addresses.go index 0623afea6..9863e88a8 100644 --- a/server/rpc/handle_get_peer_addresses.go +++ b/rpc/handle_get_peer_addresses.go @@ -1,26 +1,26 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // handleGetPeerAddresses handles getPeerAddresses commands. func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - peersState, err := s.cfg.addressManager.PeersStateForSerialization() + peersState, err := s.addressManager.PeersStateForSerialization() if err != nil { return nil, err } - rpcPeersState := rpcmodel.GetPeerAddressesResult{ + rpcPeersState := model.GetPeerAddressesResult{ Version: peersState.Version, Key: peersState.Key, - Addresses: make([]*rpcmodel.GetPeerAddressesKnownAddressResult, len(peersState.Addresses)), - NewBuckets: make(map[string]*rpcmodel.GetPeerAddressesNewBucketResult), - NewBucketFullNodes: rpcmodel.GetPeerAddressesNewBucketResult{}, - TriedBuckets: make(map[string]*rpcmodel.GetPeerAddressesTriedBucketResult), - TriedBucketFullNodes: rpcmodel.GetPeerAddressesTriedBucketResult{}, + Addresses: make([]*model.GetPeerAddressesKnownAddressResult, len(peersState.Addresses)), + NewBuckets: make(map[string]*model.GetPeerAddressesNewBucketResult), + NewBucketFullNodes: model.GetPeerAddressesNewBucketResult{}, + TriedBuckets: make(map[string]*model.GetPeerAddressesTriedBucketResult), + TriedBucketFullNodes: model.GetPeerAddressesTriedBucketResult{}, } for i, addr := range peersState.Addresses { - rpcPeersState.Addresses[i] = &rpcmodel.GetPeerAddressesKnownAddressResult{ + rpcPeersState.Addresses[i] = &model.GetPeerAddressesKnownAddressResult{ Addr: addr.Addr, Src: addr.Src, SubnetworkID: addr.SubnetworkID, @@ -32,7 +32,7 @@ func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{ } for subnetworkID, bucket := range peersState.NewBuckets { - rpcPeersState.NewBuckets[subnetworkID] = &rpcmodel.GetPeerAddressesNewBucketResult{} + rpcPeersState.NewBuckets[subnetworkID] = &model.GetPeerAddressesNewBucketResult{} for i, addr := range bucket { rpcPeersState.NewBuckets[subnetworkID][i] = addr } @@ -43,7 +43,7 @@ func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{ } for subnetworkID, bucket := range peersState.TriedBuckets { - rpcPeersState.TriedBuckets[subnetworkID] = &rpcmodel.GetPeerAddressesTriedBucketResult{} + rpcPeersState.TriedBuckets[subnetworkID] = &model.GetPeerAddressesTriedBucketResult{} for i, addr := range bucket { rpcPeersState.TriedBuckets[subnetworkID][i] = addr } diff --git a/rpc/handle_get_raw_mempool.go b/rpc/handle_get_raw_mempool.go new file mode 100644 index 000000000..b5433f264 --- /dev/null +++ b/rpc/handle_get_raw_mempool.go @@ -0,0 +1,58 @@ +package rpc + +import ( + "github.com/kaspanet/kaspad/rpc/model" + 
"github.com/kaspanet/kaspad/util" +) + +// handleGetRawMempool implements the getRawMempool command. +func handleGetRawMempool(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*model.GetRawMempoolCmd) + mp := s.txMempool + + if c.Verbose != nil && *c.Verbose { + return rawMempoolVerbose(s), nil + } + + // The response is simply an array of the transaction hashes if the + // verbose flag is not set. + descs := mp.TxDescs() + hashStrings := make([]string, len(descs)) + for i := range hashStrings { + hashStrings[i] = descs[i].Tx.ID().String() + } + + return hashStrings, nil +} + +// rawMempoolVerbose returns all of the entries in the mempool as a fully +// populated jsonrpc result. +func rawMempoolVerbose(s *Server) map[string]*model.GetRawMempoolVerboseResult { + descs := s.txMempool.TxDescs() + result := make(map[string]*model.GetRawMempoolVerboseResult, len(descs)) + + for _, desc := range descs { + // Calculate the current priority based on the inputs to + // the transaction. Use zero if one or more of the + // input transactions can't be found for some reason. + tx := desc.Tx + + mpd := &model.GetRawMempoolVerboseResult{ + Size: int32(tx.MsgTx().SerializeSize()), + Fee: util.Amount(desc.Fee).ToKAS(), + Time: desc.Added.UnixMilliseconds(), + Depends: make([]string, 0), + } + for _, txIn := range tx.MsgTx().TxIn { + txID := &txIn.PreviousOutpoint.TxID + if s.txMempool.HaveTransaction(txID) { + mpd.Depends = append(mpd.Depends, + txID.String()) + } + } + + result[tx.ID().String()] = mpd + } + + return result +} diff --git a/server/rpc/handle_get_selected_tip.go b/rpc/handle_get_selected_tip.go similarity index 70% rename from server/rpc/handle_get_selected_tip.go rename to rpc/handle_get_selected_tip.go index 584d8c452..4c28530a1 100644 --- a/server/rpc/handle_get_selected_tip.go +++ b/rpc/handle_get_selected_tip.go @@ -2,25 +2,25 @@ package rpc import ( "encoding/hex" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // handleGetSelectedTip implements the getSelectedTip command. func handleGetSelectedTip(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - getSelectedTipCmd := cmd.(*rpcmodel.GetSelectedTipCmd) - selectedTipHash := s.cfg.DAG.SelectedTipHash() + getSelectedTipCmd := cmd.(*model.GetSelectedTipCmd) + selectedTipHash := s.dag.SelectedTipHash() - block, err := s.cfg.DAG.BlockByHash(selectedTipHash) + block, err := s.dag.BlockByHash(selectedTipHash) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCBlockNotFound, + return nil, &model.RPCError{ + Code: model.ErrRPCBlockNotFound, Message: "Block not found", } } blockBytes, err := block.Bytes() if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCBlockInvalid, + return nil, &model.RPCError{ + Code: model.ErrRPCBlockInvalid, Message: "Cannot serialize block", } } diff --git a/server/rpc/handle_get_selected_tip_hash.go b/rpc/handle_get_selected_tip_hash.go similarity index 79% rename from server/rpc/handle_get_selected_tip_hash.go rename to rpc/handle_get_selected_tip_hash.go index 211cef935..7ec1b4fed 100644 --- a/server/rpc/handle_get_selected_tip_hash.go +++ b/rpc/handle_get_selected_tip_hash.go @@ -2,5 +2,5 @@ package rpc // handleGetSelectedTipHash implements the getSelectedTipHash command. 
func handleGetSelectedTipHash(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - return s.cfg.DAG.SelectedTipHash().String(), nil + return s.dag.SelectedTipHash().String(), nil } diff --git a/server/rpc/handle_get_subnetwork.go b/rpc/handle_get_subnetwork.go similarity index 70% rename from server/rpc/handle_get_subnetwork.go rename to rpc/handle_get_subnetwork.go index 7be8b09ea..a66fbe45f 100644 --- a/server/rpc/handle_get_subnetwork.go +++ b/rpc/handle_get_subnetwork.go @@ -1,13 +1,13 @@ package rpc import ( - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/subnetworkid" ) // handleGetSubnetwork handles the getSubnetwork command. func handleGetSubnetwork(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetSubnetworkCmd) + c := cmd.(*model.GetSubnetworkCmd) subnetworkID, err := subnetworkid.NewFromStr(c.SubnetworkID) if err != nil { @@ -17,17 +17,17 @@ func handleGetSubnetwork(s *Server, cmd interface{}, closeChan <-chan struct{}) var gasLimit *uint64 if !subnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) && !subnetworkID.IsBuiltIn() { - limit, err := s.cfg.DAG.GasLimit(subnetworkID) + limit, err := s.dag.GasLimit(subnetworkID) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCSubnetworkNotFound, + return nil, &model.RPCError{ + Code: model.ErrRPCSubnetworkNotFound, Message: "Subnetwork not found.", } } gasLimit = &limit } - subnetworkReply := &rpcmodel.GetSubnetworkResult{ + subnetworkReply := &model.GetSubnetworkResult{ GasLimit: gasLimit, } return subnetworkReply, nil diff --git a/server/rpc/handle_get_top_headers.go b/rpc/handle_get_top_headers.go similarity index 86% rename from server/rpc/handle_get_top_headers.go rename to rpc/handle_get_top_headers.go index 119de437f..d68a9e0d7 100644 --- a/server/rpc/handle_get_top_headers.go +++ b/rpc/handle_get_top_headers.go @@ -3,7 +3,7 @@ package rpc import ( "bytes" "encoding/hex" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" ) @@ -11,7 +11,7 @@ const getTopHeadersMaxHeaders = getHeadersMaxHeaders // handleGetTopHeaders implements the getTopHeaders command. func handleGetTopHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetTopHeadersCmd) + c := cmd.(*model.GetTopHeadersCmd) var highHash *daghash.Hash if c.HighHash != nil { @@ -21,7 +21,7 @@ func handleGetTopHeaders(s *Server, cmd interface{}, closeChan <-chan struct{}) return nil, rpcDecodeHexError(*c.HighHash) } } - headers, err := s.cfg.DAG.GetTopHeaders(highHash, getTopHeadersMaxHeaders) + headers, err := s.dag.GetTopHeaders(highHash, getTopHeadersMaxHeaders) if err != nil { return nil, internalRPCError(err.Error(), "Failed to get top headers") diff --git a/server/rpc/handle_get_tx_out.go b/rpc/handle_get_tx_out.go similarity index 84% rename from server/rpc/handle_get_tx_out.go rename to rpc/handle_get_tx_out.go index 7a78d6c65..ad1bdae90 100644 --- a/server/rpc/handle_get_tx_out.go +++ b/rpc/handle_get_tx_out.go @@ -3,7 +3,7 @@ package rpc import ( "encoding/hex" "fmt" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" @@ -13,7 +13,7 @@ import ( // handleGetTxOut handles getTxOut commands. 
func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetTxOutCmd) + c := cmd.(*model.GetTxOutCmd) // Convert the provided transaction hash hex to a Hash. txID, err := daghash.NewTxIDFromStr(c.TxID) @@ -35,16 +35,16 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte } // TODO: This is racy. It should attempt to fetch it directly and check // the error. - if includeMempool && s.cfg.TxMemPool.HaveTransaction(txID) { - tx, err := s.cfg.TxMemPool.FetchTransaction(txID) + if includeMempool && s.txMempool.HaveTransaction(txID) { + tx, err := s.txMempool.FetchTransaction(txID) if err != nil { return nil, rpcNoTxInfoError(txID) } mtx := tx.MsgTx() if c.Vout > uint32(len(mtx.TxOut)-1) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidTxVout, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidTxVout, Message: "Output index number (vout) does not " + "exist for transaction.", } @@ -57,14 +57,14 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte return nil, internalRPCError(errStr, "") } - selectedTipHash = s.cfg.DAG.SelectedTipHash().String() + selectedTipHash = s.dag.SelectedTipHash().String() value = txOut.Value scriptPubKey = txOut.ScriptPubKey isCoinbase = mtx.IsCoinBase() isInMempool = true } else { out := wire.Outpoint{TxID: *txID, Index: c.Vout} - entry, ok := s.cfg.DAG.GetUTXOEntry(out) + entry, ok := s.dag.GetUTXOEntry(out) if !ok { return nil, rpcNoTxInfoError(txID) } @@ -78,7 +78,7 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte return nil, nil } - utxoConfirmations, ok := s.cfg.DAG.UTXOConfirmations(&out) + utxoConfirmations, ok := s.dag.UTXOConfirmations(&out) if !ok { errStr := fmt.Sprintf("Cannot get confirmations for tx id %s, index %d", out.TxID, out.Index) @@ -86,7 +86,7 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte } confirmations = &utxoConfirmations - selectedTipHash = s.cfg.DAG.SelectedTipHash().String() + selectedTipHash = s.dag.SelectedTipHash().String() value = entry.Amount() scriptPubKey = entry.ScriptPubKey() isCoinbase = entry.IsCoinbase() @@ -101,18 +101,18 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte // Ignore the error here since an error means the script couldn't parse // and there is no additional information about it anyways. scriptClass, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey, - s.cfg.DAGParams) + s.dag.Params) var address *string if addr != nil { address = pointers.String(addr.EncodeAddress()) } - txOutReply := &rpcmodel.GetTxOutResult{ + txOutReply := &model.GetTxOutResult{ SelectedTip: selectedTipHash, Confirmations: confirmations, IsInMempool: isInMempool, Value: util.Amount(value).ToKAS(), - ScriptPubKey: rpcmodel.ScriptPubKeyResult{ + ScriptPubKey: model.ScriptPubKeyResult{ Asm: disbuf, Hex: hex.EncodeToString(scriptPubKey), Type: scriptClass.String(), diff --git a/server/rpc/handle_help.go b/rpc/handle_help.go similarity index 87% rename from server/rpc/handle_help.go rename to rpc/handle_help.go index 4ef9824fe..43efb7408 100644 --- a/server/rpc/handle_help.go +++ b/rpc/handle_help.go @@ -1,10 +1,10 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // handleHelp implements the help command. 
func handleHelp(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.HelpCmd) + c := cmd.(*model.HelpCmd) // Provide a usage overview of all commands when no specific command // was specified. @@ -26,8 +26,8 @@ func handleHelp(s *Server, cmd interface{}, closeChan <-chan struct{}) (interfac // for commands that are unimplemented or related to wallet // functionality. if _, ok := rpcHandlers[command]; !ok { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Unknown command: " + command, } } diff --git a/server/rpc/handle_load_tx_filter.go b/rpc/handle_load_tx_filter.go similarity index 85% rename from server/rpc/handle_load_tx_filter.go rename to rpc/handle_load_tx_filter.go index fabe9f926..6eae3276f 100644 --- a/server/rpc/handle_load_tx_filter.go +++ b/rpc/handle_load_tx_filter.go @@ -1,7 +1,7 @@ package rpc import ( - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/wire" ) @@ -11,14 +11,14 @@ import ( // // NOTE: This extension is ported from github.com/decred/dcrd func handleLoadTxFilter(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd := icmd.(*rpcmodel.LoadTxFilterCmd) + cmd := icmd.(*model.LoadTxFilterCmd) outpoints := make([]wire.Outpoint, len(cmd.Outpoints)) for i := range cmd.Outpoints { txID, err := daghash.NewTxIDFromStr(cmd.Outpoints[i].TxID) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: err.Error(), } } @@ -28,7 +28,7 @@ func handleLoadTxFilter(wsc *wsClient, icmd interface{}) (interface{}, error) { } } - params := wsc.server.cfg.DAGParams + params := wsc.server.dag.Params reloadedFilterData := func() bool { wsc.Lock() diff --git a/server/rpc/handle_notify_blocks.go b/rpc/handle_notify_blocks.go similarity index 100% rename from server/rpc/handle_notify_blocks.go rename to rpc/handle_notify_blocks.go diff --git a/server/rpc/handle_notify_chain_changes.go b/rpc/handle_notify_chain_changes.go similarity index 70% rename from server/rpc/handle_notify_chain_changes.go rename to rpc/handle_notify_chain_changes.go index 034b0a7cd..1c175306f 100644 --- a/server/rpc/handle_notify_chain_changes.go +++ b/rpc/handle_notify_chain_changes.go @@ -1,13 +1,13 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // handleNotifyChainChanges implements the notifyChainChanges command extension for // websocket connections. 
func handleNotifyChainChanges(wsc *wsClient, icmd interface{}) (interface{}, error) { - if wsc.server.cfg.AcceptanceIndex == nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCNoAcceptanceIndex, + if wsc.server.acceptanceIndex == nil { + return nil, &model.RPCError{ + Code: model.ErrRPCNoAcceptanceIndex, Message: "The acceptance index must be " + "enabled to receive chain changes " + "(specify --acceptanceindex)", diff --git a/server/rpc/handle_notify_new_transactions.go b/rpc/handle_notify_new_transactions.go similarity index 68% rename from server/rpc/handle_notify_new_transactions.go rename to rpc/handle_notify_new_transactions.go index ec9612dd2..270391750 100644 --- a/server/rpc/handle_notify_new_transactions.go +++ b/rpc/handle_notify_new_transactions.go @@ -1,22 +1,22 @@ package rpc import ( - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/subnetworkid" ) // handleNotifyNewTransactions implements the notifyNewTransactions command // extension for websocket connections. func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*rpcmodel.NotifyNewTransactionsCmd) + cmd, ok := icmd.(*model.NotifyNewTransactionsCmd) if !ok { - return nil, rpcmodel.ErrRPCInternal + return nil, model.ErrRPCInternal } isVerbose := cmd.Verbose != nil && *cmd.Verbose if !isVerbose && cmd.Subnetwork != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Subnetwork switch is only allowed if verbose=true", } } @@ -26,30 +26,30 @@ func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}, var err error subnetworkID, err = subnetworkid.NewFromStr(*cmd.Subnetwork) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Subnetwork is malformed", } } } if isVerbose { - nodeSubnetworkID := wsc.server.cfg.DAG.SubnetworkID() + nodeSubnetworkID := wsc.server.dag.SubnetworkID() if nodeSubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) && subnetworkID != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Subnetwork switch is disabled when node is in Native subnetwork", } } else if nodeSubnetworkID != nil { if subnetworkID == nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Subnetwork switch is required when node is partial", } } if !nodeSubnetworkID.IsEqual(subnetworkID) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Subnetwork must equal the node's subnetwork when the node is partial", } } diff --git a/server/rpc/handle_rescan_block_filter.go b/rpc/handle_rescan_block_filter.go similarity index 100% rename from server/rpc/handle_rescan_block_filter.go rename to rpc/handle_rescan_block_filter.go diff --git a/server/rpc/handle_rescan_blocks.go b/rpc/handle_rescan_blocks.go similarity index 74% rename from server/rpc/handle_rescan_blocks.go rename to rpc/handle_rescan_blocks.go index bb816d394..89f69c8b5 100644 --- a/server/rpc/handle_rescan_blocks.go +++ b/rpc/handle_rescan_blocks.go @@ -2,7 +2,7 @@ package rpc import ( "fmt" - 
"github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" ) @@ -11,16 +11,16 @@ import ( // // NOTE: This extension is ported from github.com/decred/dcrd func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*rpcmodel.RescanBlocksCmd) + cmd, ok := icmd.(*model.RescanBlocksCmd) if !ok { - return nil, rpcmodel.ErrRPCInternal + return nil, model.ErrRPCInternal } // Load client's transaction filter. Must exist in order to continue. filter := wsc.FilterData() if filter == nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCMisc, + return nil, &model.RPCError{ + Code: model.ErrRPCMisc, Message: "Transaction filter must be loaded before rescanning", } } @@ -35,24 +35,24 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { blockHashes[i] = hash } - discoveredData := make([]rpcmodel.RescannedBlock, 0, len(blockHashes)) + discoveredData := make([]model.RescannedBlock, 0, len(blockHashes)) // Iterate over each block in the request and rescan. When a block // contains relevant transactions, add it to the response. - bc := wsc.server.cfg.DAG - params := wsc.server.cfg.DAGParams + bc := wsc.server.dag + params := wsc.server.dag.Params var lastBlockHash *daghash.Hash for i := range blockHashes { block, err := bc.BlockByHash(blockHashes[i]) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCBlockNotFound, + return nil, &model.RPCError{ + Code: model.ErrRPCBlockNotFound, Message: "Failed to fetch block: " + err.Error(), } } if lastBlockHash != nil && !block.MsgBlock().Header.ParentHashes[0].IsEqual(lastBlockHash) { // TODO: (Stas) This is likely wrong. Modified to satisfy compilation. - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: fmt.Sprintf("Block %s is not a child of %s", blockHashes[i], lastBlockHash), } @@ -61,7 +61,7 @@ func handleRescanBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { transactions := rescanBlockFilter(filter, block, params) if len(transactions) != 0 { - discoveredData = append(discoveredData, rpcmodel.RescannedBlock{ + discoveredData = append(discoveredData, model.RescannedBlock{ Hash: cmd.BlockHashes[i], Transactions: transactions, }) diff --git a/rpc/handle_send_raw_transaction.go b/rpc/handle_send_raw_transaction.go new file mode 100644 index 000000000..9d2a389c7 --- /dev/null +++ b/rpc/handle_send_raw_transaction.go @@ -0,0 +1,46 @@ +package rpc + +import ( + "bytes" + "encoding/hex" + "github.com/kaspanet/kaspad/mempool" + "github.com/kaspanet/kaspad/rpc/model" + "github.com/kaspanet/kaspad/util" + "github.com/kaspanet/kaspad/wire" + "github.com/pkg/errors" +) + +// handleSendRawTransaction implements the sendRawTransaction command. 
+func handleSendRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*model.SendRawTransactionCmd) + // Deserialize and send off to tx relay + hexStr := c.HexTx + serializedTx, err := hex.DecodeString(hexStr) + if err != nil { + return nil, rpcDecodeHexError(hexStr) + } + var msgTx wire.MsgTx + err = msgTx.Deserialize(bytes.NewReader(serializedTx)) + if err != nil { + return nil, &model.RPCError{ + Code: model.ErrRPCDeserialization, + Message: "TX decode failed: " + err.Error(), + } + } + + tx := util.NewTx(&msgTx) + err = s.protocolManager.AddTransaction(tx) + if err != nil { + if !errors.As(err, &mempool.RuleError{}) { + panic(err) + } + + log.Debugf("Rejected transaction %s: %s", tx.ID(), err) + return nil, &model.RPCError{ + Code: model.ErrRPCVerify, + Message: "TX rejected: " + err.Error(), + } + } + + return tx.ID().String(), nil +} diff --git a/server/rpc/handle_session.go b/rpc/handle_session.go similarity index 62% rename from server/rpc/handle_session.go rename to rpc/handle_session.go index 6248e7cd9..79b6b03ab 100644 --- a/server/rpc/handle_session.go +++ b/rpc/handle_session.go @@ -1,9 +1,9 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // handleSession implements the session command extension for websocket // connections. func handleSession(wsc *wsClient, icmd interface{}) (interface{}, error) { - return &rpcmodel.SessionResult{SessionID: wsc.sessionID}, nil + return &model.SessionResult{SessionID: wsc.sessionID}, nil } diff --git a/server/rpc/handle_stop.go b/rpc/handle_stop.go similarity index 100% rename from server/rpc/handle_stop.go rename to rpc/handle_stop.go diff --git a/server/rpc/handle_stop_notify_blocks.go b/rpc/handle_stop_notify_blocks.go similarity index 100% rename from server/rpc/handle_stop_notify_blocks.go rename to rpc/handle_stop_notify_blocks.go diff --git a/server/rpc/handle_stop_notify_chain_changes.go b/rpc/handle_stop_notify_chain_changes.go similarity index 100% rename from server/rpc/handle_stop_notify_chain_changes.go rename to rpc/handle_stop_notify_chain_changes.go diff --git a/server/rpc/handle_stop_notify_new_transactions.go b/rpc/handle_stop_notify_new_transactions.go similarity index 100% rename from server/rpc/handle_stop_notify_new_transactions.go rename to rpc/handle_stop_notify_new_transactions.go diff --git a/server/rpc/handle_submit_block.go b/rpc/handle_submit_block.go similarity index 73% rename from server/rpc/handle_submit_block.go rename to rpc/handle_submit_block.go index 34f376e04..3ed36393e 100644 --- a/server/rpc/handle_submit_block.go +++ b/rpc/handle_submit_block.go @@ -3,14 +3,13 @@ package rpc import ( "encoding/hex" "fmt" - "github.com/kaspanet/kaspad/blockdag" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util" ) // handleSubmitBlock implements the submitBlock command. func handleSubmitBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.SubmitBlockCmd) + c := cmd.(*model.SubmitBlockCmd) // Deserialize the submitted block. 
hexStr := c.HexBlock @@ -24,18 +23,18 @@ func handleSubmitBlock(s *Server, cmd interface{}, closeChan <-chan struct{}) (i block, err := util.NewBlockFromBytes(serializedBlock) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCDeserialization, + return nil, &model.RPCError{ + Code: model.ErrRPCDeserialization, Message: "Block decode failed: " + err.Error(), } } // Process this block using the same rules as blocks coming from other // nodes. This will in turn relay it to the network like normal. - _, err = s.cfg.SyncMgr.SubmitBlock(block, blockdag.BFNone) + err = s.protocolManager.AddBlock(block) if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCVerify, + return nil, &model.RPCError{ + Code: model.ErrRPCVerify, Message: fmt.Sprintf("Block rejected. Reason: %s", err), } } diff --git a/server/rpc/handle_uptime.go b/rpc/handle_uptime.go similarity index 72% rename from server/rpc/handle_uptime.go rename to rpc/handle_uptime.go index 5447aeb15..51a4aae70 100644 --- a/server/rpc/handle_uptime.go +++ b/rpc/handle_uptime.go @@ -6,5 +6,5 @@ import ( // handleUptime implements the uptime command. func handleUptime(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - return mstime.Now().UnixMilliseconds() - s.cfg.StartupTime, nil + return mstime.Now().UnixMilliseconds() - s.startupTime.UnixMilliseconds(), nil } diff --git a/server/rpc/handle_version.go b/rpc/handle_version.go similarity index 84% rename from server/rpc/handle_version.go rename to rpc/handle_version.go index d5987efd7..18298a225 100644 --- a/server/rpc/handle_version.go +++ b/rpc/handle_version.go @@ -1,6 +1,6 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // API version constants const ( @@ -12,7 +12,7 @@ const ( // handleVersion implements the version command. func handleVersion(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - result := map[string]rpcmodel.VersionResult{ + result := map[string]model.VersionResult{ "kaspadjsonrpcapi": { VersionString: jsonrpcSemverString, Major: jsonrpcSemverMajor, diff --git a/server/rpc/handle_websocket_help.go b/rpc/handle_websocket_help.go similarity index 85% rename from server/rpc/handle_websocket_help.go rename to rpc/handle_websocket_help.go index 83a8a1c2b..50985fa5e 100644 --- a/server/rpc/handle_websocket_help.go +++ b/rpc/handle_websocket_help.go @@ -1,12 +1,12 @@ package rpc -import "github.com/kaspanet/kaspad/rpcmodel" +import "github.com/kaspanet/kaspad/rpc/model" // handleWebsocketHelp implements the help command for websocket connections. 
func handleWebsocketHelp(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*rpcmodel.HelpCmd) + cmd, ok := icmd.(*model.HelpCmd) if !ok { - return nil, rpcmodel.ErrRPCInternal + return nil, model.ErrRPCInternal } // Provide a usage overview of all commands when no specific command @@ -34,8 +34,8 @@ func handleWebsocketHelp(wsc *wsClient, icmd interface{}) (interface{}, error) { } } if !valid { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, + return nil, &model.RPCError{ + Code: model.ErrRPCInvalidParameter, Message: "Unknown command: " + command, } } diff --git a/server/rpc/log.go b/rpc/log.go similarity index 100% rename from server/rpc/log.go rename to rpc/log.go diff --git a/rpcmodel/CONTRIBUTORS b/rpc/model/CONTRIBUTORS similarity index 100% rename from rpcmodel/CONTRIBUTORS rename to rpc/model/CONTRIBUTORS diff --git a/rpcmodel/README.md b/rpc/model/README.md similarity index 100% rename from rpcmodel/README.md rename to rpc/model/README.md diff --git a/rpcmodel/command_info.go b/rpc/model/command_info.go similarity index 99% rename from rpcmodel/command_info.go rename to rpc/model/command_info.go index 07d1ed24a..c2c13c243 100644 --- a/rpcmodel/command_info.go +++ b/rpc/model/command_info.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model import ( "fmt" diff --git a/rpcmodel/command_info_test.go b/rpc/model/command_info_test.go similarity index 91% rename from rpcmodel/command_info_test.go rename to rpc/model/command_info_test.go index 4ec1d1228..f5b6be937 100644 --- a/rpcmodel/command_info_test.go +++ b/rpc/model/command_info_test.go @@ -2,14 +2,14 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package rpcmodel_test +package model_test import ( "github.com/pkg/errors" "reflect" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestCommandMethod tests the CommandMethod function to ensure it returns the expected @@ -26,33 +26,33 @@ func TestCommandMethod(t *testing.T) { { name: "unregistered type", cmd: (*int)(nil), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "nil pointer of registered type", - cmd: (*rpcmodel.GetBlockCmd)(nil), + cmd: (*model.GetBlockCmd)(nil), method: "getBlock", }, { name: "nil instance of registered type", - cmd: &rpcmodel.GetBlockCountCmd{}, + cmd: &model.GetBlockCountCmd{}, method: "getBlockCount", }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - method, err := rpcmodel.CommandMethod(test.cmd) + method, err := model.CommandMethod(test.cmd) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } if err != nil { - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode - var testRPCModelErr rpcmodel.Error + var testRPCModelErr model.Error errors.As(err, &testRPCModelErr) testErrorCode := testRPCModelErr.ErrorCode if gotErrorCode != testErrorCode { @@ -84,12 +84,12 @@ func TestMethodUsageFlags(t *testing.T) { name string method string err error - flags rpcmodel.UsageFlag + flags model.UsageFlag }{ { name: "unregistered type", method: "bogusMethod", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "getBlock", @@ -100,17 +100,17 @@ func TestMethodUsageFlags(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - flags, err := rpcmodel.MethodUsageFlags(test.method) + flags, err := model.MethodUsageFlags(test.method) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } if err != nil { - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode - var testRPCModelErr rpcmodel.Error + var testRPCModelErr model.Error errors.As(err, &testRPCModelErr) testErrorCode := testRPCModelErr.ErrorCode if gotErrorCode != testErrorCode { @@ -147,7 +147,7 @@ func TestMethodUsageText(t *testing.T) { { name: "unregistered type", method: "bogusMethod", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "getBlockCount", @@ -163,17 +163,17 @@ func TestMethodUsageText(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - usage, err := rpcmodel.MethodUsageText(test.method) + usage, err := model.MethodUsageText(test.method) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } if err != nil { - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode - var testRPCModelErr rpcmodel.Error + var testRPCModelErr model.Error errors.As(err, &testRPCModelErr) testErrorCode := testRPCModelErr.ErrorCode if gotErrorCode != testErrorCode { @@ -195,7 
+195,7 @@ func TestMethodUsageText(t *testing.T) { } // Get the usage again to exercise caching. - usage, err = rpcmodel.MethodUsageText(test.method) + usage, err = model.MethodUsageText(test.method) if err != nil { t.Errorf("Test #%d (%s) unexpected error: %v", i, test.name, err) @@ -431,7 +431,7 @@ func TestFieldUsage(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Ensure usage matches the expected value. - usage := rpcmodel.TstFieldUsage(test.field, test.defValue) + usage := model.TstFieldUsage(test.field, test.defValue) if usage != test.expected { t.Errorf("Test #%d (%s) mismatched usage - got %v, "+ "want %v", i, test.name, usage, test.expected) diff --git a/rpcmodel/command_parse.go b/rpc/model/command_parse.go similarity index 99% rename from rpcmodel/command_parse.go rename to rpc/model/command_parse.go index 3c143f7d2..6fe148681 100644 --- a/rpcmodel/command_parse.go +++ b/rpc/model/command_parse.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model import ( "encoding/json" diff --git a/rpcmodel/command_parse_test.go b/rpc/model/command_parse_test.go similarity index 78% rename from rpcmodel/command_parse_test.go rename to rpc/model/command_parse_test.go index 0ef0682b4..83732a47a 100644 --- a/rpcmodel/command_parse_test.go +++ b/rpc/model/command_parse_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "encoding/json" @@ -11,7 +11,7 @@ import ( "reflect" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestAssignField tests the assignField function handles supported combinations @@ -169,7 +169,7 @@ func TestAssignField(t *testing.T) { for i, test := range tests { dst := reflect.New(reflect.TypeOf(test.dest)).Elem() src := reflect.ValueOf(test.src) - err := rpcmodel.TstAssignField(1, "testField", dst, src) + err := model.TstAssignField(1, "testField", dst, src) if err != nil { t.Errorf("Test #%d (%s) unexpected error: %v", i, test.name, err) @@ -198,133 +198,133 @@ func TestAssignFieldErrors(t *testing.T) { name string dest interface{} src interface{} - err rpcmodel.Error + err model.Error }{ { name: "general incompatible int -> string", dest: string(0), src: int(0), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow source int -> dest int", dest: int8(0), src: int(128), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow source int -> dest uint", dest: uint8(0), src: int(256), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "int -> float", dest: float32(0), src: int(256), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow source uint64 -> dest int64", dest: int64(0), src: uint64(1 << 63), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow source uint -> dest int", dest: int8(0), src: uint(128), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow source uint -> dest uint", dest: uint8(0), src: uint(256), - 
err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "uint -> float", dest: float32(0), src: uint(256), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "float -> int", dest: int(0), src: float32(1.0), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow float64 -> float32", dest: float32(0), src: float64(math.MaxFloat64), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> bool", dest: true, src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> int", dest: int8(0), src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow string -> int", dest: int8(0), src: "128", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> uint", dest: uint8(0), src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow string -> uint", dest: uint8(0), src: "256", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> float", dest: float32(0), src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "overflow string -> float", dest: float32(0), src: "1.7976931348623157e+308", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> array", dest: [3]int{}, src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> slice", dest: []int{}, src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> struct", dest: struct{ A int }{}, src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid string -> map", dest: map[string]int{}, src: "foo", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, } @@ -332,13 +332,13 @@ func TestAssignFieldErrors(t *testing.T) { for i, test := range tests { dst := reflect.New(reflect.TypeOf(test.dest)).Elem() src := reflect.ValueOf(test.src) - err := rpcmodel.TstAssignField(1, "testField", dst, src) + err := model.TstAssignField(1, "testField", dst, src) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != test.err.ErrorCode { @@ -358,43 +358,43 @@ func TestNewCommandErrors(t *testing.T) { name string method string args []interface{} - err rpcmodel.Error + err model.Error }{ { name: "unregistered command", method: "bogusCommand", args: []interface{}{}, - err: rpcmodel.Error{ErrorCode: 
rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "too few parameters to command with required + optional", method: "getBlock", args: []interface{}{}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrNumParams}, + err: model.Error{ErrorCode: model.ErrNumParams}, }, { name: "too many parameters to command with no optional", method: "getBlockCount", args: []interface{}{"123"}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrNumParams}, + err: model.Error{ErrorCode: model.ErrNumParams}, }, { name: "incorrect parameter type", method: "getBlock", args: []interface{}{1}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := rpcmodel.NewCommand(test.method, test.args...) + _, err := model.NewCommand(test.method, test.args...) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != test.err.ErrorCode { @@ -414,37 +414,37 @@ func TestMarshalCommandErrors(t *testing.T) { name string id interface{} cmd interface{} - err rpcmodel.Error + err model.Error }{ { name: "unregistered type", id: 1, cmd: (*int)(nil), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "nil instance of registered type", id: 1, - cmd: (*rpcmodel.GetBlockCmd)(nil), - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + cmd: (*model.GetBlockCmd)(nil), + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "nil instance of registered type", id: []int{0, 1}, - cmd: &rpcmodel.GetBlockCountCmd{}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + cmd: &model.GetBlockCountCmd{}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := rpcmodel.MarshalCommand(test.id, test.cmd) + _, err := model.MarshalCommand(test.id, test.cmd) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != test.err.ErrorCode { @@ -462,60 +462,60 @@ func TestUnmarshalCommandErrors(t *testing.T) { tests := []struct { name string - request rpcmodel.Request - err rpcmodel.Error + request model.Request + err model.Error }{ { name: "unregistered type", - request: rpcmodel.Request{ + request: model.Request{ JSONRPC: "1.0", Method: "bogusMethod", Params: nil, ID: nil, }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "incorrect number of params", - request: rpcmodel.Request{ + request: model.Request{ JSONRPC: "1.0", Method: "getBlockCount", Params: []json.RawMessage{[]byte(`"bogusparam"`)}, ID: nil, }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrNumParams}, + err: model.Error{ErrorCode: model.ErrNumParams}, }, { name: "invalid type for a parameter", - request: rpcmodel.Request{ + request: model.Request{ JSONRPC: "1.0", Method: "getBlock", Params: []json.RawMessage{[]byte("1")}, ID: 
nil, }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid JSON for a parameter", - request: rpcmodel.Request{ + request: model.Request{ JSONRPC: "1.0", Method: "getBlock", Params: []json.RawMessage{[]byte(`"1`)}, ID: nil, }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := rpcmodel.UnmarshalCommand(&test.request) + _, err := model.UnmarshalCommand(&test.request) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != test.err.ErrorCode { diff --git a/rpcmodel/doc.go b/rpc/model/doc.go similarity index 97% rename from rpcmodel/doc.go rename to rpc/model/doc.go index e5c4948d9..f0a7d877f 100644 --- a/rpcmodel/doc.go +++ b/rpc/model/doc.go @@ -1,5 +1,5 @@ /* -Package rpcmodel provides primitives for working with the kaspa JSON-RPC API. +Package model provides primitives for working with the kaspa JSON-RPC API. Overview @@ -128,10 +128,10 @@ returned from the various functions available in this package. They identify issues such as unsupported field types, attempts to register malformed commands, and attempting to create a new command with an improper number of parameters. The specific reason for the error can be detected by type asserting it to a -*rpcmodel.Error and accessing the ErrorCode field. +*model.Error and accessing the ErrorCode field. The second category of errors (type RPCError), on the other hand, are useful for returning errors to RPC clients. Consequently, they are used in the previously described Response type. */ -package rpcmodel +package model diff --git a/rpcmodel/error.go b/rpc/model/error.go similarity index 99% rename from rpcmodel/error.go rename to rpc/model/error.go index 31945f3cf..0d9eb9f49 100644 --- a/rpcmodel/error.go +++ b/rpc/model/error.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model import ( "fmt" diff --git a/rpcmodel/error_test.go b/rpc/model/error_test.go similarity index 56% rename from rpcmodel/error_test.go rename to rpc/model/error_test.go index f17acd34b..d6a75381f 100644 --- a/rpcmodel/error_test.go +++ b/rpc/model/error_test.go @@ -2,12 +2,12 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestErrorCodeStringer tests the stringized output for the ErrorCode type. 
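// The doc.go hunk above keeps the package's split between two error categories.
// As a quick, hedged illustration of the first category under the renamed import
// path "github.com/kaspanet/kaspad/rpc/model" (a standalone sketch, not part of
// this patch; "bogusCommand" is simply an unregistered placeholder method name,
// mirroring the unregistered-method cases in the tests):
package main

import (
	"fmt"

	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/rpc/model"
)

func main() {
	// NewCommand is expected to fail for an unregistered method and to
	// return a model.Error whose ErrorCode pinpoints the problem.
	_, err := model.NewCommand("bogusCommand")

	var modelErr model.Error
	if errors.As(err, &modelErr) {
		fmt.Println(modelErr.ErrorCode == model.ErrUnregisteredMethod) // expected: true
	}
}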
@@ -15,26 +15,26 @@ func TestErrorCodeStringer(t *testing.T) { t.Parallel() tests := []struct { - in rpcmodel.ErrorCode + in model.ErrorCode want string }{ - {rpcmodel.ErrDuplicateMethod, "ErrDuplicateMethod"}, - {rpcmodel.ErrInvalidUsageFlags, "ErrInvalidUsageFlags"}, - {rpcmodel.ErrInvalidType, "ErrInvalidType"}, - {rpcmodel.ErrEmbeddedType, "ErrEmbeddedType"}, - {rpcmodel.ErrUnexportedField, "ErrUnexportedField"}, - {rpcmodel.ErrUnsupportedFieldType, "ErrUnsupportedFieldType"}, - {rpcmodel.ErrNonOptionalField, "ErrNonOptionalField"}, - {rpcmodel.ErrNonOptionalDefault, "ErrNonOptionalDefault"}, - {rpcmodel.ErrMismatchedDefault, "ErrMismatchedDefault"}, - {rpcmodel.ErrUnregisteredMethod, "ErrUnregisteredMethod"}, - {rpcmodel.ErrNumParams, "ErrNumParams"}, - {rpcmodel.ErrMissingDescription, "ErrMissingDescription"}, + {model.ErrDuplicateMethod, "ErrDuplicateMethod"}, + {model.ErrInvalidUsageFlags, "ErrInvalidUsageFlags"}, + {model.ErrInvalidType, "ErrInvalidType"}, + {model.ErrEmbeddedType, "ErrEmbeddedType"}, + {model.ErrUnexportedField, "ErrUnexportedField"}, + {model.ErrUnsupportedFieldType, "ErrUnsupportedFieldType"}, + {model.ErrNonOptionalField, "ErrNonOptionalField"}, + {model.ErrNonOptionalDefault, "ErrNonOptionalDefault"}, + {model.ErrMismatchedDefault, "ErrMismatchedDefault"}, + {model.ErrUnregisteredMethod, "ErrUnregisteredMethod"}, + {model.ErrNumParams, "ErrNumParams"}, + {model.ErrMissingDescription, "ErrMissingDescription"}, {0xffff, "Unknown ErrorCode (65535)"}, } // Detect additional error codes that don't have the stringer added. - if len(tests)-1 != int(rpcmodel.TstNumErrorCodes) { + if len(tests)-1 != int(model.TstNumErrorCodes) { t.Errorf("It appears an error code was added without adding an " + "associated stringer test") } @@ -55,15 +55,15 @@ func TestError(t *testing.T) { t.Parallel() tests := []struct { - in rpcmodel.Error + in model.Error want string }{ { - rpcmodel.Error{Description: "some error"}, + model.Error{Description: "some error"}, "some error", }, { - rpcmodel.Error{Description: "human-readable error"}, + model.Error{Description: "human-readable error"}, "human-readable error", }, } diff --git a/rpcmodel/example_test.go b/rpc/model/example_test.go similarity index 91% rename from rpcmodel/example_test.go rename to rpc/model/example_test.go index 6d4265d44..6b23fc1ec 100644 --- a/rpcmodel/example_test.go +++ b/rpc/model/example_test.go @@ -2,14 +2,14 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "encoding/json" "fmt" "github.com/kaspanet/kaspad/util/pointers" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // This example demonstrates how to create and marshal a command into a JSON-RPC @@ -22,13 +22,13 @@ func ExampleMarshalCommand() { // convenience function for creating a pointer out of a primitive for // optional parameters. blockHash := "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" - gbCmd := rpcmodel.NewGetBlockCmd(blockHash, pointers.Bool(false), nil, nil) + gbCmd := model.NewGetBlockCmd(blockHash, pointers.Bool(false), nil, nil) // Marshal the command to the format suitable for sending to the RPC // server. Typically the client would increment the id here which is // request so the response can be identified. 
id := 1 - marshalledBytes, err := rpcmodel.MarshalCommand(id, gbCmd) + marshalledBytes, err := model.MarshalCommand(id, gbCmd) if err != nil { fmt.Println(err) return @@ -50,7 +50,7 @@ func ExampleUnmarshalCommand() { data := []byte(`{"jsonrpc":"1.0","method":"getBlock","params":["000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",false],"id":1}`) // Unmarshal the raw bytes from the wire into a JSON-RPC request. - var request rpcmodel.Request + var request model.Request if err := json.Unmarshal(data, &request); err != nil { fmt.Println(err) return @@ -70,14 +70,14 @@ func ExampleUnmarshalCommand() { } // Unmarshal the request into a concrete command. - cmd, err := rpcmodel.UnmarshalCommand(&request) + cmd, err := model.UnmarshalCommand(&request) if err != nil { fmt.Println(err) return } // Type assert the command to the appropriate type. - gbCmd, ok := cmd.(*rpcmodel.GetBlockCmd) + gbCmd, ok := cmd.(*model.GetBlockCmd) if !ok { fmt.Printf("Incorrect command type: %T\n", cmd) return @@ -101,7 +101,7 @@ func ExampleUnmarshalCommand() { func ExampleMarshalResponse() { // Marshal a new JSON-RPC response. For example, this is a response // to a getblockheight request. - marshalledBytes, err := rpcmodel.MarshalResponse(1, 350001, nil) + marshalledBytes, err := model.MarshalResponse(1, 350001, nil) if err != nil { fmt.Println(err) return @@ -125,7 +125,7 @@ func Example_unmarshalResponse() { data := []byte(`{"result":350001,"error":null,"id":1}`) // Unmarshal the raw bytes from the wire into a JSON-RPC response. - var response rpcmodel.Response + var response model.Response if err := json.Unmarshal(data, &response); err != nil { fmt.Println("Malformed JSON-RPC response:", err) return diff --git a/rpcmodel/export_test.go b/rpc/model/export_test.go similarity index 99% rename from rpcmodel/export_test.go rename to rpc/model/export_test.go index 23b26b54c..d1beeadb4 100644 --- a/rpcmodel/export_test.go +++ b/rpc/model/export_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model // TstHighestUsageFlagBit makes the internal highestUsageFlagBit parameter // available to the test package. diff --git a/rpcmodel/help.go b/rpc/model/help.go similarity index 99% rename from rpcmodel/help.go rename to rpc/model/help.go index 2914d9e4e..9a4c36848 100644 --- a/rpcmodel/help.go +++ b/rpc/model/help.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model import ( "bytes" diff --git a/rpcmodel/help_test.go b/rpc/model/help_test.go similarity index 94% rename from rpcmodel/help_test.go rename to rpc/model/help_test.go index 16e24a812..a5c4b42c7 100644 --- a/rpcmodel/help_test.go +++ b/rpc/model/help_test.go @@ -2,14 +2,14 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "github.com/pkg/errors" "reflect" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestHelpReflectInternals ensures the various help functions which deal with @@ -238,7 +238,7 @@ func TestHelpReflectInternals(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Ensure the description key is the expected value. 
- key := rpcmodel.TstReflectTypeToJSONType(xT, test.reflectType) + key := model.TstReflectTypeToJSONType(xT, test.reflectType) if key != test.key { t.Errorf("Test #%d (%s) unexpected key - got: %v, "+ "want: %v", i, test.name, key, test.key) @@ -246,7 +246,7 @@ func TestHelpReflectInternals(t *testing.T) { } // Ensure the generated example is as expected. - examples, isComplex := rpcmodel.TstReflectTypeToJSONExample(xT, + examples, isComplex := model.TstReflectTypeToJSONExample(xT, test.reflectType, test.indentLevel, "fdk") if isComplex != test.isComplex { t.Errorf("Test #%d (%s) unexpected isComplex - got: %v, "+ @@ -270,7 +270,7 @@ func TestHelpReflectInternals(t *testing.T) { } // Ensure the generated result type help is as expected. - helpText := rpcmodel.TstResultTypeHelp(xT, test.reflectType, "fdk") + helpText := model.TstResultTypeHelp(xT, test.reflectType, "fdk") if helpText != test.help { t.Errorf("Test #%d (%s) unexpected result help - "+ "got: %v, want: %v", i, test.name, helpText, @@ -278,7 +278,7 @@ func TestHelpReflectInternals(t *testing.T) { continue } - isValid := rpcmodel.TstIsValidResultType(test.reflectType.Kind()) + isValid := model.TstIsValidResultType(test.reflectType.Kind()) if isValid != !test.isInvalid { t.Errorf("Test #%d (%s) unexpected result type validity "+ "- got: %v", i, test.name, isValid) @@ -403,7 +403,7 @@ func TestResultStructHelp(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - results := rpcmodel.TstResultStructHelp(xT, test.reflectType, 0) + results := model.TstResultStructHelp(xT, test.reflectType, 0) if len(results) != len(test.expected) { t.Errorf("Test #%d (%s) unexpected result length - "+ "got: %v, want: %v", i, test.name, len(results), @@ -556,7 +556,7 @@ func TestHelpArgInternals(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - help := rpcmodel.TstArgHelp(xT, test.reflectType, test.defaults, + help := model.TstArgHelp(xT, test.reflectType, test.defaults, test.method) if help != test.help { t.Errorf("Test #%d (%s) unexpected help - got:\n%v\n"+ @@ -649,7 +649,7 @@ func TestMethodHelp(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - help := rpcmodel.TestMethodHelp(xT, test.reflectType, + help := model.TestMethodHelp(xT, test.reflectType, test.defaults, test.method, test.resultTypes) if help != test.help { t.Errorf("Test #%d (%s) unexpected help - got:\n%v\n"+ @@ -668,43 +668,43 @@ func TestGenerateHelpErrors(t *testing.T) { name string method string resultTypes []interface{} - err rpcmodel.Error + err model.Error }{ { name: "unregistered command", method: "boguscommand", - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnregisteredMethod}, + err: model.Error{ErrorCode: model.ErrUnregisteredMethod}, }, { name: "non-pointer result type", method: "help", resultTypes: []interface{}{0}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid result type", method: "help", resultTypes: []interface{}{(*complex64)(nil)}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "missing description", method: "help", resultTypes: []interface{}{(*string)(nil), nil}, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrMissingDescription}, + err: model.Error{ErrorCode: model.ErrMissingDescription}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := rpcmodel.GenerateHelp(test.method, 
nil, + _, err := model.GenerateHelp(test.method, nil, test.resultTypes...) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != test.err.ErrorCode { @@ -726,7 +726,7 @@ func TestGenerateHelp(t *testing.T) { "help--synopsis": "test", "help-command": "test", } - help, err := rpcmodel.GenerateHelp("help", descs) + help, err := model.GenerateHelp("help", descs) if err != nil { t.Fatalf("GenerateHelp: unexpected error: %v", err) } diff --git a/rpcmodel/jsonrpc.go b/rpc/model/jsonrpc.go similarity index 99% rename from rpcmodel/jsonrpc.go rename to rpc/model/jsonrpc.go index c1f62fd10..6c3e964f1 100644 --- a/rpcmodel/jsonrpc.go +++ b/rpc/model/jsonrpc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model import ( "encoding/json" diff --git a/rpcmodel/jsonrpc_errors.go b/rpc/model/jsonrpc_errors.go similarity index 99% rename from rpcmodel/jsonrpc_errors.go rename to rpc/model/jsonrpc_errors.go index 00eb32a54..81a39ea01 100644 --- a/rpcmodel/jsonrpc_errors.go +++ b/rpc/model/jsonrpc_errors.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model // Standard JSON-RPC 2.0 errors. var ( diff --git a/rpcmodel/jsonrpc_test.go b/rpc/model/jsonrpc_test.go similarity index 82% rename from rpcmodel/jsonrpc_test.go rename to rpc/model/jsonrpc_test.go index 022c829f2..2891f186a 100644 --- a/rpcmodel/jsonrpc_test.go +++ b/rpc/model/jsonrpc_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "encoding/json" @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestIsValidIDType ensures the IsValidIDType function behaves as expected. 
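// The second error category (RPCError) is the one meant for RPC clients and is
// what the TestMarshalResponse hunk below exercises. A minimal sketch of
// producing such a response, assuming the post-rename import path (standalone,
// not part of this patch; the id value 1 mirrors the test's expected output):
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/rpc/model"
)

func main() {
	// Build the client-facing error a server would attach to a response.
	rpcErr := model.NewRPCError(model.ErrRPCBlockNotFound, "123 not found")

	// Marshal a response with a nil result and that error; per the test
	// table the expected wire form is:
	// {"result":null,"error":{"code":-5,"message":"123 not found"},"id":1}
	marshalled, err := model.MarshalResponse(1, nil, rpcErr)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(marshalled))
}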
@@ -45,7 +45,7 @@ func TestIsValidIDType(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - if rpcmodel.IsValidIDType(test.id) != test.isValid { + if model.IsValidIDType(test.id) != test.isValid { t.Errorf("Test #%d (%s) valid mismatch - got %v, "+ "want %v", i, test.name, !test.isValid, test.isValid) @@ -62,7 +62,7 @@ func TestMarshalResponse(t *testing.T) { tests := []struct { name string result interface{} - jsonErr *rpcmodel.RPCError + jsonErr *model.RPCError expected []byte }{ { @@ -74,8 +74,8 @@ func TestMarshalResponse(t *testing.T) { { name: "result with error", result: nil, - jsonErr: func() *rpcmodel.RPCError { - return rpcmodel.NewRPCError(rpcmodel.ErrRPCBlockNotFound, "123 not found") + jsonErr: func() *model.RPCError { + return model.NewRPCError(model.ErrRPCBlockNotFound, "123 not found") }(), expected: []byte(`{"result":null,"error":{"code":-5,"message":"123 not found"},"id":1}`), }, @@ -84,7 +84,7 @@ func TestMarshalResponse(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { _, _ = i, test - marshalled, err := rpcmodel.MarshalResponse(testID, test.result, test.jsonErr) + marshalled, err := model.MarshalResponse(testID, test.result, test.jsonErr) if err != nil { t.Errorf("Test #%d (%s) unexpected error: %v", i, test.name, err) @@ -105,7 +105,7 @@ func TestMiscErrors(t *testing.T) { // Force an error in NewRequest by giving it a parameter type that is // not supported. - _, err := rpcmodel.NewRequest(nil, "test", []interface{}{make(chan int)}) + _, err := model.NewRequest(nil, "test", []interface{}{make(chan int)}) if err == nil { t.Error("NewRequest: did not receive error") return @@ -113,9 +113,9 @@ func TestMiscErrors(t *testing.T) { // Force an error in MarshalResponse by giving it an id type that is not // supported. - wantErr := rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType} - _, err = rpcmodel.MarshalResponse(make(chan int), nil, nil) - var rpcModelErr rpcmodel.Error + wantErr := model.Error{ErrorCode: model.ErrInvalidType} + _, err = model.MarshalResponse(make(chan int), nil, nil) + var rpcModelErr model.Error if ok := errors.As(err, &rpcModelErr); !ok || rpcModelErr.ErrorCode != wantErr.ErrorCode { t.Errorf("MarshalResult: did not receive expected error - got "+ "%v (%[1]T), want %v (%[2]T)", err, wantErr) @@ -124,7 +124,7 @@ func TestMiscErrors(t *testing.T) { // Force an error in MarshalResponse by giving it a result type that // can't be marshalled. - _, err = rpcmodel.MarshalResponse(1, make(chan int), nil) + _, err = model.MarshalResponse(1, make(chan int), nil) if jErr := &(json.UnsupportedTypeError{}); !errors.As(err, &jErr) { wantErr := &json.UnsupportedTypeError{} t.Errorf("MarshalResult: did not receive expected error - got "+ @@ -138,15 +138,15 @@ func TestRPCError(t *testing.T) { t.Parallel() tests := []struct { - in *rpcmodel.RPCError + in *model.RPCError want string }{ { - rpcmodel.ErrRPCInvalidRequest, + model.ErrRPCInvalidRequest, "-32600: Invalid request", }, { - rpcmodel.ErrRPCMethodNotFound, + model.ErrRPCMethodNotFound, "-32601: Method not found", }, } diff --git a/rpcmodel/register.go b/rpc/model/register.go similarity index 99% rename from rpcmodel/register.go rename to rpc/model/register.go index 41fc6916a..28d5f0ec9 100644 --- a/rpcmodel/register.go +++ b/rpc/model/register.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package rpcmodel +package model import ( "encoding/json" diff --git a/rpcmodel/register_test.go b/rpc/model/register_test.go similarity index 76% rename from rpcmodel/register_test.go rename to rpc/model/register_test.go index 27601ed95..1fb79f169 100644 --- a/rpcmodel/register_test.go +++ b/rpc/model/register_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "github.com/pkg/errors" @@ -10,7 +10,7 @@ import ( "sort" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestUsageFlagStringer tests the stringized output for the UsageFlag type. @@ -18,21 +18,21 @@ func TestUsageFlagStringer(t *testing.T) { t.Parallel() tests := []struct { - in rpcmodel.UsageFlag + in model.UsageFlag want string }{ {0, "0x0"}, - {rpcmodel.UFWebsocketOnly, "UFWebsocketOnly"}, - {rpcmodel.UFNotification, "UFNotification"}, - {rpcmodel.UFWebsocketOnly | rpcmodel.UFNotification, + {model.UFWebsocketOnly, "UFWebsocketOnly"}, + {model.UFNotification, "UFNotification"}, + {model.UFWebsocketOnly | model.UFNotification, "UFWebsocketOnly|UFNotification"}, - {rpcmodel.UFWebsocketOnly | rpcmodel.UFNotification | (1 << 31), + {model.UFWebsocketOnly | model.UFNotification | (1 << 31), "UFWebsocketOnly|UFNotification|0x80000000"}, } // Detect additional usage flags that don't have the stringer added. numUsageFlags := 0 - highestUsageFlagBit := rpcmodel.TstHighestUsageFlagBit + highestUsageFlagBit := model.TstHighestUsageFlagBit for highestUsageFlagBit > 1 { numUsageFlags++ highestUsageFlagBit >>= 1 @@ -62,8 +62,8 @@ func TestRegisterCmdErrors(t *testing.T) { name string method string cmdFunc func() interface{} - flags rpcmodel.UsageFlag - err rpcmodel.Error + flags model.UsageFlag + err model.Error }{ { name: "duplicate method", @@ -71,7 +71,7 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return struct{}{} }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrDuplicateMethod}, + err: model.Error{ErrorCode: model.ErrDuplicateMethod}, }, { name: "invalid usage flags", @@ -79,8 +79,8 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return 0 }, - flags: rpcmodel.TstHighestUsageFlagBit, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidUsageFlags}, + flags: model.TstHighestUsageFlagBit, + err: model.Error{ErrorCode: model.ErrInvalidUsageFlags}, }, { name: "invalid type", @@ -88,7 +88,7 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return 0 }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid type 2", @@ -96,7 +96,7 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return &[]string{} }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "embedded field", @@ -105,7 +105,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ int } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrEmbeddedType}, + err: model.Error{ErrorCode: model.ErrEmbeddedType}, }, { name: "unexported field", @@ -114,7 +114,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ a int } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnexportedField}, + err: model.Error{ErrorCode: model.ErrUnexportedField}, }, { name: "unsupported field type 1", @@ -123,7 +123,7 @@ func 
TestRegisterCmdErrors(t *testing.T) { type test struct{ A **int } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnsupportedFieldType}, + err: model.Error{ErrorCode: model.ErrUnsupportedFieldType}, }, { name: "unsupported field type 2", @@ -132,7 +132,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A chan int } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnsupportedFieldType}, + err: model.Error{ErrorCode: model.ErrUnsupportedFieldType}, }, { name: "unsupported field type 3", @@ -141,7 +141,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A complex64 } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnsupportedFieldType}, + err: model.Error{ErrorCode: model.ErrUnsupportedFieldType}, }, { name: "unsupported field type 4", @@ -150,7 +150,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A complex128 } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnsupportedFieldType}, + err: model.Error{ErrorCode: model.ErrUnsupportedFieldType}, }, { name: "unsupported field type 5", @@ -159,7 +159,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A func() } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnsupportedFieldType}, + err: model.Error{ErrorCode: model.ErrUnsupportedFieldType}, }, { name: "unsupported field type 6", @@ -168,7 +168,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A interface{} } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrUnsupportedFieldType}, + err: model.Error{ErrorCode: model.ErrUnsupportedFieldType}, }, { name: "required after optional", @@ -180,7 +180,7 @@ func TestRegisterCmdErrors(t *testing.T) { } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrNonOptionalField}, + err: model.Error{ErrorCode: model.ErrNonOptionalField}, }, { name: "non-optional with default", @@ -191,7 +191,7 @@ func TestRegisterCmdErrors(t *testing.T) { } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrNonOptionalDefault}, + err: model.Error{ErrorCode: model.ErrNonOptionalDefault}, }, { name: "mismatched default", @@ -202,20 +202,20 @@ func TestRegisterCmdErrors(t *testing.T) { } return (*test)(nil) }, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrMismatchedDefault}, + err: model.Error{ErrorCode: model.ErrMismatchedDefault}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - err := rpcmodel.RegisterCmd(test.method, test.cmdFunc(), + err := model.RegisterCmd(test.method, test.cmdFunc(), test.flags) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T, "+ "want %T", i, test.name, err, test.err) continue } - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != test.err.ErrorCode { @@ -241,7 +241,7 @@ func TestMustRegisterCmdPanic(t *testing.T) { }() // Intentionally try to register an invalid type to force a panic. - rpcmodel.MustRegisterCommand("panicme", 0, 0) + model.MustRegisterCommand("panicme", 0, 0) } // TestRegisteredCmdMethods tests the RegisteredCmdMethods function ensure it @@ -250,7 +250,7 @@ func TestRegisteredCmdMethods(t *testing.T) { t.Parallel() // Ensure the registered methods are returned. 
- methods := rpcmodel.RegisteredCmdMethods() + methods := model.RegisteredCmdMethods() if len(methods) == 0 { t.Fatal("RegisteredCmdMethods: no methods") } diff --git a/rpcmodel/rpc_commands.go b/rpc/model/rpc_commands.go similarity index 84% rename from rpcmodel/rpc_commands.go rename to rpc/model/rpc_commands.go index 67f90146e..59e729780 100644 --- a/rpcmodel/rpc_commands.go +++ b/rpc/model/rpc_commands.go @@ -5,38 +5,38 @@ // NOTE: This file is intended to house the RPC commands that are supported by // a kaspa rpc server. -package rpcmodel +package model import ( "encoding/json" "fmt" ) -// AddManualNodeCmd defines the addManualNode JSON-RPC command. -type AddManualNodeCmd struct { - Addr string - OneTry *bool `jsonrpcdefault:"false"` +// ConnectCmd defines the connect JSON-RPC command. +type ConnectCmd struct { + Address string + IsPermanent *bool `jsonrpcdefault:"false"` } -// NewAddManualNodeCmd returns a new instance which can be used to issue an addManualNode +// NewConnectCmd returns a new instance which can be used to issue a connect // JSON-RPC command. -func NewAddManualNodeCmd(addr string, oneTry *bool) *AddManualNodeCmd { - return &AddManualNodeCmd{ - Addr: addr, - OneTry: oneTry, +func NewConnectCmd(address string, isPermanent *bool) *ConnectCmd { + return &ConnectCmd{ + Address: address, + IsPermanent: isPermanent, } } -// RemoveManualNodeCmd defines the removeManualNode JSON-RPC command. -type RemoveManualNodeCmd struct { - Addr string +// DisconnectCmd defines the disconnect JSON-RPC command. +type DisconnectCmd struct { + Address string } -// NewRemoveManualNodeCmd returns a new instance which can be used to issue an removeManualNode +// NewDisconnectCmd returns a new instance which can be used to issue a disconnect // JSON-RPC command. -func NewRemoveManualNodeCmd(addr string) *RemoveManualNodeCmd { - return &RemoveManualNodeCmd{ - Addr: addr, +func NewDisconnectCmd(address string) *DisconnectCmd { + return &DisconnectCmd{ + Address: address, } } @@ -47,81 +47,6 @@ type TransactionInput struct { Vout uint32 `json:"vout"` } -// CreateRawTransactionCmd defines the createRawTransaction JSON-RPC command. -type CreateRawTransactionCmd struct { - Inputs []TransactionInput - Amounts map[string]float64 `jsonrpcusage:"{\"address\":amount,...}"` // In KAS - LockTime *uint64 -} - -// NewCreateRawTransactionCmd returns a new instance which can be used to issue -// a createRawTransaction JSON-RPC command. -// -// Amounts are in KAS. -func NewCreateRawTransactionCmd(inputs []TransactionInput, amounts map[string]float64, - lockTime *uint64) *CreateRawTransactionCmd { - - return &CreateRawTransactionCmd{ - Inputs: inputs, - Amounts: amounts, - LockTime: lockTime, - } -} - -// DecodeRawTransactionCmd defines the decodeRawTransaction JSON-RPC command. -type DecodeRawTransactionCmd struct { - HexTx string -} - -// NewDecodeRawTransactionCmd returns a new instance which can be used to issue -// a decodeRawTransaction JSON-RPC command. -func NewDecodeRawTransactionCmd(hexTx string) *DecodeRawTransactionCmd { - return &DecodeRawTransactionCmd{ - HexTx: hexTx, - } -} - -// DecodeScriptCmd defines the decodeScript JSON-RPC command. -type DecodeScriptCmd struct { - HexScript string -} - -// NewDecodeScriptCmd returns a new instance which can be used to issue a -// decodeScript JSON-RPC command. -func NewDecodeScriptCmd(hexScript string) *DecodeScriptCmd { - return &DecodeScriptCmd{ - HexScript: hexScript, - } -} - -// GetManualNodeInfoCmd defines the getManualNodeInfo JSON-RPC command.
-type GetManualNodeInfoCmd struct { - Node string - Details *bool `jsonrpcdefault:"true"` -} - -// NewGetManualNodeInfoCmd returns a new instance which can be used to issue a -// getManualNodeInfo JSON-RPC command. -func NewGetManualNodeInfoCmd(node string, details *bool) *GetManualNodeInfoCmd { - return &GetManualNodeInfoCmd{ - Details: details, - Node: node, - } -} - -// GetAllManualNodesInfoCmd defines the getAllManualNodesInfo JSON-RPC command. -type GetAllManualNodesInfoCmd struct { - Details *bool `jsonrpcdefault:"true"` -} - -// NewGetAllManualNodesInfoCmd returns a new instance which can be used to issue a -// getAllManualNodesInfo JSON-RPC command. -func NewGetAllManualNodesInfoCmd(details *bool) *GetAllManualNodesInfoCmd { - return &GetAllManualNodesInfoCmd{ - Details: details, - } -} - // GetSelectedTipHashCmd defines the getSelectedTipHash JSON-RPC command. type GetSelectedTipHashCmd struct{} @@ -667,11 +592,7 @@ func init() { // No special flags for commands in this file. flags := UsageFlag(0) - MustRegisterCommand("addManualNode", (*AddManualNodeCmd)(nil), flags) - MustRegisterCommand("createRawTransaction", (*CreateRawTransactionCmd)(nil), flags) - MustRegisterCommand("decodeRawTransaction", (*DecodeRawTransactionCmd)(nil), flags) - MustRegisterCommand("decodeScript", (*DecodeScriptCmd)(nil), flags) - MustRegisterCommand("getAllManualNodesInfo", (*GetAllManualNodesInfoCmd)(nil), flags) + MustRegisterCommand("connect", (*ConnectCmd)(nil), flags) MustRegisterCommand("getSelectedTipHash", (*GetSelectedTipHashCmd)(nil), flags) MustRegisterCommand("getBlock", (*GetBlockCmd)(nil), flags) MustRegisterCommand("getBlocks", (*GetBlocksCmd)(nil), flags) @@ -684,7 +605,6 @@ func init() { MustRegisterCommand("getConnectionCount", (*GetConnectionCountCmd)(nil), flags) MustRegisterCommand("getDifficulty", (*GetDifficultyCmd)(nil), flags) MustRegisterCommand("getInfo", (*GetInfoCmd)(nil), flags) - MustRegisterCommand("getManualNodeInfo", (*GetManualNodeInfoCmd)(nil), flags) MustRegisterCommand("getMempoolEntry", (*GetMempoolEntryCmd)(nil), flags) MustRegisterCommand("getMempoolInfo", (*GetMempoolInfoCmd)(nil), flags) MustRegisterCommand("getNetworkInfo", (*GetNetworkInfoCmd)(nil), flags) @@ -697,7 +617,7 @@ func init() { MustRegisterCommand("getTxOutSetInfo", (*GetTxOutSetInfoCmd)(nil), flags) MustRegisterCommand("help", (*HelpCmd)(nil), flags) MustRegisterCommand("ping", (*PingCmd)(nil), flags) - MustRegisterCommand("removeManualNode", (*RemoveManualNodeCmd)(nil), flags) + MustRegisterCommand("disconnect", (*DisconnectCmd)(nil), flags) MustRegisterCommand("sendRawTransaction", (*SendRawTransactionCmd)(nil), flags) MustRegisterCommand("stop", (*StopCmd)(nil), flags) MustRegisterCommand("submitBlock", (*SubmitBlockCmd)(nil), flags) diff --git a/rpcmodel/rpc_commands_test.go b/rpc/model/rpc_commands_test.go similarity index 58% rename from rpcmodel/rpc_commands_test.go rename to rpc/model/rpc_commands_test.go index bb0e3f0ef..3c52e21a5 100644 --- a/rpcmodel/rpc_commands_test.go +++ b/rpc/model/rpc_commands_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
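// The connect/disconnect commands introduced in rpc_commands.go above replace
// the manual-node commands. A minimal, hedged sketch of building and marshalling
// the new connect command under the post-rename import path (standalone, not
// part of this patch; the expected wire form mirrors the "connect" case in the
// rpc_commands_test.go hunks below):
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/rpc/model"
)

func main() {
	// Passing nil omits the optional IsPermanent parameter, whose
	// jsonrpcdefault is false.
	cmd := model.NewConnectCmd("127.0.0.1", nil)

	// Marshal into a JSON-RPC request with id 1; expected:
	// {"jsonrpc":"1.0","method":"connect","params":["127.0.0.1"],"id":1}
	marshalled, err := model.MarshalCommand(1, cmd)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(marshalled))
}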
-package rpcmodel_test +package model_test import ( "bytes" @@ -13,7 +13,7 @@ import ( "reflect" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestRPCServerCommands tests all of the kaspa rpc server commands marshal and unmarshal @@ -32,110 +32,37 @@ func TestRPCServerCommands(t *testing.T) { unmarshalled interface{} }{ { - name: "addManualNode", + name: "connect", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("addManualNode", "127.0.0.1") + return model.NewCommand("connect", "127.0.0.1") }, staticCmd: func() interface{} { - return rpcmodel.NewAddManualNodeCmd("127.0.0.1", nil) + return model.NewConnectCmd("127.0.0.1", nil) }, - marshalled: `{"jsonrpc":"1.0","method":"addManualNode","params":["127.0.0.1"],"id":1}`, - unmarshalled: &rpcmodel.AddManualNodeCmd{Addr: "127.0.0.1", OneTry: pointers.Bool(false)}, - }, - { - name: "createRawTransaction", - newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("createRawTransaction", `[{"txId":"123","vout":1}]`, - `{"456":0.0123}`) - }, - staticCmd: func() interface{} { - txInputs := []rpcmodel.TransactionInput{ - {TxID: "123", Vout: 1}, - } - amounts := map[string]float64{"456": .0123} - return rpcmodel.NewCreateRawTransactionCmd(txInputs, amounts, nil) - }, - marshalled: `{"jsonrpc":"1.0","method":"createRawTransaction","params":[[{"txId":"123","vout":1}],{"456":0.0123}],"id":1}`, - unmarshalled: &rpcmodel.CreateRawTransactionCmd{ - Inputs: []rpcmodel.TransactionInput{{TxID: "123", Vout: 1}}, - Amounts: map[string]float64{"456": .0123}, - }, - }, - { - name: "createRawTransaction optional", - newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("createRawTransaction", `[{"txId":"123","vout":1}]`, - `{"456":0.0123}`, int64(12312333333)) - }, - staticCmd: func() interface{} { - txInputs := []rpcmodel.TransactionInput{ - {TxID: "123", Vout: 1}, - } - amounts := map[string]float64{"456": .0123} - return rpcmodel.NewCreateRawTransactionCmd(txInputs, amounts, pointers.Uint64(12312333333)) - }, - marshalled: `{"jsonrpc":"1.0","method":"createRawTransaction","params":[[{"txId":"123","vout":1}],{"456":0.0123},12312333333],"id":1}`, - unmarshalled: &rpcmodel.CreateRawTransactionCmd{ - Inputs: []rpcmodel.TransactionInput{{TxID: "123", Vout: 1}}, - Amounts: map[string]float64{"456": .0123}, - LockTime: pointers.Uint64(12312333333), - }, - }, - - { - name: "decodeRawTransaction", - newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("decodeRawTransaction", "123") - }, - staticCmd: func() interface{} { - return rpcmodel.NewDecodeRawTransactionCmd("123") - }, - marshalled: `{"jsonrpc":"1.0","method":"decodeRawTransaction","params":["123"],"id":1}`, - unmarshalled: &rpcmodel.DecodeRawTransactionCmd{HexTx: "123"}, - }, - { - name: "decodeScript", - newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("decodeScript", "00") - }, - staticCmd: func() interface{} { - return rpcmodel.NewDecodeScriptCmd("00") - }, - marshalled: `{"jsonrpc":"1.0","method":"decodeScript","params":["00"],"id":1}`, - unmarshalled: &rpcmodel.DecodeScriptCmd{HexScript: "00"}, - }, - { - name: "getAllManualNodesInfo", - newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getAllManualNodesInfo") - }, - staticCmd: func() interface{} { - return rpcmodel.NewGetAllManualNodesInfoCmd(nil) - }, - marshalled: `{"jsonrpc":"1.0","method":"getAllManualNodesInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetAllManualNodesInfoCmd{Details: pointers.Bool(true)}, 
+ marshalled: `{"jsonrpc":"1.0","method":"connect","params":["127.0.0.1"],"id":1}`, + unmarshalled: &model.ConnectCmd{Address: "127.0.0.1", IsPermanent: pointers.Bool(false)}, }, { name: "getSelectedTipHash", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getSelectedTipHash") + return model.NewCommand("getSelectedTipHash") }, staticCmd: func() interface{} { - return rpcmodel.NewGetSelectedTipHashCmd() + return model.NewGetSelectedTipHashCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getSelectedTipHash","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetSelectedTipHashCmd{}, + unmarshalled: &model.GetSelectedTipHashCmd{}, }, { name: "getBlock", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlock", "123") + return model.NewCommand("getBlock", "123") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockCmd("123", nil, nil, nil) + return model.NewGetBlockCmd("123", nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getBlock","params":["123"],"id":1}`, - unmarshalled: &rpcmodel.GetBlockCmd{ + unmarshalled: &model.GetBlockCmd{ Hash: "123", Verbose: pointers.Bool(true), VerboseTx: pointers.Bool(false), @@ -148,13 +75,13 @@ func TestRPCServerCommands(t *testing.T) { // more pointers than the destination to // exercise that path. verbosePtr := pointers.Bool(true) - return rpcmodel.NewCommand("getBlock", "123", &verbosePtr) + return model.NewCommand("getBlock", "123", &verbosePtr) }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockCmd("123", pointers.Bool(true), nil, nil) + return model.NewGetBlockCmd("123", pointers.Bool(true), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getBlock","params":["123",true],"id":1}`, - unmarshalled: &rpcmodel.GetBlockCmd{ + unmarshalled: &model.GetBlockCmd{ Hash: "123", Verbose: pointers.Bool(true), VerboseTx: pointers.Bool(false), @@ -163,13 +90,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlock required optional2", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlock", "123", true, true) + return model.NewCommand("getBlock", "123", true, true) }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockCmd("123", pointers.Bool(true), pointers.Bool(true), nil) + return model.NewGetBlockCmd("123", pointers.Bool(true), pointers.Bool(true), nil) }, marshalled: `{"jsonrpc":"1.0","method":"getBlock","params":["123",true,true],"id":1}`, - unmarshalled: &rpcmodel.GetBlockCmd{ + unmarshalled: &model.GetBlockCmd{ Hash: "123", Verbose: pointers.Bool(true), VerboseTx: pointers.Bool(true), @@ -178,13 +105,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlock required optional3", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlock", "123", true, true, "456") + return model.NewCommand("getBlock", "123", true, true, "456") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockCmd("123", pointers.Bool(true), pointers.Bool(true), pointers.String("456")) + return model.NewGetBlockCmd("123", pointers.Bool(true), pointers.Bool(true), pointers.String("456")) }, marshalled: `{"jsonrpc":"1.0","method":"getBlock","params":["123",true,true,"456"],"id":1}`, - unmarshalled: &rpcmodel.GetBlockCmd{ + unmarshalled: &model.GetBlockCmd{ Hash: "123", Verbose: pointers.Bool(true), VerboseTx: pointers.Bool(true), @@ -194,13 +121,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlocks", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlocks", true, true, "123") + return 
model.NewCommand("getBlocks", true, true, "123") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlocksCmd(true, true, pointers.String("123")) + return model.NewGetBlocksCmd(true, true, pointers.String("123")) }, marshalled: `{"jsonrpc":"1.0","method":"getBlocks","params":[true,true,"123"],"id":1}`, - unmarshalled: &rpcmodel.GetBlocksCmd{ + unmarshalled: &model.GetBlocksCmd{ IncludeRawBlockData: true, IncludeVerboseBlockData: true, LowHash: pointers.String("123"), @@ -209,35 +136,35 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlockDagInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockDagInfo") + return model.NewCommand("getBlockDagInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockDAGInfoCmd() + return model.NewGetBlockDAGInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getBlockDagInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetBlockDAGInfoCmd{}, + unmarshalled: &model.GetBlockDAGInfoCmd{}, }, { name: "getBlockCount", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockCount") + return model.NewCommand("getBlockCount") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockCountCmd() + return model.NewGetBlockCountCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getBlockCount","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetBlockCountCmd{}, + unmarshalled: &model.GetBlockCountCmd{}, }, { name: "getBlockHeader", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockHeader", "123") + return model.NewCommand("getBlockHeader", "123") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockHeaderCmd("123", nil) + return model.NewGetBlockHeaderCmd("123", nil) }, marshalled: `{"jsonrpc":"1.0","method":"getBlockHeader","params":["123"],"id":1}`, - unmarshalled: &rpcmodel.GetBlockHeaderCmd{ + unmarshalled: &model.GetBlockHeaderCmd{ Hash: "123", Verbose: pointers.Bool(true), }, @@ -245,29 +172,29 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlockTemplate", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate") + return model.NewCommand("getBlockTemplate") }, staticCmd: func() interface{} { - return rpcmodel.NewGetBlockTemplateCmd(nil) + return model.NewGetBlockTemplateCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetBlockTemplateCmd{Request: nil}, + unmarshalled: &model.GetBlockTemplateCmd{Request: nil}, }, { name: "getBlockTemplate optional - template request", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}`) + return model.NewCommand("getBlockTemplate", `{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}`) }, staticCmd: func() interface{} { - template := rpcmodel.TemplateRequest{ + template := model.TemplateRequest{ Mode: "template", PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", } - return rpcmodel.NewGetBlockTemplateCmd(&template) + return model.NewGetBlockTemplateCmd(&template) }, marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}],"id":1}`, - unmarshalled: &rpcmodel.GetBlockTemplateCmd{ - Request: &rpcmodel.TemplateRequest{ + unmarshalled: &model.GetBlockTemplateCmd{ + Request: &model.TemplateRequest{ Mode: "template", PayAddress: 
"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", }, @@ -276,21 +203,21 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlockTemplate optional - template request with tweaks", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","sigOpLimit":500,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}`) + return model.NewCommand("getBlockTemplate", `{"mode":"template","sigOpLimit":500,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}`) }, staticCmd: func() interface{} { - template := rpcmodel.TemplateRequest{ + template := model.TemplateRequest{ Mode: "template", PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", SigOpLimit: 500, MassLimit: 100000000, MaxVersion: 1, } - return rpcmodel.NewGetBlockTemplateCmd(&template) + return model.NewGetBlockTemplateCmd(&template) }, marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","sigOpLimit":500,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}],"id":1}`, - unmarshalled: &rpcmodel.GetBlockTemplateCmd{ - Request: &rpcmodel.TemplateRequest{ + unmarshalled: &model.GetBlockTemplateCmd{ + Request: &model.TemplateRequest{ Mode: "template", PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", SigOpLimit: int64(500), @@ -302,21 +229,21 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlockTemplate optional - template request with tweaks 2", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3","sigOpLimit":true,"massLimit":100000000,"maxVersion":1}`) + return model.NewCommand("getBlockTemplate", `{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3","sigOpLimit":true,"massLimit":100000000,"maxVersion":1}`) }, staticCmd: func() interface{} { - template := rpcmodel.TemplateRequest{ + template := model.TemplateRequest{ Mode: "template", PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", SigOpLimit: true, MassLimit: 100000000, MaxVersion: 1, } - return rpcmodel.NewGetBlockTemplateCmd(&template) + return model.NewGetBlockTemplateCmd(&template) }, marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","sigOpLimit":true,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}],"id":1}`, - unmarshalled: &rpcmodel.GetBlockTemplateCmd{ - Request: &rpcmodel.TemplateRequest{ + unmarshalled: &model.GetBlockTemplateCmd{ + Request: &model.TemplateRequest{ Mode: "template", PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", SigOpLimit: true, @@ -328,13 +255,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "getChainFromBlock", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getChainFromBlock", true, "123") + return model.NewCommand("getChainFromBlock", true, "123") }, staticCmd: func() interface{} { - return rpcmodel.NewGetChainFromBlockCmd(true, pointers.String("123")) + return model.NewGetChainFromBlockCmd(true, pointers.String("123")) }, marshalled: `{"jsonrpc":"1.0","method":"getChainFromBlock","params":[true,"123"],"id":1}`, - unmarshalled: &rpcmodel.GetChainFromBlockCmd{ + unmarshalled: &model.GetChainFromBlockCmd{ IncludeBlocks: true, StartHash: pointers.String("123"), }, @@ -342,167 +269,153 @@ func 
TestRPCServerCommands(t *testing.T) { { name: "getDagTips", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getDagTips") + return model.NewCommand("getDagTips") }, staticCmd: func() interface{} { - return rpcmodel.NewGetDAGTipsCmd() + return model.NewGetDAGTipsCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getDagTips","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetDAGTipsCmd{}, + unmarshalled: &model.GetDAGTipsCmd{}, }, { name: "getConnectionCount", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getConnectionCount") + return model.NewCommand("getConnectionCount") }, staticCmd: func() interface{} { - return rpcmodel.NewGetConnectionCountCmd() + return model.NewGetConnectionCountCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getConnectionCount","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetConnectionCountCmd{}, + unmarshalled: &model.GetConnectionCountCmd{}, }, { name: "getDifficulty", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getDifficulty") + return model.NewCommand("getDifficulty") }, staticCmd: func() interface{} { - return rpcmodel.NewGetDifficultyCmd() + return model.NewGetDifficultyCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getDifficulty","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetDifficultyCmd{}, + unmarshalled: &model.GetDifficultyCmd{}, }, { name: "getInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getInfo") + return model.NewCommand("getInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetInfoCmd() + return model.NewGetInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetInfoCmd{}, - }, - { - name: "getManualNodeInfo", - newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getManualNodeInfo", "127.0.0.1") - }, - staticCmd: func() interface{} { - return rpcmodel.NewGetManualNodeInfoCmd("127.0.0.1", nil) - }, - marshalled: `{"jsonrpc":"1.0","method":"getManualNodeInfo","params":["127.0.0.1"],"id":1}`, - unmarshalled: &rpcmodel.GetManualNodeInfoCmd{ - Node: "127.0.0.1", - Details: pointers.Bool(true), - }, + unmarshalled: &model.GetInfoCmd{}, }, { name: "getMempoolEntry", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getMempoolEntry", "txhash") + return model.NewCommand("getMempoolEntry", "txhash") }, staticCmd: func() interface{} { - return rpcmodel.NewGetMempoolEntryCmd("txhash") + return model.NewGetMempoolEntryCmd("txhash") }, marshalled: `{"jsonrpc":"1.0","method":"getMempoolEntry","params":["txhash"],"id":1}`, - unmarshalled: &rpcmodel.GetMempoolEntryCmd{ + unmarshalled: &model.GetMempoolEntryCmd{ TxID: "txhash", }, }, { name: "getMempoolInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getMempoolInfo") + return model.NewCommand("getMempoolInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetMempoolInfoCmd() + return model.NewGetMempoolInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getMempoolInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetMempoolInfoCmd{}, + unmarshalled: &model.GetMempoolInfoCmd{}, }, { name: "getNetworkInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getNetworkInfo") + return model.NewCommand("getNetworkInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetNetworkInfoCmd() + return model.NewGetNetworkInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getNetworkInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetNetworkInfoCmd{}, + 
unmarshalled: &model.GetNetworkInfoCmd{}, }, { name: "getNetTotals", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getNetTotals") + return model.NewCommand("getNetTotals") }, staticCmd: func() interface{} { - return rpcmodel.NewGetNetTotalsCmd() + return model.NewGetNetTotalsCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getNetTotals","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetNetTotalsCmd{}, + unmarshalled: &model.GetNetTotalsCmd{}, }, { name: "getConnectedPeerInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getConnectedPeerInfo") + return model.NewCommand("getConnectedPeerInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetConnectedPeerInfoCmd() + return model.NewGetConnectedPeerInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getConnectedPeerInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetConnectedPeerInfoCmd{}, + unmarshalled: &model.GetConnectedPeerInfoCmd{}, }, { name: "getRawMempool", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getRawMempool") + return model.NewCommand("getRawMempool") }, staticCmd: func() interface{} { - return rpcmodel.NewGetRawMempoolCmd(nil) + return model.NewGetRawMempoolCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"getRawMempool","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetRawMempoolCmd{ + unmarshalled: &model.GetRawMempoolCmd{ Verbose: pointers.Bool(false), }, }, { name: "getRawMempool optional", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getRawMempool", false) + return model.NewCommand("getRawMempool", false) }, staticCmd: func() interface{} { - return rpcmodel.NewGetRawMempoolCmd(pointers.Bool(false)) + return model.NewGetRawMempoolCmd(pointers.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"getRawMempool","params":[false],"id":1}`, - unmarshalled: &rpcmodel.GetRawMempoolCmd{ + unmarshalled: &model.GetRawMempoolCmd{ Verbose: pointers.Bool(false), }, }, { name: "getSubnetwork", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getSubnetwork", "123") + return model.NewCommand("getSubnetwork", "123") }, staticCmd: func() interface{} { - return rpcmodel.NewGetSubnetworkCmd("123") + return model.NewGetSubnetworkCmd("123") }, marshalled: `{"jsonrpc":"1.0","method":"getSubnetwork","params":["123"],"id":1}`, - unmarshalled: &rpcmodel.GetSubnetworkCmd{ + unmarshalled: &model.GetSubnetworkCmd{ SubnetworkID: "123", }, }, { name: "getTxOut", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getTxOut", "123", 1) + return model.NewCommand("getTxOut", "123", 1) }, staticCmd: func() interface{} { - return rpcmodel.NewGetTxOutCmd("123", 1, nil) + return model.NewGetTxOutCmd("123", 1, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getTxOut","params":["123",1],"id":1}`, - unmarshalled: &rpcmodel.GetTxOutCmd{ + unmarshalled: &model.GetTxOutCmd{ TxID: "123", Vout: 1, IncludeMempool: pointers.Bool(true), @@ -511,13 +424,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "getTxOut optional", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getTxOut", "123", 1, true) + return model.NewCommand("getTxOut", "123", 1, true) }, staticCmd: func() interface{} { - return rpcmodel.NewGetTxOutCmd("123", 1, pointers.Bool(true)) + return model.NewGetTxOutCmd("123", 1, pointers.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"getTxOut","params":["123",1,true],"id":1}`, - unmarshalled: &rpcmodel.GetTxOutCmd{ + unmarshalled: &model.GetTxOutCmd{ TxID: "123", Vout: 1, 
IncludeMempool: pointers.Bool(true), @@ -526,72 +439,72 @@ func TestRPCServerCommands(t *testing.T) { { name: "getTxOutSetInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getTxOutSetInfo") + return model.NewCommand("getTxOutSetInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetTxOutSetInfoCmd() + return model.NewGetTxOutSetInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getTxOutSetInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetTxOutSetInfoCmd{}, + unmarshalled: &model.GetTxOutSetInfoCmd{}, }, { name: "help", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("help") + return model.NewCommand("help") }, staticCmd: func() interface{} { - return rpcmodel.NewHelpCmd(nil) + return model.NewHelpCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"help","params":[],"id":1}`, - unmarshalled: &rpcmodel.HelpCmd{ + unmarshalled: &model.HelpCmd{ Command: nil, }, }, { name: "help optional", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("help", "getBlock") + return model.NewCommand("help", "getBlock") }, staticCmd: func() interface{} { - return rpcmodel.NewHelpCmd(pointers.String("getBlock")) + return model.NewHelpCmd(pointers.String("getBlock")) }, marshalled: `{"jsonrpc":"1.0","method":"help","params":["getBlock"],"id":1}`, - unmarshalled: &rpcmodel.HelpCmd{ + unmarshalled: &model.HelpCmd{ Command: pointers.String("getBlock"), }, }, { name: "ping", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("ping") + return model.NewCommand("ping") }, staticCmd: func() interface{} { - return rpcmodel.NewPingCmd() + return model.NewPingCmd() }, marshalled: `{"jsonrpc":"1.0","method":"ping","params":[],"id":1}`, - unmarshalled: &rpcmodel.PingCmd{}, + unmarshalled: &model.PingCmd{}, }, { - name: "removeManualNode", + name: "disconnect", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("removeManualNode", "127.0.0.1") + return model.NewCommand("disconnect", "127.0.0.1") }, staticCmd: func() interface{} { - return rpcmodel.NewRemoveManualNodeCmd("127.0.0.1") + return model.NewDisconnectCmd("127.0.0.1") }, - marshalled: `{"jsonrpc":"1.0","method":"removeManualNode","params":["127.0.0.1"],"id":1}`, - unmarshalled: &rpcmodel.RemoveManualNodeCmd{Addr: "127.0.0.1"}, + marshalled: `{"jsonrpc":"1.0","method":"disconnect","params":["127.0.0.1"],"id":1}`, + unmarshalled: &model.DisconnectCmd{Address: "127.0.0.1"}, }, { name: "sendRawTransaction", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("sendRawTransaction", "1122") + return model.NewCommand("sendRawTransaction", "1122") }, staticCmd: func() interface{} { - return rpcmodel.NewSendRawTransactionCmd("1122", nil) + return model.NewSendRawTransactionCmd("1122", nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendRawTransaction","params":["1122"],"id":1}`, - unmarshalled: &rpcmodel.SendRawTransactionCmd{ + unmarshalled: &model.SendRawTransactionCmd{ HexTx: "1122", AllowHighFees: pointers.Bool(false), }, @@ -599,13 +512,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "sendRawTransaction optional", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("sendRawTransaction", "1122", false) + return model.NewCommand("sendRawTransaction", "1122", false) }, staticCmd: func() interface{} { - return rpcmodel.NewSendRawTransactionCmd("1122", pointers.Bool(false)) + return model.NewSendRawTransactionCmd("1122", pointers.Bool(false)) }, marshalled: 
`{"jsonrpc":"1.0","method":"sendRawTransaction","params":["1122",false],"id":1}`, - unmarshalled: &rpcmodel.SendRawTransactionCmd{ + unmarshalled: &model.SendRawTransactionCmd{ HexTx: "1122", AllowHighFees: pointers.Bool(false), }, @@ -613,24 +526,24 @@ func TestRPCServerCommands(t *testing.T) { { name: "stop", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("stop") + return model.NewCommand("stop") }, staticCmd: func() interface{} { - return rpcmodel.NewStopCmd() + return model.NewStopCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stop","params":[],"id":1}`, - unmarshalled: &rpcmodel.StopCmd{}, + unmarshalled: &model.StopCmd{}, }, { name: "submitBlock", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("submitBlock", "112233") + return model.NewCommand("submitBlock", "112233") }, staticCmd: func() interface{} { - return rpcmodel.NewSubmitBlockCmd("112233", nil) + return model.NewSubmitBlockCmd("112233", nil) }, marshalled: `{"jsonrpc":"1.0","method":"submitBlock","params":["112233"],"id":1}`, - unmarshalled: &rpcmodel.SubmitBlockCmd{ + unmarshalled: &model.SubmitBlockCmd{ HexBlock: "112233", Options: nil, }, @@ -638,18 +551,18 @@ func TestRPCServerCommands(t *testing.T) { { name: "submitBlock optional", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("submitBlock", "112233", `{"workId":"12345"}`) + return model.NewCommand("submitBlock", "112233", `{"workId":"12345"}`) }, staticCmd: func() interface{} { - options := rpcmodel.SubmitBlockOptions{ + options := model.SubmitBlockOptions{ WorkID: "12345", } - return rpcmodel.NewSubmitBlockCmd("112233", &options) + return model.NewSubmitBlockCmd("112233", &options) }, marshalled: `{"jsonrpc":"1.0","method":"submitBlock","params":["112233",{"workId":"12345"}],"id":1}`, - unmarshalled: &rpcmodel.SubmitBlockCmd{ + unmarshalled: &model.SubmitBlockCmd{ HexBlock: "112233", - Options: &rpcmodel.SubmitBlockOptions{ + Options: &model.SubmitBlockOptions{ WorkID: "12345", }, }, @@ -657,79 +570,79 @@ func TestRPCServerCommands(t *testing.T) { { name: "uptime", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("uptime") + return model.NewCommand("uptime") }, staticCmd: func() interface{} { - return rpcmodel.NewUptimeCmd() + return model.NewUptimeCmd() }, marshalled: `{"jsonrpc":"1.0","method":"uptime","params":[],"id":1}`, - unmarshalled: &rpcmodel.UptimeCmd{}, + unmarshalled: &model.UptimeCmd{}, }, { name: "validateAddress", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("validateAddress", "1Address") + return model.NewCommand("validateAddress", "1Address") }, staticCmd: func() interface{} { - return rpcmodel.NewValidateAddressCmd("1Address") + return model.NewValidateAddressCmd("1Address") }, marshalled: `{"jsonrpc":"1.0","method":"validateAddress","params":["1Address"],"id":1}`, - unmarshalled: &rpcmodel.ValidateAddressCmd{ + unmarshalled: &model.ValidateAddressCmd{ Address: "1Address", }, }, { name: "debugLevel", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("debugLevel", "trace") + return model.NewCommand("debugLevel", "trace") }, staticCmd: func() interface{} { - return rpcmodel.NewDebugLevelCmd("trace") + return model.NewDebugLevelCmd("trace") }, marshalled: `{"jsonrpc":"1.0","method":"debugLevel","params":["trace"],"id":1}`, - unmarshalled: &rpcmodel.DebugLevelCmd{ + unmarshalled: &model.DebugLevelCmd{ LevelSpec: "trace", }, }, { name: "node", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("node", rpcmodel.NRemove, 
"1.1.1.1") + return model.NewCommand("node", model.NRemove, "1.1.1.1") }, staticCmd: func() interface{} { - return rpcmodel.NewNodeCmd("remove", "1.1.1.1", nil) + return model.NewNodeCmd("remove", "1.1.1.1", nil) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["remove","1.1.1.1"],"id":1}`, - unmarshalled: &rpcmodel.NodeCmd{ - SubCmd: rpcmodel.NRemove, + unmarshalled: &model.NodeCmd{ + SubCmd: model.NRemove, Target: "1.1.1.1", }, }, { name: "node", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("node", rpcmodel.NDisconnect, "1.1.1.1") + return model.NewCommand("node", model.NDisconnect, "1.1.1.1") }, staticCmd: func() interface{} { - return rpcmodel.NewNodeCmd("disconnect", "1.1.1.1", nil) + return model.NewNodeCmd("disconnect", "1.1.1.1", nil) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["disconnect","1.1.1.1"],"id":1}`, - unmarshalled: &rpcmodel.NodeCmd{ - SubCmd: rpcmodel.NDisconnect, + unmarshalled: &model.NodeCmd{ + SubCmd: model.NDisconnect, Target: "1.1.1.1", }, }, { name: "node", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("node", rpcmodel.NConnect, "1.1.1.1", "perm") + return model.NewCommand("node", model.NConnect, "1.1.1.1", "perm") }, staticCmd: func() interface{} { - return rpcmodel.NewNodeCmd("connect", "1.1.1.1", pointers.String("perm")) + return model.NewNodeCmd("connect", "1.1.1.1", pointers.String("perm")) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","perm"],"id":1}`, - unmarshalled: &rpcmodel.NodeCmd{ - SubCmd: rpcmodel.NConnect, + unmarshalled: &model.NodeCmd{ + SubCmd: model.NConnect, Target: "1.1.1.1", ConnectSubCmd: pointers.String("perm"), }, @@ -737,14 +650,14 @@ func TestRPCServerCommands(t *testing.T) { { name: "node", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("node", rpcmodel.NConnect, "1.1.1.1", "temp") + return model.NewCommand("node", model.NConnect, "1.1.1.1", "temp") }, staticCmd: func() interface{} { - return rpcmodel.NewNodeCmd("connect", "1.1.1.1", pointers.String("temp")) + return model.NewNodeCmd("connect", "1.1.1.1", pointers.String("temp")) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","temp"],"id":1}`, - unmarshalled: &rpcmodel.NodeCmd{ - SubCmd: rpcmodel.NConnect, + unmarshalled: &model.NodeCmd{ + SubCmd: model.NConnect, Target: "1.1.1.1", ConnectSubCmd: pointers.String("temp"), }, @@ -752,13 +665,13 @@ func TestRPCServerCommands(t *testing.T) { { name: "getSelectedTip", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getSelectedTip") + return model.NewCommand("getSelectedTip") }, staticCmd: func() interface{} { - return rpcmodel.NewGetSelectedTipCmd(nil, nil) + return model.NewGetSelectedTipCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getSelectedTip","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetSelectedTipCmd{ + unmarshalled: &model.GetSelectedTipCmd{ Verbose: pointers.Bool(true), VerboseTx: pointers.Bool(false), }, @@ -766,27 +679,27 @@ func TestRPCServerCommands(t *testing.T) { { name: "getCurrentNet", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getCurrentNet") + return model.NewCommand("getCurrentNet") }, staticCmd: func() interface{} { - return rpcmodel.NewGetCurrentNetCmd() + return model.NewGetCurrentNetCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getCurrentNet","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetCurrentNetCmd{}, + unmarshalled: &model.GetCurrentNetCmd{}, }, { name: "getHeaders", newCmd: func() 
(interface{}, error) { - return rpcmodel.NewCommand("getHeaders", "", "") + return model.NewCommand("getHeaders", "", "") }, staticCmd: func() interface{} { - return rpcmodel.NewGetHeadersCmd( + return model.NewGetHeadersCmd( "", "", ) }, marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["",""],"id":1}`, - unmarshalled: &rpcmodel.GetHeadersCmd{ + unmarshalled: &model.GetHeadersCmd{ LowHash: "", HighHash: "", }, @@ -794,16 +707,16 @@ func TestRPCServerCommands(t *testing.T) { { name: "getHeaders - with arguments", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getHeaders", "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7") + return model.NewCommand("getHeaders", "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7") }, staticCmd: func() interface{} { - return rpcmodel.NewGetHeadersCmd( + return model.NewGetHeadersCmd( "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7", ) }, marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16","000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`, - unmarshalled: &rpcmodel.GetHeadersCmd{ + unmarshalled: &model.GetHeadersCmd{ LowHash: "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", HighHash: "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7", }, @@ -811,41 +724,41 @@ func TestRPCServerCommands(t *testing.T) { { name: "getTopHeaders", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getTopHeaders") + return model.NewCommand("getTopHeaders") }, staticCmd: func() interface{} { - return rpcmodel.NewGetTopHeadersCmd( + return model.NewGetTopHeadersCmd( nil, ) }, marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetTopHeadersCmd{}, + unmarshalled: &model.GetTopHeadersCmd{}, }, { name: "getTopHeaders - with high hash", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getTopHeaders", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7") + return model.NewCommand("getTopHeaders", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7") }, staticCmd: func() interface{} { - return rpcmodel.NewGetTopHeadersCmd( + return model.NewGetTopHeadersCmd( pointers.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"), ) }, marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":["000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`, - unmarshalled: &rpcmodel.GetTopHeadersCmd{ + unmarshalled: &model.GetTopHeadersCmd{ HighHash: pointers.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"), }, }, { name: "version", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("version") + return model.NewCommand("version") }, staticCmd: func() interface{} { - return rpcmodel.NewVersionCmd() + return model.NewVersionCmd() }, marshalled: `{"jsonrpc":"1.0","method":"version","params":[],"id":1}`, - unmarshalled: &rpcmodel.VersionCmd{}, + unmarshalled: &model.VersionCmd{}, }, } @@ -853,7 +766,7 @@ func TestRPCServerCommands(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. 
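Each case in this table is exercised both through model.NewCommand and through its static constructor, and the marshalled request is then round-tripped back through model.UnmarshalCommand. The same flow, pulled out of the test harness into a minimal standalone sketch for the renamed disconnect command (a hypothetical main package; errors are simply printed):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/kaspanet/kaspad/rpc/model"
)

func main() {
	// Build the command with its static constructor and marshal it with
	// request ID 1, which yields the wire form asserted in the test table:
	// {"jsonrpc":"1.0","method":"disconnect","params":["127.0.0.1"],"id":1}
	cmd := model.NewDisconnectCmd("127.0.0.1")
	marshalled, err := model.MarshalCommand(1, cmd)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(marshalled))

	// Round-trip: decode the JSON-RPC envelope and recover the typed command.
	var request model.Request
	if err := json.Unmarshal(marshalled, &request); err != nil {
		fmt.Println("request decode error:", err)
		return
	}
	parsed, err := model.UnmarshalCommand(&request)
	if err != nil {
		fmt.Println("UnmarshalCommand error:", err)
		return
	}
	if disconnectCmd, ok := parsed.(*model.DisconnectCmd); ok {
		fmt.Println("disconnect target:", disconnectCmd.Address) // 127.0.0.1
	}
}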
- marshalled, err := rpcmodel.MarshalCommand(testID, test.staticCmd()) + marshalled, err := model.MarshalCommand(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -878,7 +791,7 @@ func TestRPCServerCommands(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = rpcmodel.MarshalCommand(testID, cmd) + marshalled, err = model.MarshalCommand(testID, cmd) if err != nil { t.Errorf("MarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -892,7 +805,7 @@ func TestRPCServerCommands(t *testing.T) { continue } - var request rpcmodel.Request + var request model.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -900,7 +813,7 @@ func TestRPCServerCommands(t *testing.T) { continue } - cmd, err = rpcmodel.UnmarshalCommand(&request) + cmd, err = model.UnmarshalCommand(&request) if err != nil { t.Errorf("UnmarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -930,21 +843,21 @@ func TestRPCServerCommandErrors(t *testing.T) { }{ { name: "template request with invalid type", - result: &rpcmodel.TemplateRequest{}, + result: &model.TemplateRequest{}, marshalled: `{"mode":1}`, err: &json.UnmarshalTypeError{}, }, { name: "invalid template request sigoplimit field", - result: &rpcmodel.TemplateRequest{}, + result: &model.TemplateRequest{}, marshalled: `{"sigoplimit":"invalid"}`, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, { name: "invalid template request masslimit field", - result: &rpcmodel.TemplateRequest{}, + result: &model.TemplateRequest{}, marshalled: `{"masslimit":"invalid"}`, - err: rpcmodel.Error{ErrorCode: rpcmodel.ErrInvalidType}, + err: model.Error{ErrorCode: model.ErrInvalidType}, }, } @@ -957,9 +870,9 @@ func TestRPCServerCommandErrors(t *testing.T) { continue } - var testErr rpcmodel.Error + var testErr model.Error if errors.As(err, &testErr) { - var gotRPCModelErr rpcmodel.Error + var gotRPCModelErr model.Error errors.As(err, &gotRPCModelErr) gotErrorCode := gotRPCModelErr.ErrorCode if gotErrorCode != testErr.ErrorCode { diff --git a/rpcmodel/rpc_results.go b/rpc/model/rpc_results.go similarity index 92% rename from rpcmodel/rpc_results.go rename to rpc/model/rpc_results.go index 86977433c..debb7d94b 100644 --- a/rpcmodel/rpc_results.go +++ b/rpc/model/rpc_results.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model import ( "encoding/json" @@ -69,20 +69,6 @@ type DecodeScriptResult struct { P2sh string `json:"p2sh,omitempty"` } -// GetManualNodeInfoResultAddr models the data of the addresses portion of the -// getmanualnodeinfo command. -type GetManualNodeInfoResultAddr struct { - Address string `json:"address"` - Connected string `json:"connected"` -} - -// GetManualNodeInfoResult models the data from the getmanualnodeinfo command. -type GetManualNodeInfoResult struct { - ManualNode string `json:"manualNode"` - Connected *bool `json:"connected,omitempty"` - Addresses *[]GetManualNodeInfoResultAddr `json:"addresses,omitempty"` -} - // SoftForkDescription describes the current state of a soft-fork which was // deployed using a super-majority block signalling. 
type SoftForkDescription struct { @@ -219,25 +205,17 @@ type GetNetworkInfoResult struct { // GetConnectedPeerInfoResult models the data returned from the getConnectedPeerInfo command. type GetConnectedPeerInfoResult struct { - ID int32 `json:"id"` - Addr string `json:"addr"` - Services string `json:"services"` - RelayTxes bool `json:"relayTxes"` - LastSend int64 `json:"lastSend"` - LastRecv int64 `json:"lastRecv"` - BytesSent uint64 `json:"bytesSent"` - BytesRecv uint64 `json:"bytesRecv"` - ConnTime int64 `json:"connTime"` - TimeOffset int64 `json:"timeOffset"` - PingTime float64 `json:"pingTime"` - PingWait float64 `json:"pingWait,omitempty"` - Version uint32 `json:"version"` - SubVer string `json:"subVer"` - Inbound bool `json:"inbound"` - SelectedTip string `json:"selectedTip,omitempty"` - BanScore int32 `json:"banScore"` - FeeFilter int64 `json:"feeFilter"` - SyncNode bool `json:"syncNode"` + ID string `json:"id"` + Address string `json:"address"` + LastPingDuration int64 `json:"lastPingDuration"` + SelectedTipHash string `json:"selectedTipHash"` + IsSyncNode bool `json:"isSyncNode"` + IsInbound bool `json:"isInbound"` + BanScore uint32 `json:"banScore"` + TimeOffset int64 `json:"timeOffset"` + UserAgent string `json:"userAgent"` + ProtocolVersion uint32 `json:"protocolVersion"` + TimeConnected int64 `json:"timeConnected"` } // GetPeerAddressesResult models the data returned from the getPeerAddresses command. diff --git a/rpcmodel/rpc_results_test.go b/rpc/model/rpc_results_test.go similarity index 87% rename from rpcmodel/rpc_results_test.go rename to rpc/model/rpc_results_test.go index 49da883e3..478c8495a 100644 --- a/rpcmodel/rpc_results_test.go +++ b/rpc/model/rpc_results_test.go @@ -2,14 +2,14 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
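The reshaped GetConnectedPeerInfoResult above replaces the old numeric peer ID, relay flags and byte counters with a string ID, ping duration, selected-tip hash and protocol metadata. A decoding sketch against the new json tags; the payload values are made up, and it assumes the getConnectedPeerInfo reply carries an array of these entries:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/kaspanet/kaspad/rpc/model"
)

func main() {
	// Hypothetical reply body; the field names follow the json tags on the
	// new model.GetConnectedPeerInfoResult.
	payload := []byte(`[{
		"id": "1",
		"address": "203.0.113.7:16111",
		"lastPingDuration": 42,
		"selectedTipHash": "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7",
		"isSyncNode": true,
		"isInbound": false,
		"banScore": 0,
		"timeOffset": 0,
		"userAgent": "/kaspad:x.y.z/",
		"protocolVersion": 1,
		"timeConnected": 1589000000
	}]`)

	var peers []model.GetConnectedPeerInfoResult
	if err := json.Unmarshal(payload, &peers); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	for _, peer := range peers {
		fmt.Printf("peer %s at %s: syncNode=%t inbound=%t banScore=%d\n",
			peer.ID, peer.Address, peer.IsSyncNode, peer.IsInbound, peer.BanScore)
	}
}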
-package rpcmodel_test +package model_test import ( "encoding/json" "github.com/kaspanet/kaspad/util/pointers" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestRPCServerCustomResults ensures any results that have custom marshalling @@ -25,10 +25,10 @@ func TestRPCServerCustomResults(t *testing.T) { }{ { name: "custom vin marshal without coinbase", - result: &rpcmodel.Vin{ + result: &model.Vin{ TxID: "123", Vout: 1, - ScriptSig: &rpcmodel.ScriptSig{ + ScriptSig: &model.ScriptSig{ Asm: "0", Hex: "00", }, @@ -38,7 +38,7 @@ func TestRPCServerCustomResults(t *testing.T) { }, { name: "custom vinprevout marshal with coinbase", - result: &rpcmodel.VinPrevOut{ + result: &model.VinPrevOut{ Coinbase: "021234", Sequence: 4294967295, }, @@ -46,14 +46,14 @@ func TestRPCServerCustomResults(t *testing.T) { }, { name: "custom vinprevout marshal without coinbase", - result: &rpcmodel.VinPrevOut{ + result: &model.VinPrevOut{ TxID: "123", Vout: 1, - ScriptSig: &rpcmodel.ScriptSig{ + ScriptSig: &model.ScriptSig{ Asm: "0", Hex: "00", }, - PrevOut: &rpcmodel.PrevOut{ + PrevOut: &model.PrevOut{ Address: pointers.String("addr1"), Value: 0, }, @@ -63,7 +63,7 @@ func TestRPCServerCustomResults(t *testing.T) { }, { name: "versionresult", - result: &rpcmodel.VersionResult{ + result: &model.VersionResult{ VersionString: "1.0.0", Major: 1, Minor: 0, diff --git a/rpcmodel/rpc_websocket_commands.go b/rpc/model/rpc_websocket_commands.go similarity index 99% rename from rpcmodel/rpc_websocket_commands.go rename to rpc/model/rpc_websocket_commands.go index 59f073a6c..cca7ac53e 100644 --- a/rpcmodel/rpc_websocket_commands.go +++ b/rpc/model/rpc_websocket_commands.go @@ -6,7 +6,7 @@ // NOTE: This file is intended to house the RPC commands that are supported by // a kaspa rpc server, but are only available via websockets. -package rpcmodel +package model // AuthenticateCmd defines the authenticate JSON-RPC command. type AuthenticateCmd struct { diff --git a/rpcmodel/rpc_websocket_commands_test.go b/rpc/model/rpc_websocket_commands_test.go similarity index 69% rename from rpcmodel/rpc_websocket_commands_test.go rename to rpc/model/rpc_websocket_commands_test.go index a5780e6c7..3288e38be 100644 --- a/rpcmodel/rpc_websocket_commands_test.go +++ b/rpc/model/rpc_websocket_commands_test.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package rpcmodel_test +package model_test import ( "bytes" @@ -13,7 +13,7 @@ import ( "reflect" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestRPCServerWebsocketCommands tests all of the kaspa rpc server websocket-specific commands @@ -34,94 +34,94 @@ func TestRPCServerWebsocketCommands(t *testing.T) { { name: "authenticate", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("authenticate", "user", "pass") + return model.NewCommand("authenticate", "user", "pass") }, staticCmd: func() interface{} { - return rpcmodel.NewAuthenticateCmd("user", "pass") + return model.NewAuthenticateCmd("user", "pass") }, marshalled: `{"jsonrpc":"1.0","method":"authenticate","params":["user","pass"],"id":1}`, - unmarshalled: &rpcmodel.AuthenticateCmd{Username: "user", Passphrase: "pass"}, + unmarshalled: &model.AuthenticateCmd{Username: "user", Passphrase: "pass"}, }, { name: "notifyBlocks", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("notifyBlocks") + return model.NewCommand("notifyBlocks") }, staticCmd: func() interface{} { - return rpcmodel.NewNotifyBlocksCmd() + return model.NewNotifyBlocksCmd() }, marshalled: `{"jsonrpc":"1.0","method":"notifyBlocks","params":[],"id":1}`, - unmarshalled: &rpcmodel.NotifyBlocksCmd{}, + unmarshalled: &model.NotifyBlocksCmd{}, }, { name: "stopNotifyBlocks", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("stopNotifyBlocks") + return model.NewCommand("stopNotifyBlocks") }, staticCmd: func() interface{} { - return rpcmodel.NewStopNotifyBlocksCmd() + return model.NewStopNotifyBlocksCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stopNotifyBlocks","params":[],"id":1}`, - unmarshalled: &rpcmodel.StopNotifyBlocksCmd{}, + unmarshalled: &model.StopNotifyBlocksCmd{}, }, { name: "notifyChainChanges", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("notifyChainChanges") + return model.NewCommand("notifyChainChanges") }, staticCmd: func() interface{} { - return rpcmodel.NewNotifyChainChangesCmd() + return model.NewNotifyChainChangesCmd() }, marshalled: `{"jsonrpc":"1.0","method":"notifyChainChanges","params":[],"id":1}`, - unmarshalled: &rpcmodel.NotifyChainChangesCmd{}, + unmarshalled: &model.NotifyChainChangesCmd{}, }, { name: "stopNotifyChainChanges", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("stopNotifyChainChanges") + return model.NewCommand("stopNotifyChainChanges") }, staticCmd: func() interface{} { - return rpcmodel.NewStopNotifyChainChangesCmd() + return model.NewStopNotifyChainChangesCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stopNotifyChainChanges","params":[],"id":1}`, - unmarshalled: &rpcmodel.StopNotifyChainChangesCmd{}, + unmarshalled: &model.StopNotifyChainChangesCmd{}, }, { name: "notifyNewTransactions", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("notifyNewTransactions") + return model.NewCommand("notifyNewTransactions") }, staticCmd: func() interface{} { - return rpcmodel.NewNotifyNewTransactionsCmd(nil, nil) + return model.NewNotifyNewTransactionsCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"notifyNewTransactions","params":[],"id":1}`, - unmarshalled: &rpcmodel.NotifyNewTransactionsCmd{ + unmarshalled: &model.NotifyNewTransactionsCmd{ Verbose: pointers.Bool(false), }, }, { name: "notifyNewTransactions optional", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("notifyNewTransactions", true) + return model.NewCommand("notifyNewTransactions", true) }, 
staticCmd: func() interface{} { - return rpcmodel.NewNotifyNewTransactionsCmd(pointers.Bool(true), nil) + return model.NewNotifyNewTransactionsCmd(pointers.Bool(true), nil) }, marshalled: `{"jsonrpc":"1.0","method":"notifyNewTransactions","params":[true],"id":1}`, - unmarshalled: &rpcmodel.NotifyNewTransactionsCmd{ + unmarshalled: &model.NotifyNewTransactionsCmd{ Verbose: pointers.Bool(true), }, }, { name: "notifyNewTransactions optional 2", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("notifyNewTransactions", true, "0000000000000000000000000000000000000123") + return model.NewCommand("notifyNewTransactions", true, "0000000000000000000000000000000000000123") }, staticCmd: func() interface{} { - return rpcmodel.NewNotifyNewTransactionsCmd(pointers.Bool(true), pointers.String("0000000000000000000000000000000000000123")) + return model.NewNotifyNewTransactionsCmd(pointers.Bool(true), pointers.String("0000000000000000000000000000000000000123")) }, marshalled: `{"jsonrpc":"1.0","method":"notifyNewTransactions","params":[true,"0000000000000000000000000000000000000123"],"id":1}`, - unmarshalled: &rpcmodel.NotifyNewTransactionsCmd{ + unmarshalled: &model.NotifyNewTransactionsCmd{ Verbose: pointers.Bool(true), Subnetwork: pointers.String("0000000000000000000000000000000000000123"), }, @@ -129,45 +129,45 @@ func TestRPCServerWebsocketCommands(t *testing.T) { { name: "stopNotifyNewTransactions", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("stopNotifyNewTransactions") + return model.NewCommand("stopNotifyNewTransactions") }, staticCmd: func() interface{} { - return rpcmodel.NewStopNotifyNewTransactionsCmd() + return model.NewStopNotifyNewTransactionsCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stopNotifyNewTransactions","params":[],"id":1}`, - unmarshalled: &rpcmodel.StopNotifyNewTransactionsCmd{}, + unmarshalled: &model.StopNotifyNewTransactionsCmd{}, }, { name: "loadTxFilter", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("loadTxFilter", false, `["1Address"]`, `[{"txid":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]`) + return model.NewCommand("loadTxFilter", false, `["1Address"]`, `[{"txid":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]`) }, staticCmd: func() interface{} { addrs := []string{"1Address"} - ops := []rpcmodel.Outpoint{{ + ops := []model.Outpoint{{ TxID: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0, }} - return rpcmodel.NewLoadTxFilterCmd(false, addrs, ops) + return model.NewLoadTxFilterCmd(false, addrs, ops) }, marshalled: `{"jsonrpc":"1.0","method":"loadTxFilter","params":[false,["1Address"],[{"txid":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]],"id":1}`, - unmarshalled: &rpcmodel.LoadTxFilterCmd{ + unmarshalled: &model.LoadTxFilterCmd{ Reload: false, Addresses: []string{"1Address"}, - Outpoints: []rpcmodel.Outpoint{{TxID: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}}, + Outpoints: []model.Outpoint{{TxID: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}}, }, }, { name: "rescanBlocks", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("rescanBlocks", `["0000000000000000000000000000000000000000000000000000000000000123"]`) + return model.NewCommand("rescanBlocks", `["0000000000000000000000000000000000000000000000000000000000000123"]`) }, staticCmd: func() interface{} { blockhashes := 
[]string{"0000000000000000000000000000000000000000000000000000000000000123"} - return rpcmodel.NewRescanBlocksCmd(blockhashes) + return model.NewRescanBlocksCmd(blockhashes) }, marshalled: `{"jsonrpc":"1.0","method":"rescanBlocks","params":[["0000000000000000000000000000000000000000000000000000000000000123"]],"id":1}`, - unmarshalled: &rpcmodel.RescanBlocksCmd{ + unmarshalled: &model.RescanBlocksCmd{ BlockHashes: []string{"0000000000000000000000000000000000000000000000000000000000000123"}, }, }, @@ -177,7 +177,7 @@ func TestRPCServerWebsocketCommands(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. - marshalled, err := rpcmodel.MarshalCommand(testID, test.staticCmd()) + marshalled, err := model.MarshalCommand(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -201,7 +201,7 @@ func TestRPCServerWebsocketCommands(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = rpcmodel.MarshalCommand(testID, cmd) + marshalled, err = model.MarshalCommand(testID, cmd) if err != nil { t.Errorf("MarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -215,7 +215,7 @@ func TestRPCServerWebsocketCommands(t *testing.T) { continue } - var request rpcmodel.Request + var request model.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -223,7 +223,7 @@ func TestRPCServerWebsocketCommands(t *testing.T) { continue } - cmd, err = rpcmodel.UnmarshalCommand(&request) + cmd, err = model.UnmarshalCommand(&request) if err != nil { t.Errorf("UnmarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/rpcmodel/rpc_websocket_notifications.go b/rpc/model/rpc_websocket_notifications.go similarity index 99% rename from rpcmodel/rpc_websocket_notifications.go rename to rpc/model/rpc_websocket_notifications.go index f5f3abf9b..77623ac83 100644 --- a/rpcmodel/rpc_websocket_notifications.go +++ b/rpc/model/rpc_websocket_notifications.go @@ -6,7 +6,7 @@ // NOTE: This file is intended to house the RPC websocket notifications that are // supported by a kaspa rpc server. -package rpcmodel +package model const ( // FilteredBlockAddedNtfnMethod is the new method used for diff --git a/rpcmodel/rpc_websocket_notifications_test.go b/rpc/model/rpc_websocket_notifications_test.go similarity index 76% rename from rpcmodel/rpc_websocket_notifications_test.go rename to rpc/model/rpc_websocket_notifications_test.go index 6fd011446..e39ab04af 100644 --- a/rpcmodel/rpc_websocket_notifications_test.go +++ b/rpc/model/rpc_websocket_notifications_test.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package rpcmodel_test +package model_test import ( "bytes" @@ -14,7 +14,7 @@ import ( "github.com/kaspanet/kaspad/util/subnetworkid" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/daghash" ) @@ -35,13 +35,13 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { { name: "filteredBlockAdded", newNtfn: func() (interface{}, error) { - return rpcmodel.NewCommand("filteredBlockAdded", 100, "header", []string{"tx0", "tx1"}) + return model.NewCommand("filteredBlockAdded", 100, "header", []string{"tx0", "tx1"}) }, staticNtfn: func() interface{} { - return rpcmodel.NewFilteredBlockAddedNtfn(100, "header", []string{"tx0", "tx1"}) + return model.NewFilteredBlockAddedNtfn(100, "header", []string{"tx0", "tx1"}) }, marshalled: `{"jsonrpc":"1.0","method":"filteredBlockAdded","params":[100,"header",["tx0","tx1"]],"id":null}`, - unmarshalled: &rpcmodel.FilteredBlockAddedNtfn{ + unmarshalled: &model.FilteredBlockAddedNtfn{ BlueScore: 100, Header: "header", SubscribedTxs: []string{"tx0", "tx1"}, @@ -50,13 +50,13 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { { name: "txAccepted", newNtfn: func() (interface{}, error) { - return rpcmodel.NewCommand("txAccepted", "123", 1.5) + return model.NewCommand("txAccepted", "123", 1.5) }, staticNtfn: func() interface{} { - return rpcmodel.NewTxAcceptedNtfn("123", 1.5) + return model.NewTxAcceptedNtfn("123", 1.5) }, marshalled: `{"jsonrpc":"1.0","method":"txAccepted","params":["123",1.5],"id":null}`, - unmarshalled: &rpcmodel.TxAcceptedNtfn{ + unmarshalled: &model.TxAcceptedNtfn{ TxID: "123", Amount: 1.5, }, @@ -64,10 +64,10 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { { name: "txAcceptedVerbose", newNtfn: func() (interface{}, error) { - return rpcmodel.NewCommand("txAcceptedVerbose", `{"hex":"001122","txid":"123","version":1,"locktime":4294967295,"subnetwork":"0000000000000000000000000000000000000000","gas":0,"payloadHash":"","payload":"","vin":null,"vout":null,"isInMempool":false}`) + return model.NewCommand("txAcceptedVerbose", `{"hex":"001122","txid":"123","version":1,"locktime":4294967295,"subnetwork":"0000000000000000000000000000000000000000","gas":0,"payloadHash":"","payload":"","vin":null,"vout":null,"isInMempool":false}`) }, staticNtfn: func() interface{} { - txResult := rpcmodel.TxRawResult{ + txResult := model.TxRawResult{ Hex: "001122", TxID: "123", Version: 1, @@ -76,11 +76,11 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { Vin: nil, Vout: nil, } - return rpcmodel.NewTxAcceptedVerboseNtfn(txResult) + return model.NewTxAcceptedVerboseNtfn(txResult) }, marshalled: `{"jsonrpc":"1.0","method":"txAcceptedVerbose","params":[{"hex":"001122","txId":"123","version":1,"lockTime":4294967295,"subnetwork":"0000000000000000000000000000000000000000","gas":0,"payloadHash":"","payload":"","vin":null,"vout":null,"isInMempool":false}],"id":null}`, - unmarshalled: &rpcmodel.TxAcceptedVerboseNtfn{ - RawTx: rpcmodel.TxRawResult{ + unmarshalled: &model.TxAcceptedVerboseNtfn{ + RawTx: model.TxRawResult{ Hex: "001122", TxID: "123", Version: 1, @@ -94,10 +94,10 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { { name: "txAcceptedVerbose with subnetwork, gas and paylaod", newNtfn: func() (interface{}, error) { - return rpcmodel.NewCommand("txAcceptedVerbose", 
`{"hex":"001122","txId":"123","version":1,"lockTime":4294967295,"subnetwork":"000000000000000000000000000000000000432d","gas":10,"payloadHash":"bf8ccdb364499a3e628200c3d3512c2c2a43b7a7d4f1a40d7f716715e449f442","payload":"102030","vin":null,"vout":null,"isInMempool":false}`) + return model.NewCommand("txAcceptedVerbose", `{"hex":"001122","txId":"123","version":1,"lockTime":4294967295,"subnetwork":"000000000000000000000000000000000000432d","gas":10,"payloadHash":"bf8ccdb364499a3e628200c3d3512c2c2a43b7a7d4f1a40d7f716715e449f442","payload":"102030","vin":null,"vout":null,"isInMempool":false}`) }, staticNtfn: func() interface{} { - txResult := rpcmodel.TxRawResult{ + txResult := model.TxRawResult{ Hex: "001122", TxID: "123", Version: 1, @@ -109,11 +109,11 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { Vin: nil, Vout: nil, } - return rpcmodel.NewTxAcceptedVerboseNtfn(txResult) + return model.NewTxAcceptedVerboseNtfn(txResult) }, marshalled: `{"jsonrpc":"1.0","method":"txAcceptedVerbose","params":[{"hex":"001122","txId":"123","version":1,"lockTime":4294967295,"subnetwork":"000000000000000000000000000000000000432d","gas":10,"payloadHash":"bf8ccdb364499a3e628200c3d3512c2c2a43b7a7d4f1a40d7f716715e449f442","payload":"102030","vin":null,"vout":null,"isInMempool":false}],"id":null}`, - unmarshalled: &rpcmodel.TxAcceptedVerboseNtfn{ - RawTx: rpcmodel.TxRawResult{ + unmarshalled: &model.TxAcceptedVerboseNtfn{ + RawTx: model.TxRawResult{ Hex: "001122", TxID: "123", Version: 1, @@ -130,13 +130,13 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { { name: "relevantTxAccepted", newNtfn: func() (interface{}, error) { - return rpcmodel.NewCommand("relevantTxAccepted", "001122") + return model.NewCommand("relevantTxAccepted", "001122") }, staticNtfn: func() interface{} { - return rpcmodel.NewRelevantTxAcceptedNtfn("001122") + return model.NewRelevantTxAcceptedNtfn("001122") }, marshalled: `{"jsonrpc":"1.0","method":"relevantTxAccepted","params":["001122"],"id":null}`, - unmarshalled: &rpcmodel.RelevantTxAcceptedNtfn{ + unmarshalled: &model.RelevantTxAcceptedNtfn{ Transaction: "001122", }, }, @@ -146,7 +146,7 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { for i, test := range tests { // Marshal the notification as created by the new static // creation function. The ID is nil for notifications. - marshalled, err := rpcmodel.MarshalCommand(nil, test.staticNtfn()) + marshalled, err := model.MarshalCommand(nil, test.staticNtfn()) if err != nil { t.Errorf("MarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -171,7 +171,7 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { // Marshal the notification as created by the generic new // notification creation function. The ID is nil for // notifications. 
- marshalled, err = rpcmodel.MarshalCommand(nil, cmd) + marshalled, err = model.MarshalCommand(nil, cmd) if err != nil { t.Errorf("MarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) @@ -185,7 +185,7 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { continue } - var request rpcmodel.Request + var request model.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -193,7 +193,7 @@ func TestRPCServerWebsocketNotifications(t *testing.T) { continue } - cmd, err = rpcmodel.UnmarshalCommand(&request) + cmd, err = model.UnmarshalCommand(&request) if err != nil { t.Errorf("UnmarshalCommand #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/rpcmodel/rpc_websocket_results.go b/rpc/model/rpc_websocket_results.go similarity index 96% rename from rpcmodel/rpc_websocket_results.go rename to rpc/model/rpc_websocket_results.go index 9ebae7820..150d3e416 100644 --- a/rpcmodel/rpc_websocket_results.go +++ b/rpc/model/rpc_websocket_results.go @@ -3,7 +3,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel +package model // SessionResult models the data from the session command. type SessionResult struct { diff --git a/rpcmodel/rpc_websocket_results_test.go b/rpc/model/rpc_websocket_results_test.go similarity index 91% rename from rpcmodel/rpc_websocket_results_test.go rename to rpc/model/rpc_websocket_results_test.go index 8b273194a..7ce516e1c 100644 --- a/rpcmodel/rpc_websocket_results_test.go +++ b/rpc/model/rpc_websocket_results_test.go @@ -3,13 +3,13 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package rpcmodel_test +package model_test import ( "encoding/json" "testing" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" ) // TestRPCServerWebsocketResults ensures any results that have custom marshalling @@ -24,7 +24,7 @@ func TestRPCServerWebsocketResults(t *testing.T) { }{ { name: "RescannedBlock", - result: &rpcmodel.RescannedBlock{ + result: &model.RescannedBlock{ Hash: "blockhash", Transactions: []string{"serializedtx"}, }, diff --git a/server/rpc/rpcserver.go b/rpc/rpcserver.go similarity index 67% rename from server/rpc/rpcserver.go rename to rpc/rpcserver.go index c1abb1830..4416004fa 100644 --- a/server/rpc/rpcserver.go +++ b/rpc/rpcserver.go @@ -12,6 +12,10 @@ import ( "encoding/base64" "encoding/json" "fmt" + "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/connmanager" + "github.com/kaspanet/kaspad/protocol" + "github.com/kaspanet/kaspad/util/mstime" "io" "io/ioutil" "math/rand" @@ -22,27 +26,17 @@ import ( "sync/atomic" "time" - "github.com/kaspanet/kaspad/util/mstime" - - "github.com/kaspanet/kaspad/addrmgr" - "github.com/pkg/errors" "github.com/btcsuite/websocket" "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/blockdag/indexers" "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/mempool" "github.com/kaspanet/kaspad/mining" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/server/serverutils" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/util/fs" "github.com/kaspanet/kaspad/util/network" - "github.com/kaspanet/kaspad/wire" ) const ( @@ -62,46 +56,38 
@@ type commandHandler func(*Server, interface{}, <-chan struct{}) (interface{}, er // a dependency loop. var rpcHandlers map[string]commandHandler var rpcHandlersBeforeInit = map[string]commandHandler{ - "addManualNode": handleAddManualNode, - "createRawTransaction": handleCreateRawTransaction, - "debugLevel": handleDebugLevel, - "decodeRawTransaction": handleDecodeRawTransaction, - "decodeScript": handleDecodeScript, - "getAllManualNodesInfo": handleGetAllManualNodesInfo, - "getSelectedTip": handleGetSelectedTip, - "getSelectedTipHash": handleGetSelectedTipHash, - "getBlock": handleGetBlock, - "getBlocks": handleGetBlocks, - "getBlockDagInfo": handleGetBlockDAGInfo, - "getBlockCount": handleGetBlockCount, - "getBlockHeader": handleGetBlockHeader, - "getBlockTemplate": handleGetBlockTemplate, - "getChainFromBlock": handleGetChainFromBlock, - "getConnectionCount": handleGetConnectionCount, - "getCurrentNet": handleGetCurrentNet, - "getDifficulty": handleGetDifficulty, - "getHeaders": handleGetHeaders, - "getTopHeaders": handleGetTopHeaders, - "getInfo": handleGetInfo, - "getManualNodeInfo": handleGetManualNodeInfo, - "getMempoolInfo": handleGetMempoolInfo, - "getMempoolEntry": handleGetMempoolEntry, - "getNetTotals": handleGetNetTotals, - "getConnectedPeerInfo": handleGetConnectedPeerInfo, - "getPeerAddresses": handleGetPeerAddresses, - "getRawMempool": handleGetRawMempool, - "getSubnetwork": handleGetSubnetwork, - "getTxOut": handleGetTxOut, - "help": handleHelp, - "node": handleNode, - "ping": handlePing, - "removeManualNode": handleRemoveManualNode, - "sendRawTransaction": handleSendRawTransaction, - "stop": handleStop, - "submitBlock": handleSubmitBlock, - "uptime": handleUptime, - "validateAddress": handleValidateAddress, - "version": handleVersion, + "connect": handleConnect, + "debugLevel": handleDebugLevel, + "getSelectedTip": handleGetSelectedTip, + "getSelectedTipHash": handleGetSelectedTipHash, + "getBlock": handleGetBlock, + "getBlocks": handleGetBlocks, + "getBlockDagInfo": handleGetBlockDAGInfo, + "getBlockCount": handleGetBlockCount, + "getBlockHeader": handleGetBlockHeader, + "getBlockTemplate": handleGetBlockTemplate, + "getChainFromBlock": handleGetChainFromBlock, + "getConnectionCount": handleGetConnectionCount, + "getCurrentNet": handleGetCurrentNet, + "getDifficulty": handleGetDifficulty, + "getHeaders": handleGetHeaders, + "getTopHeaders": handleGetTopHeaders, + "getInfo": handleGetInfo, + "getMempoolInfo": handleGetMempoolInfo, + "getMempoolEntry": handleGetMempoolEntry, + "getNetTotals": handleGetNetTotals, + "getConnectedPeerInfo": handleGetConnectedPeerInfo, + "getPeerAddresses": handleGetPeerAddresses, + "getRawMempool": handleGetRawMempool, + "getSubnetwork": handleGetSubnetwork, + "getTxOut": handleGetTxOut, + "help": handleHelp, + "disconnect": handleDisconnect, + "sendRawTransaction": handleSendRawTransaction, + "stop": handleStop, + "submitBlock": handleSubmitBlock, + "uptime": handleUptime, + "version": handleVersion, } // Commands that are currently unimplemented, but should ultimately be. @@ -160,10 +146,11 @@ func handleUnimplemented(s *Server, cmd interface{}, closeChan <-chan struct{}) // Server provides a concurrent safe RPC server to a kaspa node. 
type Server struct { + listeners []net.Listener started int32 shutdown int32 - cfg rpcserverConfig - appCfg *config.Config + cfg *config.Config + startupTime mstime.Time authsha [sha256.Size]byte limitauthsha [sha256.Size]byte ntfnMgr *wsNotificationManager @@ -175,6 +162,14 @@ type Server struct { helpCacher *helpCacher requestProcessShutdown chan struct{} quit chan int + + dag *blockdag.BlockDAG + txMempool *mempool.TxPool + acceptanceIndex *indexers.AcceptanceIndex + blockTemplateGenerator *mining.BlkTmplGenerator + connectionManager *connmanager.ConnectionManager + addressManager *addrmgr.AddrManager + protocolManager *protocol.Manager } // httpStatusLine returns a response Status-Line (RFC 2616 Section 6.1) @@ -242,7 +237,7 @@ func (s *Server) Stop() error { return nil } log.Warnf("RPC server shutting down") - for _, listener := range s.cfg.Listeners { + for _, listener := range s.listeners { err := listener.Close() if err != nil { log.Errorf("Problem shutting down rpc: %s", err) @@ -274,7 +269,7 @@ func (s *Server) NotifyNewTransactions(txns []*mempool.TxDesc) { // Potentially notify any getBlockTemplate long poll clients // about stale block templates due to the new transaction. - s.gbtWorkState.NotifyMempoolTx(s.cfg.TxMemPool.LastUpdated()) + s.gbtWorkState.NotifyMempoolTx(s.txMempool.LastUpdated()) } } @@ -283,9 +278,9 @@ func (s *Server) NotifyNewTransactions(txns []*mempool.TxDesc) { // // This function is safe for concurrent access. func (s *Server) limitConnections(w http.ResponseWriter, remoteAddr string) bool { - if int(atomic.LoadInt32(&s.numClients)+1) > s.appCfg.RPCMaxClients { + if int(atomic.LoadInt32(&s.numClients)+1) > s.cfg.RPCMaxClients { log.Infof("Max RPC clients exceeded [%d] - "+ - "disconnecting client %s", s.appCfg.RPCMaxClients, + "disconnecting client %s", s.cfg.RPCMaxClients, remoteAddr) http.Error(w, "503 Too busy. Try again later.", http.StatusServiceUnavailable) @@ -362,7 +357,7 @@ type parsedRPCCmd struct { id interface{} method string cmd interface{} - err *rpcmodel.RPCError + err *model.RPCError } // standardCmdResult checks that a parsed command is a standard kaspa JSON-RPC @@ -379,7 +374,7 @@ func (s *Server) standardCmdResult(cmd *parsedRPCCmd, closeChan <-chan struct{}) handler = handleUnimplemented goto handled } - return nil, rpcmodel.ErrRPCMethodNotFound + return nil, model.ErrRPCMethodNotFound handled: return handler(s, cmd.cmd, closeChan) @@ -389,27 +384,27 @@ handled: // err field of the returned parsedRPCCmd struct will contain an RPC error that // is suitable for use in replies if the command is invalid in some way such as // an unregistered command or invalid parameters. -func parseCmd(request *rpcmodel.Request) *parsedRPCCmd { +func parseCmd(request *model.Request) *parsedRPCCmd { var parsedCmd parsedRPCCmd parsedCmd.id = request.ID parsedCmd.method = request.Method - cmd, err := rpcmodel.UnmarshalCommand(request) + cmd, err := model.UnmarshalCommand(request) if err != nil { // When the error is because the method is not registered, // produce a method not found RPC error. - var rpcModelErr rpcmodel.Error + var rpcModelErr model.Error if ok := errors.As(err, &rpcModelErr); ok && - rpcModelErr.ErrorCode == rpcmodel.ErrUnregisteredMethod { + rpcModelErr.ErrorCode == model.ErrUnregisteredMethod { - parsedCmd.err = rpcmodel.ErrRPCMethodNotFound + parsedCmd.err = model.ErrRPCMethodNotFound return &parsedCmd } // Otherwise, some type of invalid parameters is the // cause, so produce the equivalent RPC error. 
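parseCmd records any unmarshalling failure as a *model.RPCError so the reply path can serialize it uniformly. A small sketch of building and serializing such an error reply by hand; the request ID and message text here are arbitrary:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/rpc/model"
)

func main() {
	// Invalid-parameter failures are reported with the ErrRPCInvalidParams
	// code, mirroring what parseCmd does when UnmarshalCommand rejects a
	// request for any reason other than an unregistered method.
	rpcErr := model.NewRPCError(model.ErrRPCInvalidParams.Code, "wrong number of params")

	// MarshalResponse produces the full JSON-RPC reply envelope; the result
	// is nil because the call failed.
	reply, err := model.MarshalResponse(1, nil, rpcErr)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(reply))
}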
- parsedCmd.err = rpcmodel.NewRPCError( - rpcmodel.ErrRPCInvalidParams.Code, err.Error()) + parsedCmd.err = model.NewRPCError( + model.ErrRPCInvalidParams.Code, err.Error()) return &parsedCmd } @@ -419,18 +414,18 @@ func parseCmd(request *rpcmodel.Request) *parsedRPCCmd { // createMarshalledReply returns a new marshalled JSON-RPC response given the // passed parameters. It will automatically convert errors that are not of -// the type *rpcmodel.RPCError to the appropriate type as needed. +// the type *model.RPCError to the appropriate type as needed. func createMarshalledReply(id, result interface{}, replyErr error) ([]byte, error) { - var jsonErr *rpcmodel.RPCError + var jsonErr *model.RPCError if replyErr != nil { - if jErr, ok := replyErr.(*rpcmodel.RPCError); ok { + if jErr, ok := replyErr.(*model.RPCError); ok { jsonErr = jErr } else { jsonErr = internalRPCError(replyErr.Error(), "") } } - return rpcmodel.MarshalResponse(id, result, jsonErr) + return model.MarshalResponse(id, result, jsonErr) } // jsonRPCRead handles reading and responding to RPC messages. @@ -478,10 +473,10 @@ func (s *Server) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin boo var responseID interface{} var jsonErr error var result interface{} - var request rpcmodel.Request + var request model.Request if err := json.Unmarshal(body, &request); err != nil { - jsonErr = &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCParse.Code, + jsonErr = &model.RPCError{ + Code: model.ErrRPCParse.Code, Message: "Failed to parse request: " + err.Error(), } } @@ -517,8 +512,8 @@ func (s *Server) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin boo // Check if the user is limited and set error if method unauthorized if !isAdmin { if _, ok := rpcLimited[request.Method]; !ok { - jsonErr = &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParams.Code, + jsonErr = &model.RPCError{ + Code: model.ErrRPCInvalidParams.Code, Message: "limited user not authorized for this method", } } @@ -626,7 +621,7 @@ func (s *Server) Start() { s.WebsocketHandler(ws, r.RemoteAddr, authenticated, isAdmin) }) - for _, listener := range s.cfg.Listeners { + for _, listener := range s.listeners { s.wg.Add(1) // Declaring this variable is necessary as it needs be declared in the same // scope of the anonymous function below it. @@ -642,157 +637,6 @@ func (s *Server) Start() { s.ntfnMgr.Start() } -// rpcserverPeer represents a peer for use with the RPC server. -// -// The interface contract requires that all of these methods are safe for -// concurrent access. -type rpcserverPeer interface { - // ToPeer returns the underlying peer instance. - ToPeer() *peer.Peer - - // IsTxRelayDisabled returns whether or not the peer has disabled - // transaction relay. - IsTxRelayDisabled() bool - - // BanScore returns the current integer value that represents how close - // the peer is to being banned. - BanScore() uint32 - - // FeeFilter returns the requested current minimum fee rate for which - // transactions should be announced. - FeeFilter() int64 -} - -// rpcserverConnManager represents a connection manager for use with the RPC -// server. -// -// The interface contract requires that all of these methods are safe for -// concurrent access. -type rpcserverConnManager interface { - // Connect adds the provided address as a new outbound peer. The - // permanent flag indicates whether or not to make the peer persistent - // and reconnect if the connection is lost. Attempting to connect to an - // already existing peer will return an error. 
- Connect(addr string, permanent bool) error - - // RemoveByID removes the peer associated with the provided id from the - // list of persistent peers. Attempting to remove an id that does not - // exist will return an error. - RemoveByID(id int32) error - - // RemoveByAddr removes the peer associated with the provided address - // from the list of persistent peers. Attempting to remove an address - // that does not exist will return an error. - RemoveByAddr(addr string) error - - // DisconnectByID disconnects the peer associated with the provided id. - // This applies to both inbound and outbound peers. Attempting to - // remove an id that does not exist will return an error. - DisconnectByID(id int32) error - - // DisconnectByAddr disconnects the peer associated with the provided - // address. This applies to both inbound and outbound peers. - // Attempting to remove an address that does not exist will return an - // error. - DisconnectByAddr(addr string) error - - // ConnectedCount returns the number of currently connected peers. - ConnectedCount() int32 - - // NetTotals returns the sum of all bytes received and sent across the - // network for all peers. - NetTotals() (uint64, uint64) - - // ConnectedPeers returns an array consisting of all connected peers. - ConnectedPeers() []rpcserverPeer - - // PersistentPeers returns an array consisting of all the persistent - // peers. - PersistentPeers() []rpcserverPeer - - // BroadcastMessage sends the provided message to all currently - // connected peers. - BroadcastMessage(msg wire.Message) - - // AddRebroadcastInventory adds the provided inventory to the list of - // inventories to be rebroadcast at random intervals until they show up - // in a block. - AddRebroadcastInventory(iv *wire.InvVect, data interface{}) - - // RelayTransactions generates and relays inventory vectors for all of - // the passed transactions to all connected peers. - RelayTransactions(txns []*mempool.TxDesc) -} - -// rpcserverSyncManager represents a sync manager for use with the RPC server. -// -// The interface contract requires that all of these methods are safe for -// concurrent access. -type rpcserverSyncManager interface { - // IsSynced returns whether or not the sync manager believes the DAG - // is current as compared to the rest of the network. - IsSynced() bool - - // SubmitBlock submits the provided block to the network after - // processing it locally. - SubmitBlock(block *util.Block, flags blockdag.BehaviorFlags) (bool, error) - - // Pause pauses the sync manager until the returned channel is closed. - Pause() chan<- struct{} - - // SyncPeerID returns the ID of the peer that is currently the peer being - // used to sync from or 0 if there is none. - SyncPeerID() int32 - - // AntiPastHeadersBetween returns the headers of the blocks between the - // lowHash's antiPast and highHash's antiPast, or up to - // wire.MaxBlockHeadersPerMsg block headers. - AntiPastHeadersBetween(lowHash, highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) -} - -// rpcserverConfig is a descriptor containing the RPC server configuration. -type rpcserverConfig struct { - // Listeners defines a slice of listeners for which the RPC server will - // take ownership of and accept connections. Since the RPC server takes - // ownership of these listeners, they will be closed when the RPC server - // is stopped. - Listeners []net.Listener - - // StartupTime is the unix timestamp for when the server that is hosting - // the RPC server started. 
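The removed rpcserverSyncManager interface documents Pause as returning a channel that the caller closes in order to resume the sync manager. A standalone sketch of that close-to-resume contract (this is an illustration of the pattern, not the netsync implementation):

```go
package main

import (
	"fmt"
	"time"
)

// worker loops until it is paused; Pause returns a channel that the caller
// closes to let the worker resume.
type worker struct {
	pauseReq chan chan struct{}
}

func newWorker() *worker {
	w := &worker{pauseReq: make(chan chan struct{})}
	go w.loop()
	return w
}

func (w *worker) loop() {
	ticks := time.NewTicker(10 * time.Millisecond)
	defer ticks.Stop()
	for {
		select {
		case unpause := <-w.pauseReq:
			fmt.Println("worker paused")
			<-unpause // block until the caller closes the channel
			fmt.Println("worker resumed")
		case <-ticks.C:
			// normal work would happen here
		}
	}
}

// Pause blocks the worker until the returned channel is closed.
func (w *worker) Pause() chan<- struct{} {
	unpause := make(chan struct{})
	w.pauseReq <- unpause
	return unpause
}

func main() {
	w := newWorker()
	resume := w.Pause()
	time.Sleep(20 * time.Millisecond) // worker is paused here
	close(resume)
	time.Sleep(20 * time.Millisecond) // give it time to resume
}
```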
- StartupTime int64 - - // ConnMgr defines the connection manager for the RPC server to use. It - // provides the RPC server with a means to do things such as add, - // remove, connect, disconnect, and query peers as well as other - // connection-related data and tasks. - ConnMgr rpcserverConnManager - - // SyncMgr defines the sync manager for the RPC server to use. - SyncMgr rpcserverSyncManager - - // These fields allow the RPC server to interface with the local block - // DAG data and state. - DAG *blockdag.BlockDAG - DAGParams *dagconfig.Params - - // TxMemPool defines the transaction memory pool to interact with. - TxMemPool *mempool.TxPool - - // These fields allow the RPC server to interface with mining. - // - // Generator produces block templates that can be retrieved - // by the getBlockTemplate command. - Generator *mining.BlkTmplGenerator - - // These fields define any optional indexes the RPC server can make use - // of to provide additional data when queried. - AcceptanceIndex *indexers.AcceptanceIndex - - // addressManager defines the address manager for the RPC server to use. - addressManager *addrmgr.AddrManager -} - // setupRPCListeners returns a slice of listeners that are configured for use // with the RPC server depending on the configuration settings for listen // addresses and TLS. @@ -803,7 +647,7 @@ func setupRPCListeners(appCfg *config.Config) ([]net.Listener, error) { // Generate the TLS cert and key file if both don't already // exist. if !fs.FileExists(appCfg.RPCKey) && !fs.FileExists(appCfg.RPCCert) { - err := serverutils.GenCertPair(appCfg.RPCCert, appCfg.RPCKey) + err := GenCertPair(appCfg.RPCCert, appCfg.RPCKey) if err != nil { return nil, err } @@ -844,50 +688,53 @@ func setupRPCListeners(appCfg *config.Config) ([]net.Listener, error) { // NewRPCServer returns a new instance of the rpcServer struct. func NewRPCServer( - appCfg *config.Config, + cfg *config.Config, dag *blockdag.BlockDAG, txMempool *mempool.TxPool, acceptanceIndex *indexers.AcceptanceIndex, blockTemplateGenerator *mining.BlkTmplGenerator, + connectionManager *connmanager.ConnectionManager, + addressManager *addrmgr.AddrManager, + protocolManager *protocol.Manager, ) (*Server, error) { // Setup listeners for the configured RPC listen addresses and // TLS settings. 
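setupRPCListeners above opens one listener per configured address, optionally wrapped in TLS; individual failures are logged and skipped, and only an empty result is treated as fatal (as the check in NewRPCServer below shows). A hedged sketch of that shape, with illustrative parameters rather than kaspad's actual config fields:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
)

// setupListeners opens a listener for every configured address, wrapping it
// in TLS when a tls.Config is supplied. Failures on individual addresses are
// logged and skipped; an error is returned only if nothing could be opened.
func setupListeners(addrs []string, tlsCfg *tls.Config) ([]net.Listener, error) {
	listeners := make([]net.Listener, 0, len(addrs))
	for _, addr := range addrs {
		var (
			l   net.Listener
			err error
		)
		if tlsCfg != nil {
			l, err = tls.Listen("tcp", addr, tlsCfg)
		} else {
			l, err = net.Listen("tcp", addr)
		}
		if err != nil {
			fmt.Printf("Can't listen on %s: %v\n", addr, err)
			continue
		}
		listeners = append(listeners, l)
	}
	if len(listeners) == 0 {
		return nil, fmt.Errorf("no valid listen address")
	}
	return listeners, nil
}

func main() {
	listeners, err := setupListeners([]string{"127.0.0.1:0"}, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, l := range listeners {
		fmt.Println("listening on", l.Addr())
		l.Close()
	}
}
```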
- rpcListeners, err := setupRPCListeners(appCfg) + rpcListeners, err := setupRPCListeners(cfg) if err != nil { return nil, err } if len(rpcListeners) == 0 { return nil, errors.New("RPCS: No valid listen address") } - cfg := &rpcserverConfig{ - Listeners: rpcListeners, - StartupTime: mstime.Now().UnixMilliseconds(), - DAGParams: dag.Params, - TxMemPool: txMempool, - Generator: blockTemplateGenerator, - AcceptanceIndex: acceptanceIndex, - DAG: dag, - } rpc := Server{ - cfg: *cfg, + listeners: rpcListeners, + startupTime: mstime.Now(), statusLines: make(map[int]string), gbtWorkState: newGbtWorkState(), helpCacher: newHelpCacher(), requestProcessShutdown: make(chan struct{}), quit: make(chan int), + + dag: dag, + txMempool: txMempool, + acceptanceIndex: acceptanceIndex, + blockTemplateGenerator: blockTemplateGenerator, + connectionManager: connectionManager, + addressManager: addressManager, + protocolManager: protocolManager, } - if appCfg.RPCUser != "" && appCfg.RPCPass != "" { - login := appCfg.RPCUser + ":" + appCfg.RPCPass + if cfg.RPCUser != "" && cfg.RPCPass != "" { + login := cfg.RPCUser + ":" + cfg.RPCPass auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login)) rpc.authsha = sha256.Sum256([]byte(auth)) } - if appCfg.RPCLimitUser != "" && appCfg.RPCLimitPass != "" { - login := appCfg.RPCLimitUser + ":" + appCfg.RPCLimitPass + if cfg.RPCLimitUser != "" && cfg.RPCLimitPass != "" { + login := cfg.RPCLimitUser + ":" + cfg.RPCLimitPass auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login)) rpc.limitauthsha = sha256.Sum256([]byte(auth)) } rpc.ntfnMgr = newWsNotificationManager(&rpc) - rpc.cfg.DAG.Subscribe(rpc.handleBlockDAGNotification) + rpc.dag.Subscribe(rpc.handleBlockDAGNotification) return &rpc, nil } @@ -904,7 +751,7 @@ func (s *Server) handleBlockDAGNotification(notification *blockdag.Notification) } block := data.Block - tipHashes := s.cfg.DAG.TipHashes() + tipHashes := s.dag.TipHashes() // Allow any clients performing long polling via the // getBlockTemplate RPC to be notified when the new block causes @@ -922,7 +769,7 @@ func (s *Server) handleBlockDAGNotification(notification *blockdag.Notification) // If the acceptance index is off we aren't capable of serving // ChainChanged notifications. - if s.cfg.AcceptanceIndex == nil { + if s.acceptanceIndex == nil { break } diff --git a/server/rpc/rpcserverhelp.go b/rpc/rpcserverhelp.go similarity index 82% rename from server/rpc/rpcserverhelp.go rename to rpc/rpcserverhelp.go index f6869a166..8c7fc9292 100644 --- a/server/rpc/rpcserverhelp.go +++ b/rpc/rpcserverhelp.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/pkg/errors" ) @@ -29,16 +29,10 @@ var helpDescsEnUS = map[string]string{ "debugLevel--result0": "The string 'Done.'", "debugLevel--result1": "The list of subsystems", - // AddManualNodeCmd help. - "addManualNode--synopsis": "Attempts to add or remove a persistent peer.", - "addManualNode-addr": "IP address and port of the peer to operate on", - "addManualNode-oneTry": "When enabled, will try a single connection to a peer", - - // NodeCmd help. 
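The authsha/limitauthsha fields set in the constructor above are precomputed SHA-256 digests of the expected Authorization header value. A small sketch of how such a digest can be built and later checked; checkAuth and the constant-time comparison are illustrative, not necessarily how the server's own auth check is written:

```go
package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"encoding/base64"
	"fmt"
)

// authDigest reproduces the precomputation: the expected "Authorization"
// header value is hashed once at startup so incoming headers can be compared
// without keeping the plaintext credentials around.
func authDigest(user, pass string) [sha256.Size]byte {
	login := user + ":" + pass
	auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
	return sha256.Sum256([]byte(auth))
}

// checkAuth hashes the presented header and compares it against the stored
// digest in constant time.
func checkAuth(header string, expected [sha256.Size]byte) bool {
	got := sha256.Sum256([]byte(header))
	return subtle.ConstantTimeCompare(got[:], expected[:]) == 1
}

func main() {
	expected := authDigest("user", "pass")

	// A well-formed header from a client using the same credentials.
	header := "Basic " + base64.StdEncoding.EncodeToString([]byte("user:pass"))
	fmt.Println(checkAuth(header, expected)) // true

	fmt.Println(checkAuth("Basic d3Jvbmc=", expected)) // false
}
```

The server keeps a second digest (limitauthsha) for the limited-permission user and decides admin status based on which digest matched.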
- "node--synopsis": "Attempts to add or remove a peer.", - "node-subCmd": "'disconnect' to remove all matching non-persistent peers, 'remove' to remove a persistent peer, or 'connect' to connect to a peer", - "node-target": "Either the IP address and port of the peer to operate on, or a valid peer ID.", - "node-connectSubCmd": "'perm' to make the connected peer a permanent one, 'temp' to try a single connect to a peer", + // ConnectCmd help. + "connect--synopsis": "Attempts to connect a peer.", + "connect-address": "IP address and port of the peer to connect", + "connect-isPermanent": "Whether the connection for this address should be permanent", // TransactionInput help. "transactionInput-txId": "The hash of the input transaction", @@ -97,52 +91,6 @@ var helpDescsEnUS = map[string]string{ "acceptedBlock-hash": "The hash of the accepted block", "acceptedBlock-acceptedTxIds": "The transactions in this block accepted by the chain block", - // TxRawDecodeResult help. - "txRawDecodeResult-txId": "The hash of the transaction", - "txRawDecodeResult-version": "The transaction version", - "txRawDecodeResult-lockTime": "The transaction lock time", - "txRawDecodeResult-vin": "The transaction inputs as JSON objects", - "txRawDecodeResult-vout": "The transaction outputs as JSON objects", - - // DecodeRawTransactionCmd help. - "decodeRawTransaction--synopsis": "Returns a JSON object representing the provided serialized, hex-encoded transaction.", - "decodeRawTransaction-hexTx": "Serialized, hex-encoded transaction", - - // DecodeScriptResult help. - "decodeScriptResult-asm": "Disassembly of the script", - "decodeScriptResult-type": "The type of the script (e.g. 'pubkeyhash')", - "decodeScriptResult-reqSigs": "The number of required signatures", - "decodeScriptResult-address": "The kaspa address (if any) associated with this script", - "decodeScriptResult-p2sh": "The script hash for use in pay-to-script-hash transactions (only present if the provided redeem script is not already a pay-to-script-hash script)", - - // DecodeScriptCmd help. - "decodeScript--synopsis": "Returns a JSON object with information about the provided hex-encoded script.", - "decodeScript-hexScript": "Hex-encoded script", - - // GetAllManualNodesInfoCmd help. - "getAllManualNodesInfo--synopsis": "Returns information about manually added (persistent) peers.", - "getAllManualNodesInfo-details": "Specifies whether the returned data is a JSON object including DNS and connection information, or just a list of added peers", - "getAllManualNodesInfo--condition0": "details=false", - "getAllManualNodesInfo--condition1": "details=true", - "getAllManualNodesInfo--result0": "List of added peers", - - // GetManualNodeInfoResultAddr help. - "getManualNodeInfoResultAddr-address": "The ip address for this DNS entry", - "getManualNodeInfoResultAddr-connected": "The connection 'direction' (inbound/outbound/false)", - - // GetManualNodeInfoResult help. - "getManualNodeInfoResult-manualNode": "The ip address or domain of the manually added peer", - "getManualNodeInfoResult-connected": "Whether or not the peer is currently connected", - "getManualNodeInfoResult-addresses": "DNS lookup and connection information about the peer", - - // GetManualNodeInfoCmd help. 
- "getManualNodeInfo--synopsis": "Returns information about manually added (persistent) peers.", - "getManualNodeInfo-details": "Specifies whether the returned data is a JSON object including DNS and connection information, or just a list of added peers", - "getManualNodeInfo-node": "Only return information about this specific peer instead of all added peers", - "getManualNodeInfo--condition0": "details=false", - "getManualNodeInfo--condition1": "details=true", - "getManualNodeInfo--result0": "List of added peers", - // GetSelectedTipResult help. "getSelectedTipResult-hash": "Hex-encoded bytes of the best block hash", "getSelectedTipResult-height": "Height of the best block", @@ -418,25 +366,17 @@ var helpDescsEnUS = map[string]string{ "getNetTotalsResult-timeMillis": "Number of milliseconds since 1 Jan 1970 GMT", // GetConnectedPeerInfoResult help. - "getConnectedPeerInfoResult-id": "A unique node ID", - "getConnectedPeerInfoResult-addr": "The ip address and port of the peer", - "getConnectedPeerInfoResult-services": "Services bitmask which represents the services supported by the peer", - "getConnectedPeerInfoResult-relayTxes": "Peer has requested transactions be relayed to it", - "getConnectedPeerInfoResult-lastSend": "Time the last message was received in seconds since 1 Jan 1970 GMT", - "getConnectedPeerInfoResult-lastRecv": "Time the last message was sent in seconds since 1 Jan 1970 GMT", - "getConnectedPeerInfoResult-bytesSent": "Total bytes sent", - "getConnectedPeerInfoResult-bytesRecv": "Total bytes received", - "getConnectedPeerInfoResult-connTime": "Time the connection was made in seconds since 1 Jan 1970 GMT", - "getConnectedPeerInfoResult-timeOffset": "The time offset of the peer", - "getConnectedPeerInfoResult-pingTime": "Number of microseconds the last ping took", - "getConnectedPeerInfoResult-pingWait": "Number of microseconds a queued ping has been waiting for a response", - "getConnectedPeerInfoResult-version": "The protocol version of the peer", - "getConnectedPeerInfoResult-subVer": "The user agent of the peer", - "getConnectedPeerInfoResult-inbound": "Whether or not the peer is an inbound connection", - "getConnectedPeerInfoResult-selectedTip": "The selected tip of the peer", - "getConnectedPeerInfoResult-banScore": "The ban score", - "getConnectedPeerInfoResult-feeFilter": "The requested minimum fee a transaction must have to be announced to the peer", - "getConnectedPeerInfoResult-syncNode": "Whether or not the peer is the sync peer", + "getConnectedPeerInfoResult-id": "A unique node ID", + "getConnectedPeerInfoResult-address": "The ip address and port of the peer", + "getConnectedPeerInfoResult-selectedTipHash": "The hash of the selected tip of the peer", + "getConnectedPeerInfoResult-lastPingDuration": "The duration of the last ping to the peer in milliseconds", + "getConnectedPeerInfoResult-isSyncNode": "Whether or not the peer is the sync peer", + "getConnectedPeerInfoResult-isInbound": "Whether the peer is inbound or outbound", + "getConnectedPeerInfoResult-banScore": "The ban score of the peer", + "getConnectedPeerInfoResult-timeOffset": "The time difference between this node and the peer", + "getConnectedPeerInfoResult-userAgent": "The user agent of the peer", + "getConnectedPeerInfoResult-protocolVersion": "The p2p protocol version of the peer", + "getConnectedPeerInfoResult-timeConnected": "The timestamp of when the peer connected to this node", // GetConnectedPeerInfoCmd help. 
"getConnectedPeerInfo--synopsis": "Returns data about each connected network peer as an array of json objects.", @@ -517,9 +457,9 @@ var helpDescsEnUS = map[string]string{ "ping--synopsis": "Queues a ping to be sent to each connected peer.\n" + "Ping times are provided by getConnectedPeerInfo via the pingtime and pingwait fields.", - // RemoveManualNodeCmd help. - "removeManualNode--synopsis": "Removes a peer from the manual nodes list", - "removeManualNode-addr": "IP address and port of the peer to remove", + // DisconnectCmd help. + "disconnect--synopsis": "Disconnects a peer", + "disconnect-address": "IP address and port of the peer to disconnect", // SendRawTransactionCmd help. "sendRawTransaction--synopsis": "Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.", @@ -618,57 +558,52 @@ var helpDescsEnUS = map[string]string{ // This information is used to generate the help. Each result type must be a // pointer to the type (or nil to indicate no return value). var rpcResultTypes = map[string][]interface{}{ - "addManualNode": nil, - "createRawTransaction": {(*string)(nil)}, - "debugLevel": {(*string)(nil), (*string)(nil)}, - "decodeRawTransaction": {(*rpcmodel.TxRawDecodeResult)(nil)}, - "decodeScript": {(*rpcmodel.DecodeScriptResult)(nil)}, - "getAllManualNodesInfo": {(*[]string)(nil), (*[]rpcmodel.GetManualNodeInfoResult)(nil)}, - "getSelectedTip": {(*rpcmodel.GetBlockVerboseResult)(nil)}, - "getSelectedTipHash": {(*string)(nil)}, - "getBlock": {(*string)(nil), (*rpcmodel.GetBlockVerboseResult)(nil)}, - "getBlocks": {(*rpcmodel.GetBlocksResult)(nil)}, - "getBlockCount": {(*int64)(nil)}, - "getBlockHeader": {(*string)(nil), (*rpcmodel.GetBlockHeaderVerboseResult)(nil)}, - "getBlockTemplate": {(*rpcmodel.GetBlockTemplateResult)(nil), (*string)(nil), nil}, - "getBlockDagInfo": {(*rpcmodel.GetBlockDAGInfoResult)(nil)}, - "getChainFromBlock": {(*rpcmodel.GetChainFromBlockResult)(nil)}, - "getConnectionCount": {(*int32)(nil)}, - "getCurrentNet": {(*uint32)(nil)}, - "getDifficulty": {(*float64)(nil)}, - "getTopHeaders": {(*[]string)(nil)}, - "getHeaders": {(*[]string)(nil)}, - "getInfo": {(*rpcmodel.InfoDAGResult)(nil)}, - "getManualNodeInfo": {(*string)(nil), (*rpcmodel.GetManualNodeInfoResult)(nil)}, - "getMempoolInfo": {(*rpcmodel.GetMempoolInfoResult)(nil)}, - "getMempoolEntry": {(*rpcmodel.GetMempoolEntryResult)(nil)}, - "getNetTotals": {(*rpcmodel.GetNetTotalsResult)(nil)}, - "getConnectedPeerInfo": {(*[]rpcmodel.GetConnectedPeerInfoResult)(nil)}, - "getPeerAddresses": {(*[]rpcmodel.GetPeerAddressesResult)(nil)}, - "getRawMempool": {(*[]string)(nil), (*rpcmodel.GetRawMempoolVerboseResult)(nil)}, - "getSubnetwork": {(*rpcmodel.GetSubnetworkResult)(nil)}, - "getTxOut": {(*rpcmodel.GetTxOutResult)(nil)}, - "node": nil, - "help": {(*string)(nil), (*string)(nil)}, - "ping": nil, - "removeManualNode": nil, - "sendRawTransaction": {(*string)(nil)}, - "stop": {(*string)(nil)}, - "submitBlock": {nil, (*string)(nil)}, - "uptime": {(*int64)(nil)}, - "validateAddress": {(*rpcmodel.ValidateAddressResult)(nil)}, - "version": {(*map[string]rpcmodel.VersionResult)(nil)}, + "connect": nil, + "debugLevel": {(*string)(nil), (*string)(nil)}, + "getSelectedTip": {(*model.GetBlockVerboseResult)(nil)}, + "getSelectedTipHash": {(*string)(nil)}, + "getBlock": {(*string)(nil), (*model.GetBlockVerboseResult)(nil)}, + "getBlocks": {(*model.GetBlocksResult)(nil)}, + "getBlockCount": {(*int64)(nil)}, + "getBlockHeader": {(*string)(nil), 
(*model.GetBlockHeaderVerboseResult)(nil)}, + "getBlockTemplate": {(*model.GetBlockTemplateResult)(nil), (*string)(nil), nil}, + "getBlockDagInfo": {(*model.GetBlockDAGInfoResult)(nil)}, + "getChainFromBlock": {(*model.GetChainFromBlockResult)(nil)}, + "getConnectionCount": {(*int32)(nil)}, + "getCurrentNet": {(*uint32)(nil)}, + "getDifficulty": {(*float64)(nil)}, + "getTopHeaders": {(*[]string)(nil)}, + "getHeaders": {(*[]string)(nil)}, + "getInfo": {(*model.InfoDAGResult)(nil)}, + "getMempoolInfo": {(*model.GetMempoolInfoResult)(nil)}, + "getMempoolEntry": {(*model.GetMempoolEntryResult)(nil)}, + "getNetTotals": {(*model.GetNetTotalsResult)(nil)}, + "getConnectedPeerInfo": {(*[]model.GetConnectedPeerInfoResult)(nil)}, + "getPeerAddresses": {(*[]model.GetPeerAddressesResult)(nil)}, + "getRawMempool": {(*[]string)(nil), (*model.GetRawMempoolVerboseResult)(nil)}, + "getSubnetwork": {(*model.GetSubnetworkResult)(nil)}, + "getTxOut": {(*model.GetTxOutResult)(nil)}, + "node": nil, + "help": {(*string)(nil), (*string)(nil)}, + "ping": nil, + "disconnect": nil, + "sendRawTransaction": {(*string)(nil)}, + "stop": {(*string)(nil)}, + "submitBlock": {nil, (*string)(nil)}, + "uptime": {(*int64)(nil)}, + "validateAddress": {(*model.ValidateAddressResult)(nil)}, + "version": {(*map[string]model.VersionResult)(nil)}, // Websocket commands. "loadTxFilter": nil, - "session": {(*rpcmodel.SessionResult)(nil)}, + "session": {(*model.SessionResult)(nil)}, "notifyBlocks": nil, "stopNotifyBlocks": nil, "notifyChainChanges": nil, "stopNotifyChainChanges": nil, "notifyNewTransactions": nil, "stopNotifyNewTransactions": nil, - "rescanBlocks": {(*[]rpcmodel.RescannedBlock)(nil)}, + "rescanBlocks": {(*[]model.RescannedBlock)(nil)}, } // helpCacher provides a concurrent safe type that provides help and usage for @@ -699,7 +634,7 @@ func (c *helpCacher) rpcMethodHelp(method string) (string, error) { } // Generate, cache, and return the help. - help, err := rpcmodel.GenerateHelp(method, helpDescsEnUS, resultTypes...) + help, err := model.GenerateHelp(method, helpDescsEnUS, resultTypes...) if err != nil { return "", err } @@ -722,7 +657,7 @@ func (c *helpCacher) rpcUsage(includeWebsockets bool) (string, error) { // Generate a list of one-line usage for every command. usageTexts := make([]string, 0, len(rpcHandlers)) for k := range rpcHandlers { - usage, err := rpcmodel.MethodUsageText(k) + usage, err := model.MethodUsageText(k) if err != nil { return "", err } @@ -732,7 +667,7 @@ func (c *helpCacher) rpcUsage(includeWebsockets bool) (string, error) { // Include websockets commands if requested. 
if includeWebsockets { for k := range wsHandlers { - usage, err := rpcmodel.MethodUsageText(k) + usage, err := model.MethodUsageText(k) if err != nil { return "", err } diff --git a/server/rpc/rpcserverhelp_test.go b/rpc/rpcserverhelp_test.go similarity index 100% rename from server/rpc/rpcserverhelp_test.go rename to rpc/rpcserverhelp_test.go diff --git a/server/rpc/rpcwebsocket.go b/rpc/rpcwebsocket.go similarity index 97% rename from server/rpc/rpcwebsocket.go rename to rpc/rpcwebsocket.go index da551e7cd..6e7d2a898 100644 --- a/server/rpc/rpcwebsocket.go +++ b/rpc/rpcwebsocket.go @@ -26,7 +26,7 @@ import ( "github.com/btcsuite/websocket" "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/rpcmodel" + "github.com/kaspanet/kaspad/rpc/model" "github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" @@ -90,9 +90,9 @@ func (s *Server) WebsocketHandler(conn *websocket.Conn, remoteAddr string, // Limit max number of websocket clients. log.Infof("New websocket client %s", remoteAddr) - if s.ntfnMgr.NumClients()+1 > s.appCfg.RPCMaxWebsockets { + if s.ntfnMgr.NumClients()+1 > s.cfg.RPCMaxWebsockets { log.Infof("Max websocket clients exceeded [%d] - "+ - "disconnecting client %s", s.appCfg.RPCMaxWebsockets, + "disconnecting client %s", s.cfg.RPCMaxWebsockets, remoteAddr) conn.Close() return @@ -539,13 +539,13 @@ func (m *wsNotificationManager) notifyChainChanged(clients map[chan struct{}]*ws } // Create the notification. - ntfn := rpcmodel.NewChainChangedNtfn(removedChainHashesStrs, addedChainBlocks) + ntfn := model.NewChainChangedNtfn(removedChainHashesStrs, addedChainBlocks) var marshalledJSON []byte if len(clients) != 0 { // Marshal notification var err error - marshalledJSON, err = rpcmodel.MarshalCommand(nil, ntfn) + marshalledJSON, err = model.MarshalCommand(nil, ntfn) if err != nil { log.Errorf("Failed to marshal chain changed "+ "notification: %s", err) @@ -586,7 +586,7 @@ func (m *wsNotificationManager) subscribedClients(tx *util.Tx, for i, output := range msgTx.TxOut { _, addr, err := txscript.ExtractScriptPubKeyAddress( - output.ScriptPubKey, m.server.cfg.DAGParams) + output.ScriptPubKey, m.server.dag.Params) if err != nil || addr == nil { // Clients are not able to subscribe to // nonstandard or non-address outputs. @@ -635,7 +635,7 @@ func (m *wsNotificationManager) notifyFilteredBlockAdded(clients map[chan struct "added notification: %s", err) return } - ntfn := rpcmodel.NewFilteredBlockAddedNtfn(blueScore, hex.EncodeToString(w.Bytes()), nil) + ntfn := model.NewFilteredBlockAddedNtfn(blueScore, hex.EncodeToString(w.Bytes()), nil) // Search for relevant transactions for each client and save them // serialized in hex encoding for the notification. @@ -655,7 +655,7 @@ func (m *wsNotificationManager) notifyFilteredBlockAdded(clients map[chan struct ntfn.SubscribedTxs = subscribedTxs[quitChan] // Marshal and queue notification. 
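Both the chainChanged and filteredBlockAdded paths marshal the notification once, with a null request id, and queue the same bytes to every interested client. A self-contained sketch of that fan-out, using a local notification struct in place of the rpc/model commands:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// notification mirrors the general shape of a marshalled notification
// command (method, positional params, null id); it is a local stand-in, not
// the rpc/model type.
type notification struct {
	Method string        `json:"method"`
	Params []interface{} `json:"params"`
	ID     interface{}   `json:"id"`
}

// broadcast marshals the notification once and hands the same byte slice to
// every registered client channel.
func broadcast(clients []chan []byte, method string, params ...interface{}) error {
	marshalled, err := json.Marshal(notification{Method: method, Params: params})
	if err != nil {
		return err
	}
	for _, c := range clients {
		c <- marshalled
	}
	return nil
}

func main() {
	a := make(chan []byte, 1)
	b := make(chan []byte, 1)

	if err := broadcast([]chan []byte{a, b}, "chainChanged", "removed", "added"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(<-a))
	fmt.Println(string(<-b))
}
```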
- marshalledJSON, err := rpcmodel.MarshalCommand(nil, ntfn) + marshalledJSON, err := model.MarshalCommand(nil, ntfn) if err != nil { log.Errorf("Failed to marshal filtered block "+ "connected notification: %s", err) @@ -688,8 +688,8 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie amount += txOut.Value } - ntfn := rpcmodel.NewTxAcceptedNtfn(txIDStr, util.Amount(amount).ToKAS()) - marshalledJSON, err := rpcmodel.MarshalCommand(nil, ntfn) + ntfn := model.NewTxAcceptedNtfn(txIDStr, util.Amount(amount).ToKAS()) + marshalledJSON, err := model.MarshalCommand(nil, ntfn) if err != nil { log.Errorf("Failed to marshal tx notification: %s", err.Error()) return @@ -702,14 +702,14 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie var marshalledJSONVerboseFull []byte var marshalledJSONVerbosePartial []byte initializeMarshalledJSONVerbose := func() bool { - net := m.server.cfg.DAGParams + net := m.server.dag.Params build := func() ([]byte, bool) { rawTx, err := createTxRawResult(net, mtx, txIDStr, nil, "", nil, true) if err != nil { return nil, false } - verboseNtfn := rpcmodel.NewTxAcceptedVerboseNtfn(*rawTx) - marshalledJSONVerbose, err := rpcmodel.MarshalCommand(nil, verboseNtfn) + verboseNtfn := model.NewTxAcceptedVerboseNtfn(*rawTx) + marshalledJSONVerbose, err := model.MarshalCommand(nil, verboseNtfn) if err != nil { log.Errorf("Failed to marshal verbose tx notification: %s", err.Error()) return nil, false @@ -742,7 +742,7 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie } } - nodeSubnetworkID := m.server.cfg.DAG.SubnetworkID() + nodeSubnetworkID := m.server.dag.SubnetworkID() if wsc.subnetworkIDForTxUpdates == nil || wsc.subnetworkIDForTxUpdates.IsEqual(nodeSubnetworkID) { wsc.QueueNotification(marshalledJSONVerboseFull) } else { @@ -773,8 +773,8 @@ func (m *wsNotificationManager) notifyRelevantTxAccepted(tx *util.Tx, clientsToNotify := m.subscribedClients(tx, clients) if len(clientsToNotify) != 0 { - n := rpcmodel.NewRelevantTxAcceptedNtfn(txHexString(tx.MsgTx())) - marshalled, err := rpcmodel.MarshalCommand(nil, n) + n := model.NewRelevantTxAcceptedNtfn(txHexString(tx.MsgTx())) + marshalled, err := model.MarshalCommand(nil, n) if err != nil { log.Errorf("Failed to marshal notification: %s", err) return @@ -926,15 +926,15 @@ out: break out } - var request rpcmodel.Request + var request model.Request err = json.Unmarshal(msg, &request) if err != nil { if !c.authenticated { break out } - jsonErr := &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCParse.Code, + jsonErr := &model.RPCError{ + Code: model.ErrRPCParse.Code, Message: "Failed to parse request: " + err.Error(), } reply, err := createMarshalledReply(nil, nil, jsonErr) @@ -986,7 +986,7 @@ out: // the authenticate request, an authenticate request is received // when the client is already authenticated, or incorrect // authentication credentials are provided in the request. - switch authCmd, ok := cmd.cmd.(*rpcmodel.AuthenticateCmd); { + switch authCmd, ok := cmd.cmd.(*model.AuthenticateCmd); { case c.authenticated && ok: log.Warnf("Websocket client %s is already authenticated", c.addr) @@ -1024,8 +1024,8 @@ out: // error when not authorized to call this RPC. 
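initializeMarshalledJSONVerbose above builds the verbose transaction notification lazily, so the marshalling cost is paid only if some client actually needs it, and only once. A sketch of the same idea; sync.Once is used here for brevity, whereas the original relies on a plain flag inside a single goroutine:

```go
package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// lazyPayload builds an expensive marshalled payload at most once, and only
// when a caller actually asks for it.
type lazyPayload struct {
	once sync.Once
	data []byte
	err  error
}

func (p *lazyPayload) get(build func() (interface{}, error)) ([]byte, error) {
	p.once.Do(func() {
		v, err := build()
		if err != nil {
			p.err = err
			return
		}
		p.data, p.err = json.Marshal(v)
	})
	return p.data, p.err
}

func main() {
	payload := &lazyPayload{}
	build := func() (interface{}, error) {
		fmt.Println("building verbose notification") // printed only once
		return map[string]string{"txId": "deadbeef"}, nil
	}

	for i := 0; i < 3; i++ {
		data, err := payload.get(build)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(string(data))
	}
}
```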
if !c.isAdmin { if _, ok := rpcLimited[request.Method]; !ok { - jsonErr := &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParams.Code, + jsonErr := &model.RPCError{ + Code: model.ErrRPCInvalidParams.Code, Message: "limited user not authorized for this method", } // Marshal and send response. @@ -1323,7 +1323,7 @@ func newWebsocketClient(server *Server, conn *websocket.Conn, isAdmin: isAdmin, sessionID: sessionID, server: server, - serviceRequestSem: makeSemaphore(server.appCfg.RPCMaxConcurrentReqs), + serviceRequestSem: makeSemaphore(server.cfg.RPCMaxConcurrentReqs), ntfnChan: make(chan []byte, 1), // nonblocking sync sendChan: make(chan wsResponse, websocketSendBufferSize), quit: make(chan struct{}), diff --git a/server/serverutils/utils.go b/rpc/utils.go similarity index 98% rename from server/serverutils/utils.go rename to rpc/utils.go index 62590dfbf..db8180e38 100644 --- a/server/serverutils/utils.go +++ b/rpc/utils.go @@ -1,4 +1,4 @@ -package serverutils +package rpc import ( "io/ioutil" diff --git a/rpcclient/rawtransactions.go b/rpcclient/rawtransactions.go deleted file mode 100644 index f342c7ec1..000000000 --- a/rpcclient/rawtransactions.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2014-2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpcclient - -import ( - "bytes" - "encoding/hex" - "encoding/json" - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -// FutureDecodeRawTransactionResult is a future promise to deliver the result -// of a DecodeRawTransactionAsync RPC invocation (or an applicable error). -type FutureDecodeRawTransactionResult chan *response - -// Receive waits for the response promised by the future and returns information -// about a transaction given its serialized bytes. -func (r FutureDecodeRawTransactionResult) Receive() (*rpcmodel.TxRawResult, error) { - res, err := receiveFuture(r) - if err != nil { - return nil, err - } - - // Unmarshal result as a decoderawtransaction result object. - var rawTxResult rpcmodel.TxRawResult - err = json.Unmarshal(res, &rawTxResult) - if err != nil { - return nil, err - } - - return &rawTxResult, nil -} - -// DecodeRawTransactionAsync returns an instance of a type that can be used to -// get the result of the RPC at some future time by invoking the Receive -// function on the returned instance. -// -// See DecodeRawTransaction for the blocking version and more details. -func (c *Client) DecodeRawTransactionAsync(serializedTx []byte) FutureDecodeRawTransactionResult { - txHex := hex.EncodeToString(serializedTx) - cmd := rpcmodel.NewDecodeRawTransactionCmd(txHex) - return c.sendCmd(cmd) -} - -// DecodeRawTransaction returns information about a transaction given its -// serialized bytes. -func (c *Client) DecodeRawTransaction(serializedTx []byte) (*rpcmodel.TxRawResult, error) { - return c.DecodeRawTransactionAsync(serializedTx).Receive() -} - -// FutureCreateRawTransactionResult is a future promise to deliver the result -// of a CreateRawTransactionAsync RPC invocation (or an applicable error). -type FutureCreateRawTransactionResult chan *response - -// Receive waits for the response promised by the future and returns a new -// transaction spending the provided inputs and sending to the provided -// addresses. 
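serviceRequestSem is created with makeSemaphore(server.cfg.RPCMaxConcurrentReqs); the usual Go implementation of such a counting semaphore is a buffered channel, sketched below (the acquire/release names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// semaphore is a counting semaphore backed by a buffered channel: at most
// cap(sem) goroutines can hold a slot at any one time.
type semaphore chan struct{}

func makeSemaphore(n int) semaphore { return make(semaphore, n) }

func (s semaphore) acquire() { s <- struct{}{} }
func (s semaphore) release() { <-s }

func main() {
	sem := makeSemaphore(2) // at most 2 concurrent "requests"
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem.acquire()
			defer sem.release()
			fmt.Printf("request %d running\n", id)
			time.Sleep(10 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}
```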
-func (r FutureCreateRawTransactionResult) Receive() (*wire.MsgTx, error) { - res, err := receiveFuture(r) - if err != nil { - return nil, err - } - - // Unmarshal result as a string. - var txHex string - err = json.Unmarshal(res, &txHex) - if err != nil { - return nil, err - } - - // Decode the serialized transaction hex to raw bytes. - serializedTx, err := hex.DecodeString(txHex) - if err != nil { - return nil, err - } - - // Deserialize the transaction and return it. - var msgTx wire.MsgTx - if err := msgTx.Deserialize(bytes.NewReader(serializedTx)); err != nil { - return nil, err - } - return &msgTx, nil -} - -// CreateRawTransactionAsync returns an instance of a type that can be used to -// get the result of the RPC at some future time by invoking the Receive -// function on the returned instance. -// -// See CreateRawTransaction for the blocking version and more details. -func (c *Client) CreateRawTransactionAsync(inputs []rpcmodel.TransactionInput, - amounts map[util.Address]util.Amount, lockTime *uint64) FutureCreateRawTransactionResult { - - convertedAmts := make(map[string]float64, len(amounts)) - for addr, amount := range amounts { - convertedAmts[addr.String()] = amount.ToKAS() - } - cmd := rpcmodel.NewCreateRawTransactionCmd(inputs, convertedAmts, lockTime) - return c.sendCmd(cmd) -} - -// CreateRawTransaction returns a new transaction spending the provided inputs -// and sending to the provided addresses. -func (c *Client) CreateRawTransaction(inputs []rpcmodel.TransactionInput, - amounts map[util.Address]util.Amount, lockTime *uint64) (*wire.MsgTx, error) { - - return c.CreateRawTransactionAsync(inputs, amounts, lockTime).Receive() -} - -// FutureSendRawTransactionResult is a future promise to deliver the result -// of a SendRawTransactionAsync RPC invocation (or an applicable error). -type FutureSendRawTransactionResult chan *response - -// Receive waits for the response promised by the future and returns the result -// of submitting the encoded transaction to the server which then relays it to -// the network. -func (r FutureSendRawTransactionResult) Receive() (*daghash.TxID, error) { - res, err := receiveFuture(r) - if err != nil { - return nil, err - } - - // Unmarshal result as a string. - var txIDStr string - err = json.Unmarshal(res, &txIDStr) - if err != nil { - return nil, err - } - - return daghash.NewTxIDFromStr(txIDStr) -} - -// SendRawTransactionAsync returns an instance of a type that can be used to get -// the result of the RPC at some future time by invoking the Receive function on -// the returned instance. -// -// See SendRawTransaction for the blocking version and more details. -func (c *Client) SendRawTransactionAsync(tx *wire.MsgTx, allowHighFees bool) FutureSendRawTransactionResult { - txHex := "" - if tx != nil { - // Serialize the transaction and convert to hex string. - buf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize())) - if err := tx.Serialize(buf); err != nil { - return newFutureError(err) - } - txHex = hex.EncodeToString(buf.Bytes()) - } - - cmd := rpcmodel.NewSendRawTransactionCmd(txHex, &allowHighFees) - return c.sendCmd(cmd) -} - -// SendRawTransaction submits the encoded transaction to the server which will -// then relay it to the network. 
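The deleted rpcclient methods come in Async/blocking pairs built on future channels: the Async variant returns a channel immediately and Receive blocks on it and unmarshals the reply. A generic sketch of that promise pattern, independent of the client's actual response type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// result carries either the raw reply bytes or an error.
type result struct {
	reply []byte
	err   error
}

// future is a one-shot promise for an RPC reply.
type future chan result

// Receive blocks until the response is delivered and unmarshals it.
func (f future) Receive() (string, error) {
	r := <-f
	if r.err != nil {
		return "", r.err
	}
	var s string
	if err := json.Unmarshal(r.reply, &s); err != nil {
		return "", err
	}
	return s, nil
}

// sendCmdAsync pretends to dispatch a command and returns the future right
// away; a real client would write the request to its connection and route
// the matching reply back onto this channel.
func sendCmdAsync(cmd string) future {
	f := make(future, 1)
	go func() {
		time.Sleep(10 * time.Millisecond) // simulated network round trip
		reply := fmt.Sprintf("%q", cmd+"-reply") // a JSON string literal
		f <- result{reply: []byte(reply)}
	}()
	return f
}

func main() {
	f := sendCmdAsync("sendRawTransaction")
	// ... other work could happen here while the reply is in flight ...
	reply, err := f.Receive()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(reply)
}
```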
-func (c *Client) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (*daghash.TxID, error) { - return c.SendRawTransactionAsync(tx, allowHighFees).Receive() -} - -// FutureDecodeScriptResult is a future promise to deliver the result -// of a DecodeScriptAsync RPC invocation (or an applicable error). -type FutureDecodeScriptResult chan *response - -// Receive waits for the response promised by the future and returns information -// about a script given its serialized bytes. -func (r FutureDecodeScriptResult) Receive() (*rpcmodel.DecodeScriptResult, error) { - res, err := receiveFuture(r) - if err != nil { - return nil, err - } - - // Unmarshal result as a decodescript result object. - var decodeScriptResult rpcmodel.DecodeScriptResult - err = json.Unmarshal(res, &decodeScriptResult) - if err != nil { - return nil, err - } - - return &decodeScriptResult, nil -} - -// DecodeScriptAsync returns an instance of a type that can be used to -// get the result of the RPC at some future time by invoking the Receive -// function on the returned instance. -// -// See DecodeScript for the blocking version and more details. -func (c *Client) DecodeScriptAsync(serializedScript []byte) FutureDecodeScriptResult { - scriptHex := hex.EncodeToString(serializedScript) - cmd := rpcmodel.NewDecodeScriptCmd(scriptHex) - return c.sendCmd(cmd) -} - -// DecodeScript returns information about a script given its serialized bytes. -func (c *Client) DecodeScript(serializedScript []byte) (*rpcmodel.DecodeScriptResult, error) { - return c.DecodeScriptAsync(serializedScript).Receive() -} diff --git a/server/p2p/log.go b/server/p2p/log.go deleted file mode 100644 index 26396496f..000000000 --- a/server/p2p/log.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package p2p - -import ( - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/util/panics" -) - -var ( - srvrLog, _ = logger.Get(logger.SubsystemTags.SRVR) - peerLog, _ = logger.Get(logger.SubsystemTags.PEER) - spawn = panics.GoroutineWrapperFunc(peerLog) - - txmpLog, _ = logger.Get(logger.SubsystemTags.TXMP) - indxLog, _ = logger.Get(logger.SubsystemTags.INDX) - amgrLog, _ = logger.Get(logger.SubsystemTags.AMGR) -) diff --git a/server/p2p/on_addr.go b/server/p2p/on_addr.go deleted file mode 100644 index 0b802e23c..000000000 --- a/server/p2p/on_addr.go +++ /dev/null @@ -1,64 +0,0 @@ -package p2p - -import ( - "fmt" - "time" - - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/util/mstime" - "github.com/kaspanet/kaspad/wire" -) - -// OnAddr is invoked when a peer receives an addr kaspa message and is -// used to notify the server about advertised addresses. -func (sp *Peer) OnAddr(_ *peer.Peer, msg *wire.MsgAddresses) { - // Ignore addresses when running on the simulation test network. This - // helps prevent the network from becoming another public test network - // since it will not be able to learn about other peers that have not - // specifically been provided. 
- if sp.AppCfg.Simnet { - return - } - - if len(msg.AddrList) > addrmgr.GetAddressesMax { - sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, - peer.BanScoreSentTooManyAddresses, 0, fmt.Sprintf("address count excceeded %d", addrmgr.GetAddressesMax)) - return - } - - if msg.IncludeAllSubnetworks { - sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, - peer.BanScoreMsgAddressesWithInvalidSubnetwork, 0, - fmt.Sprintf("got unexpected IncludeAllSubnetworks=true in [%s] command", msg.Command())) - return - } else if !msg.SubnetworkID.IsEqual(sp.AppCfg.SubnetworkID) && msg.SubnetworkID != nil { - peerLog.Errorf("Only full nodes and %s subnetwork IDs are allowed in [%s] command, but got subnetwork ID %s from %s", - sp.AppCfg.SubnetworkID, msg.Command(), msg.SubnetworkID, sp.Peer) - sp.Disconnect() - return - } - - for _, na := range msg.AddrList { - // Don't add more address if we're disconnecting. - if !sp.Connected() { - return - } - - // Set the timestamp to 5 days ago if it's more than 24 hours - // in the future so this address is one of the first to be - // removed when space is needed. - now := mstime.Now() - if na.Timestamp.After(now.Add(time.Minute * 10)) { - na.Timestamp = now.Add(-1 * time.Hour * 24 * 5) - } - - // Add address to known addresses for this peer. - sp.addKnownAddresses([]*wire.NetAddress{na}) - } - - // Add addresses to server address manager. The address manager handles - // the details of things such as preventing duplicate addresses, max - // addresses, and last seen updates. - sp.server.AddrManager.AddAddresses(msg.AddrList, sp.NA(), msg.SubnetworkID) -} diff --git a/server/p2p/on_block.go b/server/p2p/on_block.go deleted file mode 100644 index 170ae6471..000000000 --- a/server/p2p/on_block.go +++ /dev/null @@ -1,33 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/wire" -) - -// OnBlock is invoked when a peer receives a block kaspa message. It -// blocks until the kaspa block has been fully processed. -func (sp *Peer) OnBlock(_ *peer.Peer, msg *wire.MsgBlock, buf []byte) { - // Convert the raw MsgBlock to a util.Block which provides some - // convenience methods and things such as hash caching. - block := util.NewBlockFromBlockAndBytes(msg, buf) - - // Add the block to the known inventory for the peer. - iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) - sp.AddKnownInventory(iv) - - // Queue the block up to be handled by the block - // manager and intentionally block further receives - // until the kaspa block is fully processed and known - // good or bad. This helps prevent a malicious peer - // from queuing up a bunch of bad blocks before - // disconnecting (or being disconnected) and wasting - // memory. Additionally, this behavior is depended on - // by at least the block acceptance test tool as the - // reference implementation processes blocks in the same - // thread and therefore blocks further messages until - // the kaspa block has been fully processed. - sp.server.SyncManager.QueueBlock(block, sp.Peer, false, sp.blockProcessed) - <-sp.blockProcessed -} diff --git a/server/p2p/on_block_locator.go b/server/p2p/on_block_locator.go deleted file mode 100644 index f8b7072a8..000000000 --- a/server/p2p/on_block_locator.go +++ /dev/null @@ -1,54 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnBlockLocator is invoked when a peer receives a locator kaspa -// message. 
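OnAddr above clamps advertised timestamps: an address claiming to be from more than a few minutes in the future is pushed back to five days in the past so it becomes an early eviction candidate in the address manager. The check in isolation:

```go
package main

import (
	"fmt"
	"time"
)

// clampTimestamp reproduces the sanity check applied to advertised peer
// addresses: anything stamped beyond a small future window is rewritten to
// five days in the past.
func clampTimestamp(ts, now time.Time) time.Time {
	if ts.After(now.Add(10 * time.Minute)) {
		return now.Add(-5 * 24 * time.Hour)
	}
	return ts
}

func main() {
	now := time.Now()

	fresh := now.Add(-time.Hour)     // plausible, kept as-is
	bogus := now.Add(48 * time.Hour) // claims to be from the future

	fmt.Println(clampTimestamp(fresh, now).Equal(fresh)) // true
	fmt.Println(clampTimestamp(bogus, now).Before(now))  // true: clamped
}
```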
-func (sp *Peer) OnBlockLocator(_ *peer.Peer, msg *wire.MsgBlockLocator) { - sp.SetWasBlockLocatorRequested(false) - // Find the highest known shared block between the peers, and asks - // the block and its future from the peer. If the block is not - // found, create a lower resolution block locator and send it to - // the peer in order to find it in the next iteration. - dag := sp.server.DAG - if len(msg.BlockLocatorHashes) == 0 { - sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, - peer.BanScoreEmptyBlockLocator, 0, - "got empty block locator") - // Whether the peer will be banned or not, syncing from a node that doesn't follow - // the netsync protocol is undesired. - sp.server.SyncManager.RemoveFromSyncCandidates(sp.Peer) - return - } - // If the first hash of the block locator is known, it means we found - // the highest shared block. - highHash := msg.BlockLocatorHashes[0] - if dag.IsInDAG(highHash) { - if dag.IsKnownFinalizedBlock(highHash) { - peerLog.Debugf("Cannot sync with peer %s because the highest"+ - " shared chain block (%s) is below the finality point", sp, highHash) - sp.server.SyncManager.RemoveFromSyncCandidates(sp.Peer) - return - } - - // We send the highHash as the GetBlockInvsMsg's lowHash here. - // This is not a mistake. The invs we desire start from the highest - // hash that we know of and end at the highest hash that the peer - // knows of. - err := sp.Peer.PushGetBlockInvsMsg(highHash, sp.Peer.SelectedTipHash()) - if err != nil { - peerLog.Errorf("Failed pushing get blocks message for peer %s: %s", - sp, err) - return - } - return - } - highHash, lowHash := dag.FindNextLocatorBoundaries(msg.BlockLocatorHashes) - if highHash == nil { - panic("Couldn't find any unknown hashes in the block locator.") - } - sp.PushGetBlockLocatorMsg(highHash, lowHash) -} diff --git a/server/p2p/on_fee_filter.go b/server/p2p/on_fee_filter.go deleted file mode 100644 index 0749d6034..000000000 --- a/server/p2p/on_fee_filter.go +++ /dev/null @@ -1,24 +0,0 @@ -package p2p - -import ( - "fmt" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/wire" - "sync/atomic" -) - -// OnFeeFilter is invoked when a peer receives a feefilter kaspa message and -// is used by remote peers to request that no transactions which have a fee rate -// lower than provided value are inventoried to them. The peer will be -// disconnected if an invalid fee filter value is provided. -func (sp *Peer) OnFeeFilter(_ *peer.Peer, msg *wire.MsgFeeFilter) { - // Check that the passed minimum fee is a valid amount. - if msg.MinFee < 0 || msg.MinFee > util.MaxSompi { - sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, - peer.BanScoreInvalidFeeFilter, 0, fmt.Sprintf("sent an invalid feefilter '%s'", util.Amount(msg.MinFee))) - return - } - - atomic.StoreInt64(&sp.FeeFilterInt, msg.MinFee) -} diff --git a/server/p2p/on_filter_add.go b/server/p2p/on_filter_add.go deleted file mode 100644 index dd8930246..000000000 --- a/server/p2p/on_filter_add.go +++ /dev/null @@ -1,26 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnFilterAdd is invoked when a peer receives a filteradd kaspa -// message and is used by remote peers to add data to an already loaded bloom -// filter. The peer will be disconnected if a filter is not loaded when this -// message is received or the server is not configured to allow bloom filters. 
-func (sp *Peer) OnFilterAdd(_ *peer.Peer, msg *wire.MsgFilterAdd) { - // Disconnect and/or ban depending on the node bloom services flag and - // negotiated protocol version. - if !sp.enforceNodeBloomFlag(msg.Command()) { - return - } - - if sp.filter.IsLoaded() { - sp.AddBanScoreAndPushRejectMsg(wire.CmdFilterAdd, wire.RejectInvalid, nil, - peer.BanScoreNoFilterLoaded, 0, "sent a filteradd request with no filter loaded") - return - } - - sp.filter.Add(msg.Data) -} diff --git a/server/p2p/on_filter_clear.go b/server/p2p/on_filter_clear.go deleted file mode 100644 index bb80ce557..000000000 --- a/server/p2p/on_filter_clear.go +++ /dev/null @@ -1,26 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnFilterClear is invoked when a peer receives a filterclear kaspa -// message and is used by remote peers to clear an already loaded bloom filter. -// The peer will be disconnected if a filter is not loaded when this message is -// received or the server is not configured to allow bloom filters. -func (sp *Peer) OnFilterClear(_ *peer.Peer, msg *wire.MsgFilterClear) { - // Disconnect and/or ban depending on the node bloom services flag and - // negotiated protocol version. - if !sp.enforceNodeBloomFlag(msg.Command()) { - return - } - - if !sp.filter.IsLoaded() { - sp.AddBanScoreAndPushRejectMsg(wire.CmdFilterClear, wire.RejectInvalid, nil, - peer.BanScoreNoFilterLoaded, 0, "sent a filterclear request with no filter loaded") - return - } - - sp.filter.Unload() -} diff --git a/server/p2p/on_filter_load.go b/server/p2p/on_filter_load.go deleted file mode 100644 index 94872f9a4..000000000 --- a/server/p2p/on_filter_load.go +++ /dev/null @@ -1,23 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnFilterLoad is invoked when a peer receives a filterload kaspa -// message and it used to load a bloom filter that should be used for -// delivering merkle blocks and associated transactions that match the filter. -// The peer will be disconnected if the server is not configured to allow bloom -// filters. -func (sp *Peer) OnFilterLoad(_ *peer.Peer, msg *wire.MsgFilterLoad) { - // Disconnect and/or ban depending on the node bloom services flag and - // negotiated protocol version. - if !sp.enforceNodeBloomFlag(msg.Command()) { - return - } - - sp.setDisableRelayTx(false) - - sp.filter.Reload(msg) -} diff --git a/server/p2p/on_get_addr.go b/server/p2p/on_get_addr.go deleted file mode 100644 index 32accc83e..000000000 --- a/server/p2p/on_get_addr.go +++ /dev/null @@ -1,40 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnGetAddr is invoked when a peer receives a getaddr kaspa message -// and is used to provide the peer with known addresses from the address -// manager. -func (sp *Peer) OnGetAddr(_ *peer.Peer, msg *wire.MsgGetAddresses) { - // Don't return any addresses when running on the simulation test - // network. This helps prevent the network from becoming another - // public test network since it will not be able to learn about other - // peers that have not specifically been provided. - if sp.AppCfg.Simnet { - return - } - - // Do not accept getaddr requests from outbound peers. This reduces - // fingerprinting attacks. 
- if !sp.Inbound() { - peerLog.Debugf("Ignoring getaddr request from outbound peer %s", sp) - return - } - - // Only allow one getaddr request per connection to discourage - // address stamping of inv announcements. - if sp.sentAddrs { - peerLog.Debugf("Ignoring repeated getaddr request from peer %s", sp) - return - } - sp.sentAddrs = true - - // Get the current known addresses from the address manager. - addrCache := sp.server.AddrManager.AddressCache(msg.IncludeAllSubnetworks, msg.SubnetworkID) - - // Push the addresses. - sp.pushAddrMsg(addrCache, sp.SubnetworkID()) -} diff --git a/server/p2p/on_get_block_invs.go b/server/p2p/on_get_block_invs.go deleted file mode 100644 index 828c39170..000000000 --- a/server/p2p/on_get_block_invs.go +++ /dev/null @@ -1,44 +0,0 @@ -package p2p - -import ( - "fmt" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnGetBlockInvs is invoked when a peer receives a getblockinvs kaspa -// message. -// It finds the blue future between msg.LowHash and msg.HighHash -// and send the invs to the requesting peer. -func (sp *Peer) OnGetBlockInvs(_ *peer.Peer, msg *wire.MsgGetBlocks) { - dag := sp.server.DAG - // We want to prevent a situation where the syncing peer needs - // to call getblocks once again, but the block we sent it - // won't affect its selected chain, so next time it'll try - // to find the highest shared chain block, it'll find the - // same one as before. - // To prevent that we use blockdag.FinalityInterval as maxHashes. - // This way, if one getblocks is not enough to get the peer - // synced, we can know for sure that its selected chain will - // change, so we'll have higher shared chain block. - hashList, err := dag.AntiPastHashesBetween(msg.LowHash, msg.HighHash, - wire.MaxInvPerMsg) - if err != nil { - sp.AddBanScoreAndPushRejectMsg(wire.CmdGetBlocks, wire.RejectInvalid, nil, - peer.BanScoreInvalidMsgGetBlockInvs, 0, - fmt.Sprintf("error getting antiPast hashes between %s and %s: %s", msg.LowHash, msg.HighHash, err)) - return - } - - // Generate inventory message. - invMsg := wire.NewMsgInv() - for i := range hashList { - iv := wire.NewInvVect(wire.InvTypeSyncBlock, hashList[i]) - invMsg.AddInvVect(iv) - } - - // Send the inventory message if there is anything to send. - if len(invMsg.InvList) > 0 { - sp.QueueMessage(invMsg, nil) - } -} diff --git a/server/p2p/on_get_block_locator.go b/server/p2p/on_get_block_locator.go deleted file mode 100644 index 17e685d5b..000000000 --- a/server/p2p/on_get_block_locator.go +++ /dev/null @@ -1,30 +0,0 @@ -package p2p - -import ( - "fmt" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnGetBlockLocator is invoked when a peer receives a getlocator kaspa -// message. 
-func (sp *Peer) OnGetBlockLocator(_ *peer.Peer, msg *wire.MsgGetBlockLocator) { - locator, err := sp.server.DAG.BlockLocatorFromHashes(msg.HighHash, msg.LowHash) - if err != nil || len(locator) == 0 { - if err != nil { - peerLog.Warnf("Couldn't build a block locator between blocks "+ - "%s and %s that was requested from peer %s: %s", msg.HighHash, msg.LowHash, sp, err) - } - sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, - peer.BanScoreInvalidMsgGetBlockLocator, 0, - fmt.Sprintf("couldn't build a block locator between blocks %s and %s", msg.HighHash, msg.LowHash)) - return - } - - err = sp.PushBlockLocatorMsg(locator) - if err != nil { - peerLog.Errorf("Failed to send block locator message to peer %s: %s", - sp, err) - return - } -} diff --git a/server/p2p/on_get_data.go b/server/p2p/on_get_data.go deleted file mode 100644 index 24e37c7b2..000000000 --- a/server/p2p/on_get_data.go +++ /dev/null @@ -1,85 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -// OnGetData is invoked when a peer receives a getdata kaspa message and -// is used to deliver block and transaction information. -func (sp *Peer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) { - numAdded := 0 - notFound := wire.NewMsgNotFound() - - length := len(msg.InvList) - // A decaying ban score increase is applied to prevent exhausting resources - // with unusually large inventory queries. - // Requesting more than the maximum inventory vector length within a short - // period of time yields a score above the default ban threshold. Sustained - // bursts of small requests are not penalized as that would potentially ban - // peers performing IBD. - // This incremental score decays each minute to half of its value. - sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata") - - // We wait on this wait channel periodically to prevent queuing - // far more data than we can send in a reasonable time, wasting memory. - // The waiting occurs after the database fetch for the next one to - // provide a little pipelining. - var waitChan chan struct{} - doneChan := make(chan struct{}, 1) - - for i, iv := range msg.InvList { - var c chan struct{} - // If this will be the last message we send. - if i == length-1 && len(notFound.InvList) == 0 { - c = doneChan - } else if (i+1)%3 == 0 { - // Buffered so as to not make the send goroutine block. - c = make(chan struct{}, 1) - } - var err error - switch iv.Type { - case wire.InvTypeTx: - err = sp.server.pushTxMsg(sp, (*daghash.TxID)(iv.Hash), c, waitChan) - case wire.InvTypeSyncBlock: - fallthrough - case wire.InvTypeMissingAncestor: - fallthrough - case wire.InvTypeBlock: - err = sp.server.pushBlockMsg(sp, iv.Hash, c, waitChan) - case wire.InvTypeFilteredBlock: - err = sp.server.pushMerkleBlockMsg(sp, iv.Hash, c, waitChan) - default: - peerLog.Warnf("Unknown type in inventory request %d", - iv.Type) - continue - } - if err != nil { - notFound.AddInvVect(iv) - - // When there is a failure fetching the final entry - // and the done channel was sent in due to there - // being no outstanding not found inventory, consume - // it here because there is now not found inventory - // that will use the channel momentarily. - if i == len(msg.InvList)-1 && c != nil { - <-c - } - } - numAdded++ - waitChan = c - } - if len(notFound.InvList) != 0 { - sp.QueueMessage(notFound, doneChan) - } - - // Wait for messages to be sent. 
We can send quite a lot of data at this - // point and this will keep the peer busy for a decent amount of time. - // We don't process anything else by them in this time so that we - // have an idea of when we should hear back from them - else the idle - // timeout could fire when we were only half done sending the blocks. - if numAdded > 0 { - <-doneChan - } -} diff --git a/server/p2p/on_get_selected_tip.go b/server/p2p/on_get_selected_tip.go deleted file mode 100644 index d01394ce7..000000000 --- a/server/p2p/on_get_selected_tip.go +++ /dev/null @@ -1,11 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/wire" -) - -// OnGetSelectedTip is invoked when a peer receives a getSelectedTip kaspa -// message. -func (sp *Peer) OnGetSelectedTip() { - sp.QueueMessage(wire.NewMsgSelectedTip(sp.selectedTipHash()), nil) -} diff --git a/server/p2p/on_inv.go b/server/p2p/on_inv.go deleted file mode 100644 index 1141e5b4d..000000000 --- a/server/p2p/on_inv.go +++ /dev/null @@ -1,39 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnInv is invoked when a peer receives an inv kaspa message and is -// used to examine the inventory being advertised by the remote peer and react -// accordingly. We pass the message down to blockmanager which will call -// QueueMessage with any appropriate responses. -func (sp *Peer) OnInv(_ *peer.Peer, msg *wire.MsgInv) { - if !sp.AppCfg.BlocksOnly { - if len(msg.InvList) > 0 { - sp.server.SyncManager.QueueInv(msg, sp.Peer) - } - return - } - - newInv := wire.NewMsgInvSizeHint(uint(len(msg.InvList))) - for _, invVect := range msg.InvList { - if invVect.Type == wire.InvTypeTx { - peerLog.Tracef("Ignoring tx %s in inv from %s -- "+ - "blocksonly enabled", invVect.Hash, sp) - sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectNotRequested, invVect.Hash, - peer.BanScoreSentTxToBlocksOnly, 0, "announced transactions when blocksonly is enabled") - return - } - err := newInv.AddInvVect(invVect) - if err != nil { - peerLog.Errorf("Failed to add inventory vector: %s", err) - break - } - } - - if len(newInv.InvList) > 0 { - sp.server.SyncManager.QueueInv(newInv, sp.Peer) - } -} diff --git a/server/p2p/on_selected_tip.go b/server/p2p/on_selected_tip.go deleted file mode 100644 index 82071cb9f..000000000 --- a/server/p2p/on_selected_tip.go +++ /dev/null @@ -1,14 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnSelectedTip is invoked when a peer receives a selectedTip kaspa -// message. -func (sp *Peer) OnSelectedTip(peer *peer.Peer, msg *wire.MsgSelectedTip) { - done := make(chan struct{}) - sp.server.SyncManager.QueueSelectedTipMsg(msg, peer, done) - <-done -} diff --git a/server/p2p/on_tx.go b/server/p2p/on_tx.go deleted file mode 100644 index 69701d257..000000000 --- a/server/p2p/on_tx.go +++ /dev/null @@ -1,35 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -// OnTx is invoked when a peer receives a tx kaspa message. It blocks -// until the kaspa transaction has been fully processed. Unlock the block -// handler this does not serialize all transactions through a single thread -// transactions don't rely on the previous one in a linear fashion like blocks. 
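The getdata handler above applies a decaying, size-proportional ban score increase of uint32(length)*99/wire.MaxInvPerMsg. The arithmetic below uses assumed values for wire.MaxInvPerMsg (50000) and the default ban threshold (100) purely to make the scaling concrete; treat both constants as assumptions rather than authoritative kaspad values:

```go
package main

import "fmt"

// Assumed constants for the sake of a standalone example.
const (
	maxInvPerMsg        = 50000 // stand-in for wire.MaxInvPerMsg
	defaultBanThreshold = 100
)

// transientBanScore is the decaying score increase applied per getdata
// request: it scales linearly with the request size, so only repeated
// near-maximum requests inside the decay window push a peer over the
// threshold, while ordinary IBD-sized bursts stay well below it.
func transientBanScore(invCount int) uint32 {
	return uint32(invCount) * 99 / maxInvPerMsg
}

func main() {
	for _, n := range []int{100, 5000, maxInvPerMsg} {
		score := transientBanScore(n)
		fmt.Printf("getdata with %5d inventory vectors -> +%2d (threshold %d)\n",
			n, score, defaultBanThreshold)
	}
}
```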
-func (sp *Peer) OnTx(_ *peer.Peer, msg *wire.MsgTx) { - if sp.AppCfg.BlocksOnly { - peerLog.Tracef("Ignoring tx %s from %s - blocksonly enabled", - msg.TxID(), sp) - return - } - - // Add the transaction to the known inventory for the peer. - // Convert the raw MsgTx to a util.Tx which provides some convenience - // methods and things such as hash caching. - tx := util.NewTx(msg) - iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(tx.ID())) - sp.AddKnownInventory(iv) - - // Queue the transaction up to be handled by the sync manager and - // intentionally block further receives until the transaction is fully - // processed and known good or bad. This helps prevent a malicious peer - // from queuing up a bunch of bad transactions before disconnecting (or - // being disconnected) and wasting memory. - sp.server.SyncManager.QueueTx(tx, sp.Peer, sp.txProcessed) - <-sp.txProcessed -} diff --git a/server/p2p/on_version.go b/server/p2p/on_version.go deleted file mode 100644 index 598b33e4e..000000000 --- a/server/p2p/on_version.go +++ /dev/null @@ -1,52 +0,0 @@ -package p2p - -import ( - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/wire" -) - -// OnVersion is invoked when a peer receives a version kaspa message -// and is used to negotiate the protocol version details as well as kick start -// the communications. -func (sp *Peer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) { - // Choose whether or not to relay transactions before a filter command - // is received. - sp.setDisableRelayTx(msg.DisableRelayTx) - - // Update the address manager and request known addresses from the - // remote peer for outbound connections. This is skipped when running - // on the simulation test network since it is only intended to connect - // to specified peers and actively avoids advertising and connecting to - // discovered peers. - if !sp.AppCfg.Simnet { - addrManager := sp.server.AddrManager - - // Outbound connections. - if !sp.Inbound() { - // TODO(davec): Only do this if not doing the initial block - // download and the local address is routable. - if !sp.AppCfg.DisableListen { - // Get address that best matches. - lna := addrManager.GetBestLocalAddress(sp.NA()) - if sp.server.AddrManager.IsRoutable(lna) { - // Filter addresses the peer already knows about. - addresses := []*wire.NetAddress{lna} - sp.pushAddrMsg(addresses, sp.SubnetworkID()) - } - } - - // Request known addresses if the server address manager needs - // more. - if addrManager.NeedMoreAddresses() { - sp.QueueMessage(wire.NewMsgGetAddresses(false, sp.SubnetworkID()), nil) - - if sp.SubnetworkID() != nil { - sp.QueueMessage(wire.NewMsgGetAddresses(false, nil), nil) - } - } - - // Mark the address as a known good address. - addrManager.Good(sp.NA(), msg.SubnetworkID) - } - } -} diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go deleted file mode 100644 index 12e50f8c2..000000000 --- a/server/p2p/p2p.go +++ /dev/null @@ -1,1840 +0,0 @@ -// Copyright (c) 2013-2017 The btcsuite developers -// Copyright (c) 2015-2017 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package p2p - -import ( - "crypto/rand" - "encoding/binary" - "fmt" - "math" - "net" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/kaspanet/kaspad/dbaccess" - - "github.com/pkg/errors" - - "github.com/kaspanet/kaspad/util/subnetworkid" - - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/blockdag" - "github.com/kaspanet/kaspad/blockdag/indexers" - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/connmgr" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/mempool" - "github.com/kaspanet/kaspad/netsync" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/server/serverutils" - "github.com/kaspanet/kaspad/txscript" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/bloom" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/version" - "github.com/kaspanet/kaspad/wire" -) - -const ( - // defaultServices describes the default services that are supported by - // the server. - defaultServices = wire.SFNodeNetwork | wire.SFNodeBloom | wire.SFNodeCF - - // defaultRequiredServices describes the default services that are - // required to be supported by outbound peers. - defaultRequiredServices = wire.SFNodeNetwork - - // connectionRetryInterval is the base amount of time to wait in between - // retries when connecting to persistent peers. It is adjusted by the - // number of retries such that there is a retry backoff. - connectionRetryInterval = time.Second * 5 -) - -var ( - // userAgentName is the user agent name and is used to help identify - // ourselves to other kaspa peers. - userAgentName = "kaspad" - - // userAgentVersion is the user agent version and is used to help - // identify ourselves to other kaspa peers. - userAgentVersion = version.Version() -) - -// simpleAddr implements the net.Addr interface with two struct fields -type simpleAddr struct { - net, addr string -} - -// String returns the address. -// -// This is part of the net.Addr interface. -func (a simpleAddr) String() string { - return a.addr -} - -// Network returns the network. -// -// This is part of the net.Addr interface. -func (a simpleAddr) Network() string { - return a.net -} - -// Ensure simpleAddr implements the net.Addr interface. -var _ net.Addr = simpleAddr{} - -// broadcastMsg provides the ability to house a kaspa message to be broadcast -// to all connected peers except specified excluded peers. -type broadcastMsg struct { - message wire.Message - excludePeers []*Peer -} - -// broadcastInventoryAdd is a type used to declare that the InvVect it contains -// needs to be added to the rebroadcast map -type broadcastInventoryAdd relayMsg - -// broadcastInventoryDel is a type used to declare that the InvVect it contains -// needs to be removed from the rebroadcast map -type broadcastInventoryDel *wire.InvVect - -// relayMsg packages an inventory vector along with the newly discovered -// inventory so the relay has access to that information. -type relayMsg struct { - invVect *wire.InvVect - data interface{} -} - -// Peer extends the peer to maintain state shared by the server and -// the blockmanager. 
-type Peer struct { - // The following variables must only be used atomically - FeeFilterInt int64 - - *peer.Peer - - connReq *connmgr.ConnReq - server *Server - persistent bool - relayMtx sync.Mutex - DisableRelayTx bool - sentAddrs bool - isWhitelisted bool - filter *bloom.Filter - knownAddresses map[string]struct{} - DynamicBanScore connmgr.DynamicBanScore - quit chan struct{} - // The following chans are used to sync blockmanager and server. - txProcessed chan struct{} - blockProcessed chan struct{} -} - -// peerState maintains state of inbound, persistent, outbound peers as well -// as banned peers and outbound groups. -type peerState struct { - inboundPeers map[int32]*Peer - outboundPeers map[int32]*Peer - persistentPeers map[int32]*Peer - banned map[string]time.Time -} - -// Count returns the count of all known peers. -func (ps *peerState) Count() int { - return ps.countInboundPeers() + ps.countOutboundPeers() -} - -func (ps *peerState) countInboundPeers() int { - return len(ps.inboundPeers) -} - -func (ps *peerState) countOutboundPeers() int { - return len(ps.outboundPeers) + - len(ps.persistentPeers) -} - -// forAllOutboundPeers is a helper function that runs a callback on all outbound -// peers known to peerState. -// The loop stops and returns false if one of the callback calls returns false. -// Otherwise the function should return true. -func (ps *peerState) forAllOutboundPeers(callback func(sp *Peer) bool) bool { - for _, e := range ps.outboundPeers { - shouldContinue := callback(e) - if !shouldContinue { - return false - } - } - for _, e := range ps.persistentPeers { - shouldContinue := callback(e) - if !shouldContinue { - return false - } - } - return true -} - -// forAllInboundPeers is a helper function that runs a callback on all inbound -// peers known to peerState. -// The loop stops and returns false if one of the callback calls returns false. -// Otherwise the function should return true. -func (ps *peerState) forAllInboundPeers(callback func(sp *Peer) bool) bool { - for _, e := range ps.inboundPeers { - shouldContinue := callback(e) - if !shouldContinue { - return false - } - } - return true -} - -// forAllPeers is a helper function that runs a callback on all peers known to -// peerState. -// The loop stops and returns false if one of the callback calls returns false. -// Otherwise the function should return true. -func (ps *peerState) forAllPeers(callback func(sp *Peer) bool) bool { - shouldContinue := ps.forAllInboundPeers(callback) - if !shouldContinue { - return false - } - return ps.forAllOutboundPeers(callback) -} - -// Server provides a kaspa server for handling communications to and from -// kaspa peers. -type Server struct { - cfg *config.Config - // The following variables must only be used atomically. - // Putting the uint64s first makes them 64-bit aligned for 32-bit systems. - bytesReceived uint64 // Total bytes received from all peers since start. - bytesSent uint64 // Total bytes sent by all peers since start. 
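
A note on the two counters above: sync/atomic's 64-bit operations require 64-bit-aligned operands on 32-bit platforms, and Go only guarantees that alignment for the first word of an allocated struct. Since cfg, a pointer field, precedes the counters here, a 32-bit build would likely lose the alignment the comment describes; keeping the uint64s literally first, as in the hypothetical layout below, preserves it. This is an observation plus a sketch, not a change made by this diff.

	type server32BitSafe struct {
		bytesReceived uint64 // first word of the allocation: 64-bit aligned
		bytesSent     uint64 // follows at offset 8, so also 64-bit aligned
		// pointer-sized and smaller fields come after the atomic counters
	}
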
- shutdown int32 - shutdownSched int32 - - DAGParams *dagconfig.Params - AddrManager *addrmgr.AddrManager - connManager *connmgr.ConnManager - SyncManager *netsync.SyncManager - SigCache *txscript.SigCache - DAG *blockdag.BlockDAG - TxMemPool *mempool.TxPool - - modifyRebroadcastInv chan interface{} - newPeers chan *Peer - donePeers chan *Peer - banPeers chan *Peer - Query chan interface{} - relayInv chan relayMsg - broadcast chan broadcastMsg - wg sync.WaitGroup - nat serverutils.NAT - TimeSource blockdag.TimeSource - services wire.ServiceFlag - - // We add to quitWaitGroup before every instance in which we wait for - // the quit channel so that all those instances finish before we shut - // down the managers (connManager, addrManager, etc), - quitWaitGroup sync.WaitGroup - quit chan struct{} - - // The following fields are used for optional indexes. They will be nil - // if the associated index is not enabled. These fields are set during - // initial creation of the server and never changed afterwards, so they - // do not need to be protected for concurrent access. - AcceptanceIndex *indexers.AcceptanceIndex - - notifyNewTransactions func(txns []*mempool.TxDesc) -} - -// newServerPeer returns a new serverPeer instance. The peer needs to be set by -// the caller. -func newServerPeer(s *Server, isPersistent bool) *Peer { - return &Peer{ - server: s, - persistent: isPersistent, - filter: bloom.LoadFilter(nil), - knownAddresses: make(map[string]struct{}), - quit: make(chan struct{}), - txProcessed: make(chan struct{}, 1), - blockProcessed: make(chan struct{}, 1), - } -} - -// selectedTipHash returns the current selected tip hash -func (sp *Peer) selectedTipHash() *daghash.Hash { - return sp.server.DAG.SelectedTipHash() -} - -// blockExists determines whether a block with the given hash exists in -// the DAG. -func (sp *Peer) blockExists(hash *daghash.Hash) bool { - return sp.server.DAG.IsInDAG(hash) -} - -// addKnownAddresses adds the given addresses to the set of known addresses to -// the peer to prevent sending duplicate addresses. -func (sp *Peer) addKnownAddresses(addresses []*wire.NetAddress) { - for _, na := range addresses { - sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{} - } -} - -// addressKnown true if the given address is already known to the peer. -func (sp *Peer) addressKnown(na *wire.NetAddress) bool { - _, exists := sp.knownAddresses[addrmgr.NetAddressKey(na)] - return exists -} - -// setDisableRelayTx toggles relaying of transactions for the given peer. -// It is safe for concurrent access. -func (sp *Peer) setDisableRelayTx(disable bool) { - sp.relayMtx.Lock() - defer sp.relayMtx.Unlock() - sp.DisableRelayTx = disable -} - -// relayTxDisabled returns whether or not relaying of transactions for the given -// peer is disabled. -// It is safe for concurrent access. -func (sp *Peer) relayTxDisabled() bool { - sp.relayMtx.Lock() - defer sp.relayMtx.Unlock() - return sp.DisableRelayTx -} - -// pushAddrMsg sends an addr message to the connected peer using the provided -// addresses. -func (sp *Peer) pushAddrMsg(addresses []*wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { - // Filter addresses already known to the peer. 
- addrs := make([]*wire.NetAddress, 0, len(addresses)) - for _, addr := range addresses { - if !sp.addressKnown(addr) { - addrs = append(addrs, addr) - } - } - known, err := sp.PushAddrMsg(addrs, subnetworkID) - if err != nil { - peerLog.Errorf("Can't push address message to %s: %s", sp.Peer, err) - sp.Disconnect() - return - } - sp.addKnownAddresses(known) -} - -// addBanScore increases the persistent and decaying ban score fields by the -// values passed as parameters. If the resulting score exceeds half of the ban -// threshold, a warning is logged including the reason provided. Further, if -// the score is above the ban threshold, the peer will be banned and -// disconnected. -func (sp *Peer) addBanScore(persistent, transient uint32, reason string) { - if sp.isWhitelisted { - peerLog.Debugf("Misbehaving whitelisted peer %s: %s", sp, reason) - } - - warnThreshold := sp.AppCfg.BanThreshold >> 1 - if transient == 0 && persistent == 0 { - // The score is not being increased, but a warning message is still - // logged if the score is above the warn threshold. - score := sp.DynamicBanScore.Int() - if score > warnThreshold { - peerLog.Warnf("Misbehaving peer %s: %s -- ban score is %d, "+ - "it was not increased this time", sp, reason, score) - } - return - } - - score := sp.DynamicBanScore.Increase(persistent, transient) - logMsg := fmt.Sprintf("Misbehaving peer %s: %s -- ban score increased to %d", - sp, reason, score) - if score > warnThreshold { - peerLog.Warn(logMsg) - if !sp.AppCfg.DisableBanning && !sp.isWhitelisted && score > sp.AppCfg.BanThreshold { - peerLog.Warnf("Misbehaving peer %s -- banning and disconnecting", - sp) - sp.server.BanPeer(sp) - sp.Disconnect() - } - } else if persistent != 0 { - peerLog.Warn(logMsg) - } else { - peerLog.Trace(logMsg) - } -} - -// enforceNodeBloomFlag disconnects the peer if the server is not configured to -// allow bloom filters. Additionally, if the peer has negotiated to a protocol -// version that is high enough to observe the bloom filter service support bit, -// it will be banned since it is intentionally violating the protocol. -func (sp *Peer) enforceNodeBloomFlag(cmd wire.MessageCommand) bool { - if sp.server.services&wire.SFNodeBloom != wire.SFNodeBloom { - // NOTE: Even though the addBanScore function already examines - // whether or not banning is enabled, it is checked here as well - // to ensure the violation is logged and the peer is - // disconnected regardless. - if !sp.AppCfg.DisableBanning { - - // Disconnect the peer regardless of whether it was - // banned. - sp.addBanScore(peer.BanScoreNodeBloomFlagViolation, 0, cmd.String()) - sp.Disconnect() - return false - } - - // Disconnect the peer regardless of protocol version or banning - // state. - peerLog.Debugf("%s sent an unsupported %s request -- "+ - "disconnecting", sp, cmd) - sp.Disconnect() - return false - } - - return true -} - -// OnRead is invoked when a peer receives a message and it is used to update -// the bytes received by the server. -func (sp *Peer) OnRead(_ *peer.Peer, bytesRead int, msg wire.Message, err error) { - sp.server.AddBytesReceived(uint64(bytesRead)) -} - -// OnWrite is invoked when a peer sends a message and it is used to update -// the bytes sent by the server. -func (sp *Peer) OnWrite(_ *peer.Peer, bytesWritten int, msg wire.Message, err error) { - sp.server.AddBytesSent(uint64(bytesWritten)) -} - -// randomUint16Number returns a random uint16 in a specified input range. 
Note -// that the range is in zeroth ordering; if you pass it 1800, you will get -// values from 0 to 1800. -func randomUint16Number(max uint16) uint16 { - // In order to avoid modulo bias and ensure every possible outcome in - // [0, max) has equal probability, the random number must be sampled - // from a random source that has a range limited to a multiple of the - // modulus. - var randomNumber uint16 - var limitRange = (math.MaxUint16 / max) * max - for { - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) - if randomNumber < limitRange { - return (randomNumber % max) - } - } -} - -// AddRebroadcastInventory adds 'iv' to the list of inventories to be -// rebroadcasted at random intervals until they show up in a block. -func (s *Server) AddRebroadcastInventory(iv *wire.InvVect, data interface{}) { - // Ignore if shutting down. - if atomic.LoadInt32(&s.shutdown) != 0 { - return - } - - s.modifyRebroadcastInv <- broadcastInventoryAdd{invVect: iv, data: data} -} - -// RemoveRebroadcastInventory removes 'iv' from the list of items to be -// rebroadcasted if present. -func (s *Server) RemoveRebroadcastInventory(iv *wire.InvVect) { - // Ignore if shutting down. - if atomic.LoadInt32(&s.shutdown) != 0 { - return - } - - s.modifyRebroadcastInv <- broadcastInventoryDel(iv) -} - -// RelayTransactions generates and relays inventory vectors for all of the -// passed transactions to all connected peers. -func (s *Server) RelayTransactions(txns []*mempool.TxDesc) { - for _, txD := range txns { - iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txD.Tx.ID())) - s.RelayInventory(iv, txD) - } -} - -// pushTxMsg sends a tx message for the provided transaction hash to the -// connected peer. An error is returned if the transaction hash is not known. -func (s *Server) pushTxMsg(sp *Peer, txID *daghash.TxID, doneChan chan<- struct{}, - waitChan <-chan struct{}) error { - - // Attempt to fetch the requested transaction from the pool. A - // call could be made to check for existence first, but simply trying - // to fetch a missing transaction results in the same behavior. - tx, err := s.TxMemPool.FetchTransaction(txID) - if err != nil { - peerLog.Tracef("Unable to fetch tx %s from transaction "+ - "pool: %s", txID, err) - - if doneChan != nil { - doneChan <- struct{}{} - } - return err - } - - // Once we have fetched data wait for any previous operation to finish. - if waitChan != nil { - <-waitChan - } - - sp.QueueMessage(tx.MsgTx(), doneChan) - - return nil -} - -// pushBlockMsg sends a block message for the provided block hash to the -// connected peer. An error is returned if the block hash is not known. -func (s *Server) pushBlockMsg(sp *Peer, hash *daghash.Hash, doneChan chan<- struct{}, - waitChan <-chan struct{}) error { - - // Fetch the block from the database. - block, err := s.DAG.BlockByHash(hash) - if err != nil { - peerLog.Tracef("Unable to fetch requested block hash %s: %s", - hash, err) - - if doneChan != nil { - doneChan <- struct{}{} - } - return err - } - msgBlock := block.MsgBlock() - - // If we are a full node and the peer is a partial node, we must convert - // the block to a partial block. - nodeSubnetworkID := s.DAG.SubnetworkID() - peerSubnetworkID := sp.Peer.SubnetworkID() - isNodeFull := nodeSubnetworkID == nil - isPeerFull := peerSubnetworkID == nil - if isNodeFull && !isPeerFull { - msgBlock.ConvertToPartial(peerSubnetworkID) - } - - // Once we have fetched data wait for any previous operation to finish. 
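
A worked example of the rejection-sampling loop in randomUint16Number above (note that, despite its doc comment, the result lies in [0, max), i.e. 0 through max-1): for max = 1800, limitRange = (65535/1800)*1800 = 64800, so raw samples of 64800 and above are discarded and each residue 0..1799 keeps exactly 36 preimages, which is what removes the modulo bias. A compact sketch of the same idea, where randomRaw is a hypothetical helper standing in for the crypto/rand read:

	func uniformUint16(max uint16) uint16 {
		limit := (math.MaxUint16 / max) * max // 64800 when max == 1800
		for {
			r := randomRaw() // assumed: returns a uniform uint16
			if r < limit {
				return r % max // every residue is now equally likely
			}
		}
	}
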
- if waitChan != nil { - <-waitChan - } - - sp.QueueMessage(msgBlock, doneChan) - - return nil -} - -// pushMerkleBlockMsg sends a merkleblock message for the provided block hash to -// the connected peer. Since a merkle block requires the peer to have a filter -// loaded, this call will simply be ignored if there is no filter loaded. An -// error is returned if the block hash is not known. -func (s *Server) pushMerkleBlockMsg(sp *Peer, hash *daghash.Hash, - doneChan chan<- struct{}, waitChan <-chan struct{}) error { - - // Do not send a response if the peer doesn't have a filter loaded. - if !sp.filter.IsLoaded() { - if doneChan != nil { - doneChan <- struct{}{} - } - return nil - } - - // Fetch the raw block bytes from the database. - blk, err := sp.server.DAG.BlockByHash(hash) - if err != nil { - peerLog.Tracef("Unable to fetch requested block hash %s: %s", - hash, err) - - if doneChan != nil { - doneChan <- struct{}{} - } - return err - } - - // Generate a merkle block by filtering the requested block according - // to the filter for the peer. - merkle, matchedTxIndices := bloom.NewMerkleBlock(blk, sp.filter) - - // Once we have fetched data wait for any previous operation to finish. - if waitChan != nil { - <-waitChan - } - - // Send the merkleblock. Only send the done channel with this message - // if no transactions will be sent afterwards. - var dc chan<- struct{} - if len(matchedTxIndices) == 0 { - dc = doneChan - } - sp.QueueMessage(merkle, dc) - - // Finally, send any matched transactions. - blkTransactions := blk.MsgBlock().Transactions - for i, txIndex := range matchedTxIndices { - // Only send the done channel on the final transaction. - var dc chan<- struct{} - if i == len(matchedTxIndices)-1 { - dc = doneChan - } - if txIndex < uint32(len(blkTransactions)) { - sp.QueueMessage(blkTransactions[txIndex], dc) - } - } - - return nil -} - -// handleAddPeerMsg deals with adding new peers. It is invoked from the -// peerHandler goroutine. -func (s *Server) handleAddPeerMsg(state *peerState, sp *Peer) bool { - if sp == nil { - return false - } - - // Ignore new peers if we're shutting down. - if atomic.LoadInt32(&s.shutdown) != 0 { - srvrLog.Infof("New peer %s ignored - server is shutting down", sp) - sp.Disconnect() - return false - } - - // Disconnect banned peers. - host, _, err := net.SplitHostPort(sp.Addr()) - if err != nil { - srvrLog.Debugf("can't split hostport %s", err) - sp.Disconnect() - return false - } - if banEnd, ok := state.banned[host]; ok { - if time.Now().Before(banEnd) { - srvrLog.Debugf("Peer %s is banned for another %s - disconnecting", - host, time.Until(banEnd)) - sp.Disconnect() - return false - } - - srvrLog.Infof("Peer %s is no longer banned", host) - delete(state.banned, host) - } - - // TODO: Check for max peers from a single IP. - - // Limit max number of total peers. - if sp.Inbound() && len(state.inboundPeers) >= sp.AppCfg.MaxInboundPeers { - srvrLog.Infof("Max inbound peers reached [%d] - disconnecting peer %s", - sp.AppCfg.MaxInboundPeers, sp) - sp.Disconnect() - return false - } - - // Add the new peer and start it. - srvrLog.Debugf("New peer %s", sp) - if sp.Inbound() { - state.inboundPeers[sp.ID()] = sp - } else { - if sp.persistent { - state.persistentPeers[sp.ID()] = sp - } else { - state.outboundPeers[sp.ID()] = sp - } - } - - // Notify the connection manager. - s.connManager.NotifyConnectionRequestComplete() - - return true -} - -// handleDonePeerMsg deals with peers that have signalled they are done. 
It is -// invoked from the peerHandler goroutine. -func (s *Server) handleDonePeerMsg(state *peerState, sp *Peer) { - var list map[int32]*Peer - if sp.persistent { - list = state.persistentPeers - } else if sp.Inbound() { - list = state.inboundPeers - } else { - list = state.outboundPeers - } - if _, ok := list[sp.ID()]; ok { - if !sp.Inbound() && sp.connReq != nil { - s.connManager.Disconnect(sp.connReq.ID()) - } - delete(list, sp.ID()) - srvrLog.Debugf("Removed peer %s", sp) - return - } - - if sp.connReq != nil { - s.connManager.Disconnect(sp.connReq.ID()) - } - - // Update the address' last seen time if the peer has acknowledged - // our version and has sent us its version as well. - if sp.VerAckReceived() && sp.VersionKnown() && sp.NA() != nil { - s.AddrManager.Connected(sp.NA()) - } - - // If we get here it means that either we didn't know about the peer - // or we purposefully deleted it. -} - -// handleBanPeerMsg deals with banning peers. It is invoked from the -// peerHandler goroutine. -func (s *Server) handleBanPeerMsg(state *peerState, sp *Peer) { - host, _, err := net.SplitHostPort(sp.Addr()) - if err != nil { - srvrLog.Debugf("can't split ban peer %s: %s", sp.Addr(), err) - return - } - direction := logger.DirectionString(sp.Inbound()) - srvrLog.Infof("Banned peer %s (%s) for %s", host, direction, - sp.AppCfg.BanDuration) - state.banned[host] = time.Now().Add(sp.AppCfg.BanDuration) -} - -// handleRelayInvMsg deals with relaying inventory to peers that are not already -// known to have it. It is invoked from the peerHandler goroutine. -func (s *Server) handleRelayInvMsg(state *peerState, msg relayMsg) { - state.forAllPeers(func(sp *Peer) bool { - if !sp.Connected() { - return true - } - - if msg.invVect.Type == wire.InvTypeTx { - // Don't relay the transaction to the peer when it has - // transaction relaying disabled. - if sp.relayTxDisabled() { - return true - } - - txD, ok := msg.data.(*mempool.TxDesc) - if !ok { - peerLog.Warnf("Underlying data for tx inv "+ - "relay is not a *mempool.TxDesc: %T", - msg.data) - return true - } - - // Don't relay the transaction if the transaction fee-per-kb - // is less than the peer's feefilter. - feeFilter := uint64(atomic.LoadInt64(&sp.FeeFilterInt)) - if feeFilter > 0 && txD.FeePerMegaGram < feeFilter { - return true - } - - // Don't relay the transaction if there is a bloom - // filter loaded and the transaction doesn't match it. - if sp.filter.IsLoaded() { - if !sp.filter.MatchTxAndUpdate(txD.Tx) { - return true - } - } - - // Don't relay the transaction if the peer's subnetwork is - // incompatible with it. - if !txD.Tx.MsgTx().IsSubnetworkCompatible(sp.Peer.SubnetworkID()) { - return true - } - } - - // Queue the inventory to be relayed with the next batch. - // It will be ignored if the peer is already known to - // have the inventory. - sp.QueueInventory(msg.invVect) - return true - }) -} - -// handleBroadcastMsg deals with broadcasting messages to peers. It is invoked -// from the peerHandler goroutine. 
-func (s *Server) handleBroadcastMsg(state *peerState, bmsg *broadcastMsg) { - state.forAllPeers(func(sp *Peer) bool { - if !sp.Connected() { - return true - } - - for _, ep := range bmsg.excludePeers { - if sp == ep { - return true - } - } - - sp.QueueMessage(bmsg.message, nil) - return true - }) -} - -type getConnCountMsg struct { - reply chan int32 -} - -//GetPeersMsg is the message type which is used by the rpc server to get the peers list from the p2p server -type GetPeersMsg struct { - Reply chan []*Peer -} - -//GetManualNodesMsg is the message type which is used by the rpc server to get the list of persistent peers from the p2p server -type GetManualNodesMsg struct { - Reply chan []*Peer -} - -//DisconnectNodeMsg is the message that is sent to a peer before it gets disconnected -type DisconnectNodeMsg struct { - Cmp func(*Peer) bool - Reply chan error -} - -//ConnectNodeMsg is the message type which is used by the rpc server to add a peer to the p2p server -type ConnectNodeMsg struct { - Addr string - Permanent bool - Reply chan error -} - -//RemoveNodeMsg is the message type which is used by the rpc server to remove a peer from the p2p server -type RemoveNodeMsg struct { - Cmp func(*Peer) bool - Reply chan error -} - -// handleQuery is the central handler for all queries and commands from other -// goroutines related to peer state. -func (s *Server) handleQuery(state *peerState, querymsg interface{}) { - switch msg := querymsg.(type) { - case getConnCountMsg: - nconnected := int32(0) - state.forAllPeers(func(sp *Peer) bool { - if sp.Connected() { - nconnected++ - } - return true - }) - msg.reply <- nconnected - - case GetPeersMsg: - peers := make([]*Peer, 0, state.Count()) - state.forAllPeers(func(sp *Peer) bool { - if !sp.Connected() { - return true - } - peers = append(peers, sp) - return true - }) - msg.Reply <- peers - - case ConnectNodeMsg: - // TODO: duplicate oneshots? - // Limit max number of total peers. - if state.countOutboundPeers() >= s.cfg.TargetOutboundPeers { - msg.Reply <- errors.WithStack(connmgr.ErrMaxOutboundPeers) - return - } - for _, peer := range state.persistentPeers { - if peer.Addr() == msg.Addr { - if msg.Permanent { - msg.Reply <- errors.WithStack(connmgr.ErrAlreadyConnected) - } else { - msg.Reply <- errors.WithStack(connmgr.ErrAlreadyPermanent) - } - return - } - } - - netAddr, err := s.addrStringToNetAddr(msg.Addr) - if err != nil { - msg.Reply <- err - return - } - - // TODO: if too many, nuke a non-perm peer. - spawn("SPAWN_PLACEHOLDER_NAME", func() { - s.connManager.Connect(&connmgr.ConnReq{ - Addr: netAddr, - Permanent: msg.Permanent, - }) - }) - msg.Reply <- nil - case RemoveNodeMsg: - found := disconnectPeer(state.persistentPeers, msg.Cmp) - - if found { - msg.Reply <- nil - } else { - msg.Reply <- errors.WithStack(connmgr.ErrPeerNotFound) - } - // Request a list of the persistent (added) peers. - case GetManualNodesMsg: - // Respond with a slice of the relevant peers. - peers := make([]*Peer, 0, len(state.persistentPeers)) - for _, sp := range state.persistentPeers { - peers = append(peers, sp) - } - msg.Reply <- peers - case DisconnectNodeMsg: - // Check inbound peers. We pass a nil callback since we don't - // require any additional actions on disconnect for inbound peers. - found := disconnectPeer(state.inboundPeers, msg.Cmp) - if found { - msg.Reply <- nil - return - } - - // Check outbound peers. 
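
The message types above (GetPeersMsg, ConnectNodeMsg, and so on), together with handleQuery, form a request/reply protocol over the server's Query channel: every request is answered from the peer-handler goroutine, which is why callers never need to lock peerState themselves. A minimal caller-side sketch, not taken from the original file:

	reply := make(chan []*Peer)
	s.Query <- GetPeersMsg{Reply: reply}
	connectedPeers := <-reply // filled in by the GetPeersMsg case above
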
- found = disconnectPeer(state.outboundPeers, msg.Cmp) - if found { - // If there are multiple outbound connections to the same - // ip:port, continue disconnecting them all until no such - // peers are found. - for found { - found = disconnectPeer(state.outboundPeers, msg.Cmp) - } - msg.Reply <- nil - return - } - - msg.Reply <- errors.WithStack(connmgr.ErrPeerNotFound) - } -} - -// disconnectPeer attempts to drop the connection of a targeted peer in the -// passed peer list. Targets are identified via usage of the passed -// `compareFunc`, which should return `true` if the passed peer is the target -// peer. This function returns true on success and false if the peer is unable -// to be located. If the peer is found, and the passed callback: `whenFound' -// isn't nil, we call it with the peer as the argument before it is removed -// from the peerList, and is disconnected from the server. -func disconnectPeer(peerList map[int32]*Peer, compareFunc func(*Peer) bool) bool { - for addr, peer := range peerList { - if compareFunc(peer) { - // This is ok because we are not continuing - // to iterate so won't corrupt the loop. - delete(peerList, addr) - peer.Disconnect() - return true - } - } - return false -} - -// newPeerConfig returns the configuration for the given serverPeer. -func newPeerConfig(sp *Peer) *peer.Config { - return &peer.Config{ - Listeners: peer.MessageListeners{ - OnVersion: sp.OnVersion, - OnTx: sp.OnTx, - OnBlock: sp.OnBlock, - OnInv: sp.OnInv, - OnGetData: sp.OnGetData, - OnGetBlockLocator: sp.OnGetBlockLocator, - OnBlockLocator: sp.OnBlockLocator, - OnGetBlockInvs: sp.OnGetBlockInvs, - OnFeeFilter: sp.OnFeeFilter, - OnFilterAdd: sp.OnFilterAdd, - OnFilterClear: sp.OnFilterClear, - OnFilterLoad: sp.OnFilterLoad, - OnGetAddr: sp.OnGetAddr, - OnAddr: sp.OnAddr, - OnGetSelectedTip: sp.OnGetSelectedTip, - OnSelectedTip: sp.OnSelectedTip, - OnRead: sp.OnRead, - OnWrite: sp.OnWrite, - }, - SelectedTipHash: sp.selectedTipHash, - IsInDAG: sp.blockExists, - AddBanScore: sp.addBanScore, - HostToNetAddress: sp.server.AddrManager.HostToNetAddress, - Proxy: sp.AppCfg.Proxy, - UserAgentName: userAgentName, - UserAgentVersion: userAgentVersion, - UserAgentComments: sp.AppCfg.UserAgentComments, - DAGParams: sp.server.DAGParams, - Services: sp.server.services, - DisableRelayTx: sp.AppCfg.BlocksOnly, - ProtocolVersion: peer.MaxProtocolVersion, - SubnetworkID: sp.AppCfg.SubnetworkID, - } -} - -// inboundPeerConnected is invoked by the connection manager when a new inbound -// connection is established. It initializes a new inbound server peer -// instance, associates it with the connection, and starts a goroutine to wait -// for disconnection. -func (s *Server) inboundPeerConnected(conn net.Conn) { - sp := newServerPeer(s, false) - sp.Peer = peer.NewInboundPeer(newPeerConfig(sp), s.cfg) - - s.peerConnected(sp, conn) -} - -// outboundPeerConnected is invoked by the connection manager when a new -// outbound connection is established. It initializes a new outbound server -// peer instance, associates it with the relevant state such as the connection -// request instance and the connection itself, and finally notifies the address -// manager of the attempt. 
-func (s *Server) outboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) { - sp := newServerPeer(s, connReq.Permanent) - outboundPeer, err := peer.NewOutboundPeer(newPeerConfig(sp), s.cfg, connReq.Addr.String()) - if err != nil { - srvrLog.Debugf("Cannot create outbound peer %s: %s", connReq.Addr, err) - s.connManager.Disconnect(connReq.ID()) - } - sp.Peer = outboundPeer - sp.connReq = connReq - - s.peerConnected(sp, conn) - - s.AddrManager.Attempt(sp.NA()) -} - -func (s *Server) peerConnected(sp *Peer, conn net.Conn) { - sp.isWhitelisted = s.isWhitelisted(conn.RemoteAddr()) - - spawn("SPAWN_PLACEHOLDER_NAME", func() { - err := sp.AssociateConnection(conn) - if err != nil { - peerLog.Debugf("Error connecting to peer: %+v", err) - return - } - - s.SyncManager.NewPeer(sp.Peer) - - s.AddPeer(sp) - - s.peerDoneHandler(sp) - }) -} - -// outboundPeerConnected is invoked by the connection manager when a new -// outbound connection failed to be established. -func (s *Server) outboundPeerConnectionFailed(connReq *connmgr.ConnReq) { - // If the connection request has no address - // associated to it, do nothing. - if connReq.Addr == nil { - return - } - - host, portStr, err := net.SplitHostPort(connReq.Addr.String()) - if err != nil { - srvrLog.Debugf("Cannot extract address host and port %s: %s", connReq.Addr, err) - } - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - srvrLog.Debugf("Cannot parse port %s: %s", connReq.Addr, err) - } - - // defaultServices is used here because Attempt makes no use - // of the services field and NewNetAddressIPPort does not - // take nil for it. - netAddress := wire.NewNetAddressIPPort(net.ParseIP(host), uint16(port), defaultServices) - - s.AddrManager.Attempt(netAddress) -} - -// peerDoneHandler handles peer disconnects by notifiying the server that it's -// done along with other performing other desirable cleanup. -func (s *Server) peerDoneHandler(sp *Peer) { - sp.WaitForDisconnect() - s.donePeers <- sp - - // Only tell sync manager we are gone if we ever told it we existed. - if sp.VersionKnown() { - s.SyncManager.DonePeer(sp.Peer) - - // Evict any remaining orphans that were sent by the peer. - numEvicted := s.TxMemPool.RemoveOrphansByTag(mempool.Tag(sp.ID())) - if numEvicted > 0 { - txmpLog.Debugf("Evicted %d %s from peer %s (id %d)", - numEvicted, logger.PickNoun(numEvicted, "orphan", - "orphans"), sp, sp.ID()) - } - } - close(sp.quit) -} - -// peerHandler is used to handle peer operations such as adding and removing -// peers to and from the server, banning peers, and broadcasting messages to -// peers. It must be run in a goroutine. -func (s *Server) peerHandler() { - // Start the address manager and sync manager, both of which are needed - // by peers. This is done here since their lifecycle is closely tied - // to this handler and rather than adding more channels to sychronize - // things, it's easier and slightly faster to simply start and stop them - // in this handler. 
- err := s.AddrManager.Start() - if err != nil { - panic(errors.Wrap(err, "address manager failed to start")) - } - s.SyncManager.Start() - - s.quitWaitGroup.Add(1) - - srvrLog.Tracef("Starting peer handler") - - state := &peerState{ - inboundPeers: make(map[int32]*Peer), - persistentPeers: make(map[int32]*Peer), - outboundPeers: make(map[int32]*Peer), - banned: make(map[string]time.Time), - } - - if !s.cfg.DisableDNSSeed { - seedFromSubNetwork := func(subnetworkID *subnetworkid.SubnetworkID) { - connmgr.SeedFromDNS(s.cfg, s.DAGParams, defaultRequiredServices, - false, subnetworkID, s.cfg.Lookup, func(addrs []*wire.NetAddress) { - // Kaspad uses a lookup of the dns seeder here. Since seeder returns - // IPs of nodes and not its own IP, we can not know real IP of - // source. So we'll take first returned address as source. - s.AddrManager.AddAddresses(addrs, addrs[0], subnetworkID) - }) - } - - // Add full nodes discovered through DNS to the address manager. - seedFromSubNetwork(nil) - - if s.cfg.SubnetworkID != nil { - // Node is partial - fetch nodes with same subnetwork - seedFromSubNetwork(s.cfg.SubnetworkID) - } - } - spawn("SPAWN_PLACEHOLDER_NAME", s.connManager.Start) - -out: - for { - select { - // New peers connected to the server. - case p := <-s.newPeers: - s.handleAddPeerMsg(state, p) - - // Disconnected peers. - case p := <-s.donePeers: - s.handleDonePeerMsg(state, p) - - // Peer to ban. - case p := <-s.banPeers: - s.handleBanPeerMsg(state, p) - - // New inventory to potentially be relayed to other peers. - case invMsg := <-s.relayInv: - s.handleRelayInvMsg(state, invMsg) - - // Message to broadcast to all connected peers except those - // which are excluded by the message. - case bmsg := <-s.broadcast: - s.handleBroadcastMsg(state, &bmsg) - - case qmsg := <-s.Query: - s.handleQuery(state, qmsg) - - case <-s.quit: - // Disconnect all peers on server shutdown. - state.forAllPeers(func(sp *Peer) bool { - srvrLog.Tracef("Shutdown peer %s", sp) - sp.Disconnect() - return true - }) - s.quitWaitGroup.Done() - break out - } - } - - // Wait for all p2p server quit jobs to finish before stopping the - // various managers - s.quitWaitGroup.Wait() - - s.connManager.Stop() - s.SyncManager.Stop() - s.AddrManager.Stop() - - // Drain channels before exiting so nothing is left waiting around - // to send. -cleanup: - for { - select { - case <-s.newPeers: - case <-s.donePeers: - case <-s.relayInv: - case <-s.broadcast: - case <-s.Query: - default: - break cleanup - } - } - s.wg.Done() - srvrLog.Tracef("Peer handler done") -} - -// AddPeer adds a new peer that has already been connected to the server. -func (s *Server) AddPeer(sp *Peer) { - s.newPeers <- sp -} - -// BanPeer bans a peer that has already been connected to the server by ip. -func (s *Server) BanPeer(sp *Peer) { - s.banPeers <- sp -} - -// RelayInventory relays the passed inventory vector to all connected peers -// that are not already known to have it. -func (s *Server) RelayInventory(invVect *wire.InvVect, data interface{}) { - s.relayInv <- relayMsg{invVect: invVect, data: data} -} - -// BroadcastMessage sends msg to all peers currently connected to the server -// except those in the passed peers to exclude. -func (s *Server) BroadcastMessage(msg wire.Message, exclPeers ...*Peer) { - // XXX: Need to determine if this is an alert that has already been - // broadcast and refrain from broadcasting again. 
- bmsg := broadcastMsg{message: msg, excludePeers: exclPeers} - s.broadcast <- bmsg -} - -// ConnectedCount returns the number of currently connected peers. -func (s *Server) ConnectedCount() int32 { - replyChan := make(chan int32) - - s.Query <- getConnCountMsg{reply: replyChan} - - return <-replyChan -} - -// AddBytesSent adds the passed number of bytes to the total bytes sent counter -// for the server. It is safe for concurrent access. -func (s *Server) AddBytesSent(bytesSent uint64) { - atomic.AddUint64(&s.bytesSent, bytesSent) -} - -// AddBytesReceived adds the passed number of bytes to the total bytes received -// counter for the server. It is safe for concurrent access. -func (s *Server) AddBytesReceived(bytesReceived uint64) { - atomic.AddUint64(&s.bytesReceived, bytesReceived) -} - -// NetTotals returns the sum of all bytes received and sent across the network -// for all peers. It is safe for concurrent access. -func (s *Server) NetTotals() (uint64, uint64) { - return atomic.LoadUint64(&s.bytesReceived), - atomic.LoadUint64(&s.bytesSent) -} - -// rebroadcastHandler keeps track of user submitted inventories that we have -// sent out but have not yet made it into a block. We periodically rebroadcast -// them in case our peers restarted or otherwise lost track of them. -func (s *Server) rebroadcastHandler() { - // Wait 5 min before first tx rebroadcast. - timer := time.NewTimer(5 * time.Minute) - pendingInvs := make(map[wire.InvVect]interface{}) - - s.quitWaitGroup.Add(1) - -out: - for { - select { - case riv := <-s.modifyRebroadcastInv: - switch msg := riv.(type) { - // Incoming InvVects are added to our map of RPC txs. - case broadcastInventoryAdd: - pendingInvs[*msg.invVect] = msg.data - - // When an InvVect has been added to a block, we can - // now remove it, if it was present. - case broadcastInventoryDel: - delete(pendingInvs, *msg) - } - - case <-timer.C: - // Any inventory we have has not made it into a block - // yet. We periodically resubmit them until they have. - for iv, data := range pendingInvs { - ivCopy := iv - s.RelayInventory(&ivCopy, data) - } - - // Process at a random time up to 30mins (in seconds) - // in the future. - timer.Reset(time.Second * - time.Duration(randomUint16Number(1800))) - - case <-s.quit: - break out - } - } - - timer.Stop() - - // Drain channels before exiting so nothing is left waiting around - // to send. -cleanup: - for { - select { - case <-s.modifyRebroadcastInv: - default: - break cleanup - } - } - s.quitWaitGroup.Done() - s.wg.Done() -} - -// Start begins accepting connections from peers. -func (s *Server) Start() { - - // Start the peer handler which in turn starts the address and block - // managers. - s.wg.Add(1) - spawn("SPAWN_PLACEHOLDER_NAME", s.peerHandler) - - if s.nat != nil { - s.wg.Add(1) - spawn("SPAWN_PLACEHOLDER_NAME", s.upnpUpdateThread) - } - - if !s.cfg.DisableRPC { - s.wg.Add(1) - - // Start the rebroadcastHandler, which ensures user tx received by - // the RPC server are rebroadcast until being included in a block. - spawn("SPAWN_PLACEHOLDER_NAME", s.rebroadcastHandler) - } -} - -// Stop gracefully shuts down the server by stopping and disconnecting all -// peers and the main listener. -func (s *Server) Stop() error { - // Signal the remaining goroutines to quit. - close(s.quit) - return nil -} - -// WaitForShutdown blocks until the main listener and peer handlers are stopped. -func (s *Server) WaitForShutdown() { - s.wg.Wait() -} - -// ScheduleShutdown schedules a server shutdown after the specified duration. 
-// It also dynamically adjusts how often to warn the server is going down based -// on remaining duration. -func (s *Server) ScheduleShutdown(duration time.Duration) { - // Don't schedule shutdown more than once. - if atomic.AddInt32(&s.shutdownSched, 1) != 1 { - return - } - srvrLog.Warnf("Server shutdown in %s", duration) - spawn("SPAWN_PLACEHOLDER_NAME", func() { - remaining := duration - tickDuration := dynamicTickDuration(remaining) - done := time.After(remaining) - ticker := time.NewTicker(tickDuration) - out: - for { - select { - case <-done: - ticker.Stop() - s.Stop() - break out - case <-ticker.C: - remaining = remaining - tickDuration - if remaining < time.Second { - continue - } - - // Change tick duration dynamically based on remaining time. - newDuration := dynamicTickDuration(remaining) - if tickDuration != newDuration { - tickDuration = newDuration - ticker.Stop() - ticker = time.NewTicker(tickDuration) - } - srvrLog.Warnf("Server shutdown in %s", remaining) - } - } - }) -} - -// ParseListeners determines whether each listen address is IPv4 and IPv6 and -// returns a slice of appropriate net.Addrs to listen on with TCP. It also -// properly detects addresses which apply to "all interfaces" and adds the -// address as both IPv4 and IPv6. -func ParseListeners(addrs []string) ([]net.Addr, error) { - netAddrs := make([]net.Addr, 0, len(addrs)*2) - for _, addr := range addrs { - host, _, err := net.SplitHostPort(addr) - if err != nil { - // Shouldn't happen due to already being normalized. - return nil, err - } - - // Empty host or host of * on plan9 is both IPv4 and IPv6. - if host == "" || (host == "*" && runtime.GOOS == "plan9") { - netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr}) - netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr}) - continue - } - - // Strip IPv6 zone id if present since net.ParseIP does not - // handle it. - zoneIndex := strings.LastIndex(host, "%") - if zoneIndex > 0 { - host = host[:zoneIndex] - } - - // Parse the IP. - ip := net.ParseIP(host) - if ip == nil { - hostAddrs, err := net.LookupHost(host) - if err != nil { - return nil, err - } - ip = net.ParseIP(hostAddrs[0]) - if ip == nil { - return nil, errors.Errorf("Cannot resolve IP address for host '%s'", host) - } - } - - // To4 returns nil when the IP is not an IPv4 address, so use - // this determine the address type. - if ip.To4() == nil { - netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr}) - } else { - netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr}) - } - } - return netAddrs, nil -} - -func (s *Server) upnpUpdateThread() { - // Go off immediately to prevent code duplication, thereafter we renew - // lease every 15 minutes. - timer := time.NewTimer(0 * time.Second) - lport, _ := strconv.ParseInt(s.cfg.NetParams().DefaultPort, 10, 16) - first := true - - s.quitWaitGroup.Add(1) - -out: - for { - select { - case <-timer.C: - // TODO: pick external port more cleverly - // TODO: know which ports we are listening to on an external net. - // TODO: if specific listen port doesn't work then ask for wildcard - // listen port? - // XXX this assumes timeout is in seconds. - listenPort, err := s.nat.AddPortMapping("tcp", int(lport), int(lport), - "kaspad listen port", 20*60) - if err != nil { - srvrLog.Warnf("can't add UPnP port mapping: %s", err) - } - if first && err == nil { - // TODO: look this up periodically to see if upnp domain changed - // and so did ip. 
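
Stepping back to ParseListeners above: a small usage sketch of how it expands wildcard listen addresses (the ports are illustrative, not documented defaults):

	addrs, err := ParseListeners([]string{":16111", "127.0.0.1:16112"})
	// ":16111" (empty host) yields both a "tcp4" and a "tcp6" wildcard
	// net.Addr, while the explicit IPv4 address yields a single "tcp4"
	// entry; err is non-nil only if an address fails to split or resolve.
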
- externalip, err := s.nat.GetExternalAddress() - if err != nil { - srvrLog.Warnf("UPnP can't get external address: %s", err) - continue out - } - na := wire.NewNetAddressIPPort(externalip, uint16(listenPort), - s.services) - err = s.AddrManager.AddLocalAddress(na, addrmgr.UpnpPrio) - if err != nil { - // XXX DeletePortMapping? - } - srvrLog.Warnf("Successfully bound via UPnP to %s", addrmgr.NetAddressKey(na)) - first = false - } - timer.Reset(time.Minute * 15) - case <-s.quit: - break out - } - } - - timer.Stop() - - if err := s.nat.DeletePortMapping("tcp", int(lport), int(lport)); err != nil { - srvrLog.Warnf("unable to remove UPnP port mapping: %s", err) - } else { - srvrLog.Debugf("successfully disestablished UPnP port mapping") - } - - s.quitWaitGroup.Done() - s.wg.Done() -} - -// NewServer returns a new kaspad server configured to listen on addr for the -// kaspa network type specified by dagParams. Use start to begin accepting -// connections from peers. -func NewServer(cfg *config.Config, listenAddrs []string, dagParams *dagconfig.Params, interrupt <-chan struct{}, - notifyNewTransactions func(txns []*mempool.TxDesc), databaseContext *dbaccess.DatabaseContext) (*Server, error) { - - services := defaultServices - if cfg.NoPeerBloomFilters { - services &^= wire.SFNodeBloom - } - - addressManager := addrmgr.New(cfg, databaseContext) - - var listeners []net.Listener - var nat serverutils.NAT - if !cfg.DisableListen { - var err error - listeners, nat, err = initListeners(cfg, addressManager, listenAddrs, services) - if err != nil { - return nil, err - } - if len(listeners) == 0 { - return nil, errors.New("no valid listen address") - } - } - - maxPeers := cfg.TargetOutboundPeers + cfg.MaxInboundPeers - - s := Server{ - cfg: cfg, - DAGParams: dagParams, - AddrManager: addressManager, - newPeers: make(chan *Peer, maxPeers), - donePeers: make(chan *Peer, maxPeers), - banPeers: make(chan *Peer, maxPeers), - Query: make(chan interface{}), - relayInv: make(chan relayMsg, maxPeers), - broadcast: make(chan broadcastMsg, maxPeers), - quit: make(chan struct{}), - modifyRebroadcastInv: make(chan interface{}), - nat: nat, - TimeSource: blockdag.NewTimeSource(), - services: services, - SigCache: txscript.NewSigCache(cfg.SigCacheMaxSize), - notifyNewTransactions: notifyNewTransactions, - } - - // Create indexes if needed. - var indexes []indexers.Indexer - if cfg.AcceptanceIndex { - indxLog.Info("acceptance index is enabled") - s.AcceptanceIndex = indexers.NewAcceptanceIndex() - indexes = append(indexes, s.AcceptanceIndex) - } - - // Create an index manager if any of the optional indexes are enabled. - var indexManager blockdag.IndexManager - if len(indexes) > 0 { - indexManager = indexers.NewManager(indexes) - } - - // Create a new block DAG instance with the appropriate configuration. 
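
Earlier in NewServer, the advertised service bits are trimmed with Go's AND-NOT operator; a small sketch of that flag arithmetic (the flag names are the ones defined at the top of this file):

	services := defaultServices   // SFNodeNetwork | SFNodeBloom | SFNodeCF
	services &^= wire.SFNodeBloom // clears only the bloom bit
	// services is now SFNodeNetwork | SFNodeCF, so a node started with
	// NoPeerBloomFilters simply stops advertising bloom-filter support.
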
- var err error - s.DAG, err = blockdag.New(&blockdag.Config{ - Interrupt: interrupt, - DAGParams: s.DAGParams, - TimeSource: s.TimeSource, - SigCache: s.SigCache, - IndexManager: indexManager, - SubnetworkID: cfg.SubnetworkID, - }) - if err != nil { - return nil, err - } - - txC := mempool.Config{ - Policy: mempool.Policy{ - AcceptNonStd: cfg.RelayNonStd, - MaxOrphanTxs: cfg.MaxOrphanTxs, - MaxOrphanTxSize: config.DefaultMaxOrphanTxSize, - MinRelayTxFee: cfg.MinRelayTxFee, - MaxTxVersion: 1, - }, - CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) { - return s.DAG.CalcSequenceLockNoLock(tx, utxoSet, true) - }, - IsDeploymentActive: s.DAG.IsDeploymentActive, - SigCache: s.SigCache, - DAG: s.DAG, - } - s.TxMemPool = mempool.New(&txC) - - s.SyncManager, err = netsync.New(&netsync.Config{ - PeerNotifier: &s, - DAG: s.DAG, - TxMemPool: s.TxMemPool, - DAGParams: s.DAGParams, - MaxPeers: maxPeers, - }) - if err != nil { - return nil, err - } - - connManagerCfg := &connmgr.Config{ - Listeners: listeners, - OnAccept: s.inboundPeerConnected, - RetryDuration: connectionRetryInterval, - TargetOutbound: uint32(cfg.TargetOutboundPeers), - Dial: func(addr net.Addr) (net.Conn, error) { return serverutils.KaspadDial(cfg, addr) }, - OnConnection: s.outboundPeerConnected, - OnConnectionFailed: s.outboundPeerConnectionFailed, - AddrManager: s.AddrManager, - } - // Create a connection manager. - cmgr, err := connmgr.New(connManagerCfg, cfg) - if err != nil { - return nil, err - } - s.connManager = cmgr - - // Start up persistent peers. - permanentPeers := cfg.ConnectPeers - if len(permanentPeers) == 0 { - permanentPeers = cfg.AddPeers - } - for _, addr := range permanentPeers { - netAddr, err := s.addrStringToNetAddr(addr) - if err != nil { - return nil, err - } - - spawn("SPAWN_PLACEHOLDER_NAME", func() { - s.connManager.Connect(&connmgr.ConnReq{ - Addr: netAddr, - Permanent: true, - }) - }) - } - - return &s, nil -} - -// initListeners initializes the configured net listeners and adds any bound -// addresses to the address manager. Returns the listeners and a NAT interface, -// which is non-nil if UPnP is in use. -func initListeners(cfg *config.Config, amgr *addrmgr.AddrManager, listenAddrs []string, services wire.ServiceFlag) ([]net.Listener, serverutils.NAT, error) { - // Listen for TCP connections at the configured addresses - netAddrs, err := ParseListeners(listenAddrs) - if err != nil { - return nil, nil, err - } - - listeners := make([]net.Listener, 0, len(netAddrs)) - for _, addr := range netAddrs { - listener, err := net.Listen(addr.Network(), addr.String()) - if err != nil { - srvrLog.Warnf("Can't listen on %s: %s", addr, err) - continue - } - listeners = append(listeners, listener) - } - - var nat serverutils.NAT - if len(cfg.ExternalIPs) != 0 { - defaultPort, err := strconv.ParseUint(cfg.NetParams().DefaultPort, 10, 16) - if err != nil { - srvrLog.Errorf("Can not parse default port %s for active DAG: %s", - cfg.NetParams().DefaultPort, err) - return nil, nil, err - } - - for _, sip := range cfg.ExternalIPs { - eport := uint16(defaultPort) - host, portstr, err := net.SplitHostPort(sip) - if err != nil { - // no port, use default. 
- host = sip - } else { - port, err := strconv.ParseUint(portstr, 10, 16) - if err != nil { - srvrLog.Warnf("Can not parse port from %s for "+ - "externalip: %s", sip, err) - continue - } - eport = uint16(port) - } - na, err := amgr.HostToNetAddress(host, eport, services) - if err != nil { - srvrLog.Warnf("Not adding %s as externalip: %s", sip, err) - continue - } - - err = amgr.AddLocalAddress(na, addrmgr.ManualPrio) - if err != nil { - amgrLog.Warnf("Skipping specified external IP: %s", err) - } - } - } else { - if cfg.Upnp { - var err error - nat, err = serverutils.Discover() - if err != nil { - srvrLog.Warnf("Can't discover upnp: %s", err) - } - // nil nat here is fine, just means no upnp on network. - } - - // Add bound addresses to address manager to be advertised to peers. - for _, listener := range listeners { - addr := listener.Addr().String() - err := addLocalAddress(amgr, addr, services) - if err != nil { - amgrLog.Warnf("Skipping bound address %s: %s", addr, err) - } - } - } - - return listeners, nat, nil -} - -// addrStringToNetAddr takes an address in the form of 'host:port' and returns -// a net.Addr which maps to the original address with any host names resolved -// to IP addresses. It also handles tor addresses properly by returning a -// net.Addr that encapsulates the address. -func (s *Server) addrStringToNetAddr(addr string) (*net.TCPAddr, error) { - host, strPort, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - port, err := strconv.Atoi(strPort) - if err != nil { - return nil, err - } - - // Skip if host is already an IP address. - if ip := net.ParseIP(host); ip != nil { - return &net.TCPAddr{ - IP: ip, - Port: port, - }, nil - } - - // Attempt to look up an IP address associated with the parsed host. - ips, err := s.cfg.Lookup(host) - if err != nil { - return nil, err - } - if len(ips) == 0 { - return nil, errors.Errorf("no addresses found for %s", host) - } - - return &net.TCPAddr{ - IP: ips[0], - Port: port, - }, nil -} - -// addLocalAddress adds an address that this node is listening on to the -// address manager so that it may be relayed to peers. -func addLocalAddress(addrMgr *addrmgr.AddrManager, addr string, services wire.ServiceFlag) error { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return err - } - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return err - } - - if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() { - // If bound to unspecified address, advertise all local interfaces - addrs, err := net.InterfaceAddrs() - if err != nil { - return err - } - - for _, addr := range addrs { - ifaceIP, _, err := net.ParseCIDR(addr.String()) - if err != nil { - continue - } - - // If bound to 0.0.0.0, do not add IPv6 interfaces and if bound to - // ::, do not add IPv4 interfaces. - if (ip.To4() == nil) != (ifaceIP.To4() == nil) { - continue - } - - netAddr := wire.NewNetAddressIPPort(ifaceIP, uint16(port), services) - addrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio) - } - } else { - netAddr, err := addrMgr.HostToNetAddress(host, uint16(port), services) - if err != nil { - return err - } - - addrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio) - } - - return nil -} - -// dynamicTickDuration is a convenience function used to dynamically choose a -// tick duration based on remaining time. It is primarily used during -// server shutdown to make shutdown warnings more frequent as the shutdown time -// approaches. 
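
For instance (values read straight off the switch below; a sketch, not additional behavior):

	dynamicTickDuration(10*time.Minute) // == 5 * time.Minute
	dynamicTickDuration(40*time.Second) // == 15 * time.Second
	dynamicTickDuration(3*time.Second)  // == time.Second
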
-func dynamicTickDuration(remaining time.Duration) time.Duration { - switch { - case remaining <= time.Second*5: - return time.Second - case remaining <= time.Second*15: - return time.Second * 5 - case remaining <= time.Minute: - return time.Second * 15 - case remaining <= time.Minute*5: - return time.Minute - case remaining <= time.Minute*15: - return time.Minute * 5 - case remaining <= time.Hour: - return time.Minute * 15 - } - return time.Hour -} - -// isWhitelisted returns whether the IP address is included in the whitelisted -// networks and IPs. -func (s *Server) isWhitelisted(addr net.Addr) bool { - if len(s.cfg.Whitelists) == 0 { - return false - } - - host, _, err := net.SplitHostPort(addr.String()) - if err != nil { - srvrLog.Warnf("Unable to SplitHostPort on '%s': %s", addr, err) - return false - } - ip := net.ParseIP(host) - if ip == nil { - srvrLog.Warnf("Unable to parse IP '%s'", addr) - return false - } - - for _, ipnet := range s.cfg.Whitelists { - if ipnet.Contains(ip) { - return true - } - } - return false -} - -// AnnounceNewTransactions generates and relays inventory vectors and notifies -// both websocket and getblocktemplate long poll clients of the passed -// transactions. This function should be called whenever new transactions -// are added to the mempool. -func (s *Server) AnnounceNewTransactions(txns []*mempool.TxDesc) { - // Generate and relay inventory vectors for all newly accepted - // transactions. - s.RelayTransactions(txns) - - // Notify both websocket and getblocktemplate long poll clients of all - // newly accepted transactions. - s.notifyNewTransactions(txns) -} - -// TransactionConfirmed is a function for the peerNotifier interface. -// When a transaction has one confirmation, we can mark it as no -// longer needing rebroadcasting. -func (s *Server) TransactionConfirmed(tx *util.Tx) { - // Rebroadcasting is only necessary when the RPC server is active. - if s.cfg.DisableRPC { - return - } - - iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(tx.ID())) - s.RemoveRebroadcastInventory(iv) -} diff --git a/server/rpc/handle_add_manual_node.go b/server/rpc/handle_add_manual_node.go deleted file mode 100644 index 8d318c309..000000000 --- a/server/rpc/handle_add_manual_node.go +++ /dev/null @@ -1,37 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util/network" -) - -// handleAddManualNode handles addManualNode commands. -func handleAddManualNode(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.AddManualNodeCmd) - - oneTry := c.OneTry != nil && *c.OneTry - - addr, err := network.NormalizeAddress(c.Addr, s.cfg.DAGParams.DefaultPort) - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: err.Error(), - } - } - - if oneTry { - err = s.cfg.ConnMgr.Connect(addr, false) - } else { - err = s.cfg.ConnMgr.Connect(addr, true) - } - - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: err.Error(), - } - } - - // no data returned unless an error. 
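
In handleAddManualNode above, the only difference between the two branches is the boolean passed to ConnMgr.Connect; reading the call sites, it plausibly marks the connection as permanent, so a oneTry request is attempted once rather than kept as a persistent peer (an inference from this handler, not from documentation). A caller-side sketch of the command it serves, with an illustrative address:

	oneTry := true
	cmd := &rpcmodel.AddManualNodeCmd{
		Addr:   "203.0.113.5:16111", // example peer address
		OneTry: &oneTry,             // attempt once; omit to add persistently
	}
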
- return nil, nil -} diff --git a/server/rpc/handle_create_raw_transaction.go b/server/rpc/handle_create_raw_transaction.go deleted file mode 100644 index 6e9531468..000000000 --- a/server/rpc/handle_create_raw_transaction.go +++ /dev/null @@ -1,106 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/txscript" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -// handleCreateRawTransaction handles createRawTransaction commands. -func handleCreateRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.CreateRawTransactionCmd) - - txIns := []*wire.TxIn{} - // Add all transaction inputs to a new transaction after performing - // some validity checks. - for _, input := range c.Inputs { - txID, err := daghash.NewTxIDFromStr(input.TxID) - if err != nil { - return nil, rpcDecodeHexError(input.TxID) - } - - prevOut := wire.NewOutpoint(txID, input.Vout) - txIn := wire.NewTxIn(prevOut, []byte{}) - if c.LockTime != nil && *c.LockTime != 0 { - txIn.Sequence = wire.MaxTxInSequenceNum - 1 - } - txIns = append(txIns, txIn) - } - mtx := wire.NewNativeMsgTx(wire.TxVersion, txIns, nil) - - // Add all transaction outputs to the transaction after performing - // some validity checks. - params := s.cfg.DAGParams - for encodedAddr, amount := range c.Amounts { - // Ensure amount is in the valid range for monetary amounts. - if amount <= 0 || amount > util.MaxSompi { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCType, - Message: "Invalid amount", - } - } - - // Decode the provided address. - addr, err := util.DecodeAddress(encodedAddr, params.Prefix) - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidAddressOrKey, - Message: "Invalid address or key: " + err.Error(), - } - } - - // Ensure the address is one of the supported types and that - // the network encoded with the address matches the network the - // server is currently on. - switch addr.(type) { - case *util.AddressPubKeyHash: - case *util.AddressScriptHash: - default: - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidAddressOrKey, - Message: "Invalid address or key", - } - } - if !addr.IsForPrefix(params.Prefix) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidAddressOrKey, - Message: "Invalid address: " + encodedAddr + - " is for the wrong network", - } - } - - // Create a new script which pays to the provided address. - scriptPubKey, err := txscript.PayToAddrScript(addr) - if err != nil { - context := "Failed to generate pay-to-address script" - return nil, internalRPCError(err.Error(), context) - } - - // Convert the amount to sompi. - sompi, err := util.NewAmount(amount) - if err != nil { - context := "Failed to convert amount" - return nil, internalRPCError(err.Error(), context) - } - - txOut := wire.NewTxOut(uint64(sompi), scriptPubKey) - mtx.AddTxOut(txOut) - } - - // Set the Locktime, if given. - if c.LockTime != nil { - mtx.LockTime = *c.LockTime - } - - // Return the serialized and hex-encoded transaction. Note that this - // is intentionally not directly returning because the first return - // value is a string and it would result in returning an empty string to - // the client instead of nothing (nil) in the case of an error. 
- mtxHex, err := messageToHex(mtx) - if err != nil { - return nil, err - } - return mtxHex, nil -} diff --git a/server/rpc/handle_decode_raw_transaction.go b/server/rpc/handle_decode_raw_transaction.go deleted file mode 100644 index 87073e2cf..000000000 --- a/server/rpc/handle_decode_raw_transaction.go +++ /dev/null @@ -1,41 +0,0 @@ -package rpc - -import ( - "bytes" - "encoding/hex" - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/wire" -) - -// handleDecodeRawTransaction handles decodeRawTransaction commands. -func handleDecodeRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.DecodeRawTransactionCmd) - - // Deserialize the transaction. - hexStr := c.HexTx - if len(hexStr)%2 != 0 { - hexStr = "0" + hexStr - } - serializedTx, err := hex.DecodeString(hexStr) - if err != nil { - return nil, rpcDecodeHexError(hexStr) - } - var mtx wire.MsgTx - err = mtx.Deserialize(bytes.NewReader(serializedTx)) - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCDeserialization, - Message: "TX decode failed: " + err.Error(), - } - } - - // Create and return the result. - txReply := rpcmodel.TxRawDecodeResult{ - TxID: mtx.TxID().String(), - Version: mtx.Version, - Locktime: mtx.LockTime, - Vin: createVinList(&mtx), - Vout: createVoutList(&mtx, s.cfg.DAGParams, nil), - } - return txReply, nil -} diff --git a/server/rpc/handle_decode_script.go b/server/rpc/handle_decode_script.go deleted file mode 100644 index 7fb3cfbd7..000000000 --- a/server/rpc/handle_decode_script.go +++ /dev/null @@ -1,56 +0,0 @@ -package rpc - -import ( - "encoding/hex" - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/txscript" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/pointers" -) - -// handleDecodeScript handles decodeScript commands. -func handleDecodeScript(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.DecodeScriptCmd) - - // Convert the hex script to bytes. - hexStr := c.HexScript - if len(hexStr)%2 != 0 { - hexStr = "0" + hexStr - } - script, err := hex.DecodeString(hexStr) - if err != nil { - return nil, rpcDecodeHexError(hexStr) - } - - // The disassembled string will contain [error] inline if the script - // doesn't fully parse, so ignore the error here. - disbuf, _ := txscript.DisasmString(script) - - // Get information about the script. - // Ignore the error here since an error means the script couldn't parse - // and there is no additinal information about it anyways. - scriptClass, addr, _ := txscript.ExtractScriptPubKeyAddress(script, - s.cfg.DAGParams) - var address *string - if addr != nil { - address = pointers.String(addr.EncodeAddress()) - } - - // Convert the script itself to a pay-to-script-hash address. - p2sh, err := util.NewAddressScriptHash(script, s.cfg.DAGParams.Prefix) - if err != nil { - context := "Failed to convert script to pay-to-script-hash" - return nil, internalRPCError(err.Error(), context) - } - - // Generate and return the reply. 
- reply := rpcmodel.DecodeScriptResult{ - Asm: disbuf, - Type: scriptClass.String(), - Address: address, - } - if scriptClass != txscript.ScriptHashTy { - reply.P2sh = p2sh.EncodeAddress() - } - return reply, nil -} diff --git a/server/rpc/handle_get_all_manual_nodes_info.go b/server/rpc/handle_get_all_manual_nodes_info.go deleted file mode 100644 index a21077a28..000000000 --- a/server/rpc/handle_get_all_manual_nodes_info.go +++ /dev/null @@ -1,9 +0,0 @@ -package rpc - -import "github.com/kaspanet/kaspad/rpcmodel" - -// handleGetAllManualNodesInfo handles getAllManualNodesInfo commands. -func handleGetAllManualNodesInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetAllManualNodesInfoCmd) - return getManualNodesInfo(s, c.Details, "") -} diff --git a/server/rpc/handle_get_connected_peer_info.go b/server/rpc/handle_get_connected_peer_info.go deleted file mode 100644 index f1af9d5a8..000000000 --- a/server/rpc/handle_get_connected_peer_info.go +++ /dev/null @@ -1,44 +0,0 @@ -package rpc - -import ( - "fmt" - "github.com/kaspanet/kaspad/rpcmodel" - "time" -) - -// handleGetConnectedPeerInfo implements the getConnectedPeerInfo command. -func handleGetConnectedPeerInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - peers := s.cfg.ConnMgr.ConnectedPeers() - syncPeerID := s.cfg.SyncMgr.SyncPeerID() - infos := make([]*rpcmodel.GetConnectedPeerInfoResult, 0, len(peers)) - for _, p := range peers { - statsSnap := p.ToPeer().StatsSnapshot() - info := &rpcmodel.GetConnectedPeerInfoResult{ - ID: statsSnap.ID, - Addr: statsSnap.Addr, - Services: fmt.Sprintf("%08d", uint64(statsSnap.Services)), - RelayTxes: !p.IsTxRelayDisabled(), - LastSend: statsSnap.LastSend.UnixMilliseconds(), - LastRecv: statsSnap.LastRecv.UnixMilliseconds(), - BytesSent: statsSnap.BytesSent, - BytesRecv: statsSnap.BytesRecv, - ConnTime: statsSnap.ConnTime.UnixMilliseconds(), - PingTime: float64(statsSnap.LastPingMicros), - TimeOffset: statsSnap.TimeOffset, - Version: statsSnap.Version, - SubVer: statsSnap.UserAgent, - Inbound: statsSnap.Inbound, - SelectedTip: statsSnap.SelectedTipHash.String(), - BanScore: int32(p.BanScore()), - FeeFilter: p.FeeFilter(), - SyncNode: statsSnap.ID == syncPeerID, - } - if p.ToPeer().LastPingNonce() != 0 { - wait := float64(time.Since(statsSnap.LastPingTime).Nanoseconds()) - // We actually want microseconds. - info.PingWait = wait / 1000 - } - infos = append(infos, info) - } - return infos, nil -} diff --git a/server/rpc/handle_get_info.go b/server/rpc/handle_get_info.go deleted file mode 100644 index ddcd3bd4d..000000000 --- a/server/rpc/handle_get_info.go +++ /dev/null @@ -1,24 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/version" -) - -// handleGetInfo implements the getInfo command. We only return the fields -// that are not related to wallet functionality. 
-func handleGetInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - ret := &rpcmodel.InfoDAGResult{ - Version: version.Version(), - ProtocolVersion: int32(maxProtocolVersion), - Blocks: s.cfg.DAG.BlockCount(), - Connections: s.cfg.ConnMgr.ConnectedCount(), - Proxy: s.appCfg.Proxy, - Difficulty: getDifficultyRatio(s.cfg.DAG.CurrentBits(), s.cfg.DAGParams), - Testnet: s.appCfg.Testnet, - Devnet: s.appCfg.Devnet, - RelayFee: s.appCfg.MinRelayTxFee.ToKAS(), - } - - return ret, nil -} diff --git a/server/rpc/handle_get_manual_node_info.go b/server/rpc/handle_get_manual_node_info.go deleted file mode 100644 index d93711dc8..000000000 --- a/server/rpc/handle_get_manual_node_info.go +++ /dev/null @@ -1,113 +0,0 @@ -package rpc - -import ( - "net" - - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util/pointers" -) - -// handleGetManualNodeInfo handles getManualNodeInfo commands. -func handleGetManualNodeInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetManualNodeInfoCmd) - results, err := getManualNodesInfo(s, c.Details, c.Node) - if err != nil { - return nil, err - } - if resultsNonDetailed, ok := results.([]string); ok { - return resultsNonDetailed[0], nil - } - resultsDetailed := results.([]*rpcmodel.GetManualNodeInfoResult) - return resultsDetailed[0], nil -} - -// getManualNodesInfo handles getManualNodeInfo and getAllManualNodesInfo commands. -func getManualNodesInfo(s *Server, detailsArg *bool, node string) (interface{}, error) { - - details := detailsArg == nil || *detailsArg - - // Retrieve a list of persistent (manual) peers from the server and - // filter the list of peers per the specified address (if any). - peers := s.cfg.ConnMgr.PersistentPeers() - if node != "" { - found := false - for i, peer := range peers { - if peer.ToPeer().Addr() == node { - peers = peers[i : i+1] - found = true - } - } - if !found { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCClientNodeNotAdded, - Message: "Node has not been added", - } - } - } - - // Without the details flag, the result is just a slice of the addresses as - // strings. - if !details { - results := make([]string, 0, len(peers)) - for _, peer := range peers { - results = append(results, peer.ToPeer().Addr()) - } - return results, nil - } - - // With the details flag, the result is an array of JSON objects which - // include the result of DNS lookups for each peer. - results := make([]*rpcmodel.GetManualNodeInfoResult, 0, len(peers)) - for _, rpcPeer := range peers { - // Set the "address" of the peer which could be an ip address - // or a domain name. - peer := rpcPeer.ToPeer() - var result rpcmodel.GetManualNodeInfoResult - result.ManualNode = peer.Addr() - result.Connected = pointers.Bool(peer.Connected()) - - // Split the address into host and port portions so we can do - // a DNS lookup against the host. When no port is specified in - // the address, just use the address as the host. - host, _, err := net.SplitHostPort(peer.Addr()) - if err != nil { - host = peer.Addr() - } - - var ipList []string - switch { - case net.ParseIP(host) != nil: - ipList = make([]string, 1) - ipList[0] = host - default: - // Do a DNS lookup for the address. If the lookup fails, just - // use the host. 
- ips, err := s.appCfg.Lookup(host) - if err != nil { - ipList = make([]string, 1) - ipList[0] = host - break - } - ipList = make([]string, 0, len(ips)) - for _, ip := range ips { - ipList = append(ipList, ip.String()) - } - } - - // Add the addresses and connection info to the result. - addrs := make([]rpcmodel.GetManualNodeInfoResultAddr, 0, len(ipList)) - for _, ip := range ipList { - var addr rpcmodel.GetManualNodeInfoResultAddr - addr.Address = ip - addr.Connected = "false" - if ip == host && peer.Connected() { - addr.Connected = logger.DirectionString(peer.Inbound()) - } - addrs = append(addrs, addr) - } - result.Addresses = &addrs - results = append(results, &result) - } - return results, nil -} diff --git a/server/rpc/handle_get_raw_mempool.go b/server/rpc/handle_get_raw_mempool.go deleted file mode 100644 index 47045d47a..000000000 --- a/server/rpc/handle_get_raw_mempool.go +++ /dev/null @@ -1,23 +0,0 @@ -package rpc - -import "github.com/kaspanet/kaspad/rpcmodel" - -// handleGetRawMempool implements the getRawMempool command. -func handleGetRawMempool(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.GetRawMempoolCmd) - mp := s.cfg.TxMemPool - - if c.Verbose != nil && *c.Verbose { - return mp.RawMempoolVerbose(), nil - } - - // The response is simply an array of the transaction hashes if the - // verbose flag is not set. - descs := mp.TxDescs() - hashStrings := make([]string, len(descs)) - for i := range hashStrings { - hashStrings[i] = descs[i].Tx.ID().String() - } - - return hashStrings, nil -} diff --git a/server/rpc/handle_node.go b/server/rpc/handle_node.go deleted file mode 100644 index 6be157343..000000000 --- a/server/rpc/handle_node.go +++ /dev/null @@ -1,125 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util/network" - "net" - "strconv" -) - -// handleNode handles node commands. -func handleNode(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.NodeCmd) - - var addr string - var nodeID uint64 - var errN, err error - params := s.cfg.DAGParams - switch c.SubCmd { - case "disconnect": - // If we have a valid uint disconnect by node id. Otherwise, - // attempt to disconnect by address, returning an error if a - // valid IP address is not supplied. - if nodeID, errN = strconv.ParseUint(c.Target, 10, 32); errN == nil { - err = s.cfg.ConnMgr.DisconnectByID(int32(nodeID)) - } else { - if _, _, errP := net.SplitHostPort(c.Target); errP == nil || net.ParseIP(c.Target) != nil { - addr, err = network.NormalizeAddress(c.Target, params.DefaultPort) - if err != nil { - break - } - - err = s.cfg.ConnMgr.DisconnectByAddr(addr) - } else { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: "invalid address or node ID", - } - } - } - if err != nil && peerExists(s.cfg.ConnMgr, addr, int32(nodeID)) { - - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCMisc, - Message: "can't disconnect a permanent peer, use remove", - } - } - - case "remove": - // If we have a valid uint disconnect by node id. Otherwise, - // attempt to disconnect by address, returning an error if a - // valid IP address is not supplied. 
- if nodeID, errN = strconv.ParseUint(c.Target, 10, 32); errN == nil { - err = s.cfg.ConnMgr.RemoveByID(int32(nodeID)) - } else { - if _, _, errP := net.SplitHostPort(c.Target); errP == nil || net.ParseIP(c.Target) != nil { - addr, err = network.NormalizeAddress(c.Target, params.DefaultPort) - if err != nil { - break - } - - err = s.cfg.ConnMgr.RemoveByAddr(addr) - } else { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: "invalid address or node ID", - } - } - } - if err != nil && peerExists(s.cfg.ConnMgr, addr, int32(nodeID)) { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCMisc, - Message: "can't remove a temporary peer, use disconnect", - } - } - - case "connect": - addr, err = network.NormalizeAddress(c.Target, params.DefaultPort) - if err != nil { - break - } - - // Default to temporary connections. - subCmd := "temp" - if c.ConnectSubCmd != nil { - subCmd = *c.ConnectSubCmd - } - - switch subCmd { - case "perm", "temp": - err = s.cfg.ConnMgr.Connect(addr, subCmd == "perm") - default: - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: "invalid subcommand for node connect", - } - } - default: - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: "invalid subcommand for node", - } - } - - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: err.Error(), - } - } - - // no data returned unless an error. - return nil, nil -} - -// peerExists determines if a certain peer is currently connected given -// information about all currently connected peers. Peer existence is -// determined using either a target address or node id. -func peerExists(connMgr rpcserverConnManager, addr string, nodeID int32) bool { - for _, p := range connMgr.ConnectedPeers() { - if p.ToPeer().ID() == nodeID || p.ToPeer().Addr() == addr { - return true - } - } - return false -} diff --git a/server/rpc/handle_ping.go b/server/rpc/handle_ping.go deleted file mode 100644 index dfa6a63bf..000000000 --- a/server/rpc/handle_ping.go +++ /dev/null @@ -1,19 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/util/random" - "github.com/kaspanet/kaspad/wire" -) - -// handlePing implements the ping command. -func handlePing(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - // Ask server to ping \o_ - nonce, err := random.Uint64() - if err != nil { - return nil, internalRPCError("Not sending ping - failed to "+ - "generate nonce: "+err.Error(), "") - } - s.cfg.ConnMgr.BroadcastMessage(wire.NewMsgPing(nonce)) - - return nil, nil -} diff --git a/server/rpc/handle_remove_manual_node.go b/server/rpc/handle_remove_manual_node.go deleted file mode 100644 index 676356cc4..000000000 --- a/server/rpc/handle_remove_manual_node.go +++ /dev/null @@ -1,30 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util/network" -) - -// handleRemoveManualNode handles removeManualNode command. 
-func handleRemoveManualNode(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.RemoveManualNodeCmd) - - addr, err := network.NormalizeAddress(c.Addr, s.cfg.DAGParams.DefaultPort) - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: err.Error(), - } - } - - err = s.cfg.ConnMgr.RemoveByAddr(addr) - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInvalidParameter, - Message: err.Error(), - } - } - - // no data returned unless an error. - return nil, nil -} diff --git a/server/rpc/handle_send_raw_transaction.go b/server/rpc/handle_send_raw_transaction.go deleted file mode 100644 index 183c6083e..000000000 --- a/server/rpc/handle_send_raw_transaction.go +++ /dev/null @@ -1,93 +0,0 @@ -package rpc - -import ( - "bytes" - "encoding/hex" - "fmt" - "github.com/kaspanet/kaspad/mempool" - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" - "github.com/pkg/errors" -) - -// handleSendRawTransaction implements the sendRawTransaction command. -func handleSendRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.SendRawTransactionCmd) - // Deserialize and send off to tx relay - hexStr := c.HexTx - if len(hexStr)%2 != 0 { - hexStr = "0" + hexStr - } - serializedTx, err := hex.DecodeString(hexStr) - if err != nil { - return nil, rpcDecodeHexError(hexStr) - } - var msgTx wire.MsgTx - err = msgTx.Deserialize(bytes.NewReader(serializedTx)) - if err != nil { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCDeserialization, - Message: "TX decode failed: " + err.Error(), - } - } - - // Use 0 for the tag to represent local node. - tx := util.NewTx(&msgTx) - acceptedTxs, err := s.cfg.TxMemPool.ProcessTransaction(tx, false, 0) - if err != nil { - // When the error is a rule error, it means the transaction was - // simply rejected as opposed to something actually going wrong, - // so log it as such. Otherwise, something really did go wrong, - // so log it as an actual error. In both cases, a JSON-RPC - // error is returned to the client with the deserialization - // error code - if errors.As(err, &mempool.RuleError{}) { - log.Debugf("Rejected transaction %s: %s", tx.ID(), - err) - } else { - log.Errorf("Failed to process transaction %s: %s", - tx.ID(), err) - } - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCVerify, - Message: "TX rejected: " + err.Error(), - } - } - - // When the transaction was accepted it should be the first item in the - // returned array of accepted transactions. The only way this will not - // be true is if the API for ProcessTransaction changes and this code is - // not properly updated, but ensure the condition holds as a safeguard. - // - // Also, since an error is being returned to the caller, ensure the - // transaction is removed from the memory pool. - if len(acceptedTxs) == 0 || !acceptedTxs[0].Tx.ID().IsEqual(tx.ID()) { - err := s.cfg.TxMemPool.RemoveTransaction(tx, true, true) - if err != nil { - return nil, err - } - - errStr := fmt.Sprintf("transaction %s is not in accepted list", - tx.ID()) - return nil, internalRPCError(errStr, "") - } - - // Generate and relay inventory vectors for all newly accepted - // transactions into the memory pool due to the original being - // accepted. 
- s.cfg.ConnMgr.RelayTransactions(acceptedTxs) - - // Notify both websocket and getBlockTemplate long poll clients of all - // newly accepted transactions. - s.NotifyNewTransactions(acceptedTxs) - - // Keep track of all the sendRawTransaction request txns so that they - // can be rebroadcast if they don't make their way into a block. - txD := acceptedTxs[0] - iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txD.Tx.ID())) - s.cfg.ConnMgr.AddRebroadcastInventory(iv, txD) - - return tx.ID().String(), nil -} diff --git a/server/rpc/handle_validate_address.go b/server/rpc/handle_validate_address.go deleted file mode 100644 index a311874bf..000000000 --- a/server/rpc/handle_validate_address.go +++ /dev/null @@ -1,23 +0,0 @@ -package rpc - -import ( - "github.com/kaspanet/kaspad/rpcmodel" - "github.com/kaspanet/kaspad/util" -) - -// handleValidateAddress implements the validateAddress command. -func handleValidateAddress(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*rpcmodel.ValidateAddressCmd) - - result := rpcmodel.ValidateAddressResult{} - addr, err := util.DecodeAddress(c.Address, s.cfg.DAGParams.Prefix) - if err != nil { - // Return the default value (false) for IsValid. - return result, nil - } - - result.Address = addr.EncodeAddress() - result.IsValid = true - - return result, nil -} diff --git a/server/rpc/rpcadapters.go b/server/rpc/rpcadapters.go deleted file mode 100644 index 119e6be6a..000000000 --- a/server/rpc/rpcadapters.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (c) 2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpc - -import ( - "sync/atomic" - - "github.com/kaspanet/kaspad/blockdag" - "github.com/kaspanet/kaspad/mempool" - "github.com/kaspanet/kaspad/netsync" - "github.com/kaspanet/kaspad/peer" - "github.com/kaspanet/kaspad/server/p2p" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -// rpcPeer provides a peer for use with the RPC server and implements the -// rpcserverPeer interface. -type rpcPeer p2p.Peer - -// Ensure rpcPeer implements the rpcserverPeer interface. -var _ rpcserverPeer = (*rpcPeer)(nil) - -// ToPeer returns the underlying peer instance. -// -// This function is safe for concurrent access and is part of the rpcserverPeer -// interface implementation. -func (p *rpcPeer) ToPeer() *peer.Peer { - if p == nil { - return nil - } - return (*p2p.Peer)(p).Peer -} - -// IsTxRelayDisabled returns whether or not the peer has disabled transaction -// relay. -// -// This function is safe for concurrent access and is part of the rpcserverPeer -// interface implementation. -func (p *rpcPeer) IsTxRelayDisabled() bool { - return (*p2p.Peer)(p).DisableRelayTx -} - -// BanScore returns the current integer value that represents how close the peer -// is to being banned. -// -// This function is safe for concurrent access and is part of the rpcserverPeer -// interface implementation. -func (p *rpcPeer) BanScore() uint32 { - return (*p2p.Peer)(p).DynamicBanScore.Int() -} - -// FeeFilter returns the requested current minimum fee rate for which -// transactions should be announced. -// -// This function is safe for concurrent access and is part of the rpcserverPeer -// interface implementation. 
-func (p *rpcPeer) FeeFilter() int64 { - return atomic.LoadInt64(&(*p2p.Peer)(p).FeeFilterInt) -} - -// rpcConnManager provides a connection manager for use with the RPC server and -// implements the rpcserverConnManager interface. -type rpcConnManager struct { - server *p2p.Server -} - -// Ensure rpcConnManager implements the rpcserverConnManager interface. -var _ rpcserverConnManager = &rpcConnManager{} - -// Connect adds the provided address as a new outbound peer. The permanent flag -// indicates whether or not to make the peer persistent and reconnect if the -// connection is lost. Attempting to connect to an already existing peer will -// return an error. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) Connect(addr string, permanent bool) error { - replyChan := make(chan error) - cm.server.Query <- p2p.ConnectNodeMsg{ - Addr: addr, - Permanent: permanent, - Reply: replyChan, - } - return <-replyChan -} - -// RemoveByID removes the peer associated with the provided id from the list of -// persistent peers. Attempting to remove an id that does not exist will return -// an error. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) RemoveByID(id int32) error { - replyChan := make(chan error) - cm.server.Query <- p2p.RemoveNodeMsg{ - Cmp: func(sp *p2p.Peer) bool { return sp.ID() == id }, - Reply: replyChan, - } - return <-replyChan -} - -// RemoveByAddr removes the peer associated with the provided address from the -// list of persistent peers. Attempting to remove an address that does not -// exist will return an error. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) RemoveByAddr(addr string) error { - replyChan := make(chan error) - cm.server.Query <- p2p.RemoveNodeMsg{ - Cmp: func(sp *p2p.Peer) bool { return sp.Addr() == addr }, - Reply: replyChan, - } - return <-replyChan -} - -// DisconnectByID disconnects the peer associated with the provided id. This -// applies to both inbound and outbound peers. Attempting to remove an id that -// does not exist will return an error. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) DisconnectByID(id int32) error { - replyChan := make(chan error) - cm.server.Query <- p2p.DisconnectNodeMsg{ - Cmp: func(sp *p2p.Peer) bool { return sp.ID() == id }, - Reply: replyChan, - } - return <-replyChan -} - -// DisconnectByAddr disconnects the peer associated with the provided address. -// This applies to both inbound and outbound peers. Attempting to remove an -// address that does not exist will return an error. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) DisconnectByAddr(addr string) error { - replyChan := make(chan error) - cm.server.Query <- p2p.DisconnectNodeMsg{ - Cmp: func(sp *p2p.Peer) bool { return sp.Addr() == addr }, - Reply: replyChan, - } - return <-replyChan -} - -// ConnectedCount returns the number of currently connected peers. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. 
-func (cm *rpcConnManager) ConnectedCount() int32 { - return cm.server.ConnectedCount() -} - -// NetTotals returns the sum of all bytes received and sent across the network -// for all peers. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) NetTotals() (uint64, uint64) { - return cm.server.NetTotals() -} - -// ConnectedPeers returns an array consisting of all connected peers. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) ConnectedPeers() []rpcserverPeer { - replyChan := make(chan []*p2p.Peer) - cm.server.Query <- p2p.GetPeersMsg{Reply: replyChan} - serverPeers := <-replyChan - - // Convert to RPC server peers. - peers := make([]rpcserverPeer, 0, len(serverPeers)) - for _, sp := range serverPeers { - peers = append(peers, (*rpcPeer)(sp)) - } - return peers -} - -// PersistentPeers returns an array consisting of all the added persistent -// peers. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) PersistentPeers() []rpcserverPeer { - replyChan := make(chan []*p2p.Peer) - cm.server.Query <- p2p.GetManualNodesMsg{Reply: replyChan} - serverPeers := <-replyChan - - // Convert to generic peers. - peers := make([]rpcserverPeer, 0, len(serverPeers)) - for _, sp := range serverPeers { - peers = append(peers, (*rpcPeer)(sp)) - } - return peers -} - -// BroadcastMessage sends the provided message to all currently connected peers. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) BroadcastMessage(msg wire.Message) { - cm.server.BroadcastMessage(msg) -} - -// AddRebroadcastInventory adds the provided inventory to the list of -// inventories to be rebroadcast at random intervals until they show up in a -// block. -// -// This function is safe for concurrent access and is part of the -// rpcserverConnManager interface implementation. -func (cm *rpcConnManager) AddRebroadcastInventory(iv *wire.InvVect, data interface{}) { - cm.server.AddRebroadcastInventory(iv, data) -} - -// RelayTransactions generates and relays inventory vectors for all of the -// passed transactions to all connected peers. -func (cm *rpcConnManager) RelayTransactions(txns []*mempool.TxDesc) { - cm.server.RelayTransactions(txns) -} - -// rpcSyncMgr provides a block manager for use with the RPC server and -// implements the rpcserverSyncManager interface. -type rpcSyncMgr struct { - server *p2p.Server - syncMgr *netsync.SyncManager -} - -// Ensure rpcSyncMgr implements the rpcserverSyncManager interface. -var _ rpcserverSyncManager = (*rpcSyncMgr)(nil) - -// IsSynced returns whether or not the sync manager believes the DAG is -// current as compared to the rest of the network. -// -// This function is safe for concurrent access and is part of the -// rpcserverSyncManager interface implementation. -func (b *rpcSyncMgr) IsSynced() bool { - return b.syncMgr.IsSynced() -} - -// SubmitBlock submits the provided block to the network after processing it -// locally. -// -// This function is safe for concurrent access and is part of the -// rpcserverSyncManager interface implementation. 
-func (b *rpcSyncMgr) SubmitBlock(block *util.Block, flags blockdag.BehaviorFlags) (bool, error) { - return b.syncMgr.ProcessBlock(block, flags) -} - -// Pause pauses the sync manager until the returned channel is closed. -// -// This function is safe for concurrent access and is part of the -// rpcserverSyncManager interface implementation. -func (b *rpcSyncMgr) Pause() chan<- struct{} { - return b.syncMgr.Pause() -} - -// SyncPeerID returns the peer that is currently the peer being used to sync -// from. -// -// This function is safe for concurrent access and is part of the -// rpcserverSyncManager interface implementation. -func (b *rpcSyncMgr) SyncPeerID() int32 { - return b.syncMgr.SyncPeerID() -} - -// AntiPastHeadersBetween returns the headers of the blocks between the -// lowHash's antiPast and highHash's antiPast, or up to -// wire.MaxBlockHeadersPerMsg block headers. -// -// This function is safe for concurrent access and is part of the -// rpcserverSyncManager interface implementation. -func (b *rpcSyncMgr) AntiPastHeadersBetween(lowHash, highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) { - return b.server.DAG.AntiPastHeadersBetween(lowHash, highHash, maxHeaders) -} diff --git a/server/serverutils/log.go b/server/serverutils/log.go deleted file mode 100644 index f07663c45..000000000 --- a/server/serverutils/log.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package serverutils - -import ( - "github.com/kaspanet/kaspad/logger" -) - -var log, _ = logger.Get(logger.SubsystemTags.RPCS) diff --git a/server/serverutils/upnp.go b/server/serverutils/upnp.go deleted file mode 100644 index 4c737852b..000000000 --- a/server/serverutils/upnp.go +++ /dev/null @@ -1,411 +0,0 @@ -package serverutils - -// Upnp code taken from Taipei Torrent license is below: -// Copyright (c) 2010 Jack Palevich. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// Just enough UPnP to be able to forward ports -// - -import ( - "bytes" - "encoding/xml" - "io/ioutil" - "net" - "net/http" - "os" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" -) - -// NAT is an interface representing a NAT traversal options for example UPNP or -// NAT-PMP. It provides methods to query and manipulate this traversal to allow -// access to services. -type NAT interface { - // Get the external address from outside the NAT. - GetExternalAddress() (addr net.IP, err error) - // Add a port mapping for protocol ("udp" or "tcp") from external port to - // internal port with description lasting for timeout. - AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) - // Remove a previously added port mapping from external port to - // internal port. - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -type upnpNAT struct { - serviceURL string - ourIP string -} - -// Discover searches the local network for a UPnP router returning a NAT -// for the network if so, nil if not. -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() - - err = socket.SetDeadline(time.Now().Add(3 * time.Second)) - if err != nil { - return - } - - st := "ST: urn:schemas-upnp-org:device:InternetGatewayDevice:1\r\n" - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - st + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - continue - // socket.Close() - // return - } - answer := string(answerBytes[0:n]) - if !strings.Contains(answer, "\r\n"+st) { - continue - } - // HTTP header field names are case-insensitive. - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 - locString := "\r\nlocation: " - locIndex := strings.Index(strings.ToLower(answer), locString) - if locIndex < 0 { - continue - } - loc := answer[locIndex+len(locString):] - endIndex := strings.Index(loc, "\r\n") - if endIndex < 0 { - continue - } - locURL := loc[0:endIndex] - var serviceURL string - serviceURL, err = getServiceURL(locURL) - if err != nil { - return - } - var ourIP string - ourIP, err = getOurIP() - if err != nil { - return - } - nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP} - return - } - err = errors.New("UPnP port discovery failed") - return -} - -// service represents the Service type in an UPnP xml description. -// Only the parts we care about are present and thus the xml may have more -// fields than present in the structure. -type service struct { - ServiceType string `xml:"serviceType"` - ControlURL string `xml:"controlURL"` -} - -// deviceList represents the deviceList type in an UPnP xml description. -// Only the parts we care about are present and thus the xml may have more -// fields than present in the structure. -type deviceList struct { - XMLName xml.Name `xml:"deviceList"` - Device []device `xml:"device"` -} - -// serviceList represents the serviceList type in an UPnP xml description. 
-// Only the parts we care about are present and thus the xml may have more -// fields than present in the structure. -type serviceList struct { - XMLName xml.Name `xml:"serviceList"` - Service []service `xml:"service"` -} - -// device represents the device type in an UPnP xml description. -// Only the parts we care about are present and thus the xml may have more -// fields than present in the structure. -type device struct { - XMLName xml.Name `xml:"device"` - DeviceType string `xml:"deviceType"` - DeviceList deviceList `xml:"deviceList"` - ServiceList serviceList `xml:"serviceList"` -} - -// specVersion represents the specVersion in a UPnP xml description. -// Only the parts we care about are present and thus the xml may have more -// fields than present in the structure. -type specVersion struct { - XMLName xml.Name `xml:"specVersion"` - Major int `xml:"major"` - Minor int `xml:"minor"` -} - -// root represents the Root document for a UPnP xml description. -// Only the parts we care about are present and thus the xml may have more -// fields than present in the structure. -type root struct { - XMLName xml.Name `xml:"root"` - SpecVersion specVersion - Device device -} - -// getChildDevice searches the children of device for a device with the given -// type. -func getChildDevice(d *device, deviceType string) *device { - for i := range d.DeviceList.Device { - if d.DeviceList.Device[i].DeviceType == deviceType { - return &d.DeviceList.Device[i] - } - } - return nil -} - -// getChildDevice searches the service list of device for a service with the -// given type. -func getChildService(d *device, serviceType string) *service { - for i := range d.ServiceList.Service { - if d.ServiceList.Service[i].ServiceType == serviceType { - return &d.ServiceList.Service[i] - } - } - return nil -} - -// getOurIP returns a best guess at what the local IP is. -func getOurIP() (ip string, err error) { - hostname, err := os.Hostname() - if err != nil { - return - } - return net.LookupCNAME(hostname) -} - -// getServiceURL parses the xml description at the given root url to find the -// url for the WANIPConnection service to be used for port forwarding. -func getServiceURL(rootURL string) (url string, err error) { - r, err := http.Get(rootURL) - if err != nil { - return - } - defer r.Body.Close() - if r.StatusCode >= 400 { - err = errors.New(string(r.StatusCode)) - return - } - var root root - err = xml.NewDecoder(r.Body).Decode(&root) - if err != nil { - return - } - a := &root.Device - if a.DeviceType != "urn:schemas-upnp-org:device:InternetGatewayDevice:1" { - err = errors.New("no InternetGatewayDevice") - return - } - b := getChildDevice(a, "urn:schemas-upnp-org:device:WANDevice:1") - if b == nil { - err = errors.New("no WANDevice") - return - } - c := getChildDevice(b, "urn:schemas-upnp-org:device:WANConnectionDevice:1") - if c == nil { - err = errors.New("no WANConnectionDevice") - return - } - d := getChildService(c, "urn:schemas-upnp-org:service:WANIPConnection:1") - if d == nil { - err = errors.New("no WANIPConnection") - return - } - url = combineURL(rootURL, d.ControlURL) - return -} - -// combineURL appends subURL onto rootURL. -func combineURL(rootURL, subURL string) string { - protocolEnd := "://" - protoEndIndex := strings.Index(rootURL, protocolEnd) - a := rootURL[protoEndIndex+len(protocolEnd):] - rootIndex := strings.Index(a, "/") - return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL -} - -// soapBody represents the element in a SOAP reply. 
-// fields we don't care about are elided.
-type soapBody struct {
-	XMLName xml.Name `xml:"Body"`
-	Data    []byte   `xml:",innerxml"`
-}
-
-// soapEnvelope represents the <s:Envelope> element in a SOAP reply.
-// fields we don't care about are elided.
-type soapEnvelope struct {
-	XMLName xml.Name `xml:"Envelope"`
-	Body    soapBody `xml:"Body"`
-}
-
-// soapRequest performs a soap request with the given parameters and returns
-// the XML reply stripped of the soap headers. In the case that the request is
-// unsuccessful, an error is returned.
-func soapRequest(url, function, message string) (replyXML []byte, err error) {
-	fullMessage := "<?xml version=\"1.0\" ?>" +
-		"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
-		"<s:Body>" + message + "</s:Body></s:Envelope>"
-
-	req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"")
-	req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3")
-	//req.Header.Set("Transfer-Encoding", "chunked")
-	req.Header.Set("SOAPAction", "\"urn:schemas-upnp-org:service:WANIPConnection:1#"+function+"\"")
-	req.Header.Set("Connection", "Close")
-	req.Header.Set("Cache-Control", "no-cache")
-	req.Header.Set("Pragma", "no-cache")
-
-	r, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	if r.Body != nil {
-		defer r.Body.Close()
-	}
-
-	if r.StatusCode >= 400 {
-		data, readErr := ioutil.ReadAll(r.Body)
-		if readErr != nil {
-			err = errors.Wrapf(readErr, "Error %d for %s", r.StatusCode, function)
-		} else {
-			err = errors.Errorf("Error %d for %s. Response: %s", r.StatusCode, function, data)
-		}
-
-		r = nil
-		return
-	}
-	var reply soapEnvelope
-	err = xml.NewDecoder(r.Body).Decode(&reply)
-	if err != nil {
-		return nil, err
-	}
-	return reply.Body.Data, nil
-}
-
-// getExternalIPAddressResponse represents the XML response to a
-// GetExternalIPAddress SOAP request.
-type getExternalIPAddressResponse struct {
-	XMLName           xml.Name `xml:"GetExternalIPAddressResponse"`
-	ExternalIPAddress string   `xml:"NewExternalIPAddress"`
-}
-
-// GetExternalAddress implements the NAT interface by fetching the external IP
-// from the UPnP router.
-func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
-	message := "<u:GetExternalIPAddress xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\">\r\n</u:GetExternalIPAddress>"
-	response, err := soapRequest(n.serviceURL, "GetExternalIPAddress", message)
-	if err != nil {
-		return nil, err
-	}
-
-	var reply getExternalIPAddressResponse
-	err = xml.Unmarshal(response, &reply)
-	if err != nil {
-		return nil, err
-	}
-
-	addr = net.ParseIP(reply.ExternalIPAddress)
-	if addr == nil {
-		return nil, errors.New("unable to parse ip address")
-	}
-	return addr, nil
-}
-
-// AddPortMapping implements the NAT interface by setting up a port forwarding
-// from the UPnP router to the local machine with the given ports and protocol.
-func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
-	// A single concatenation would break ARM compilation.
-	message := "<u:AddPortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\">\r\n" +
-		"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort)
-	message += "</NewExternalPort><NewProtocol>" + strings.ToUpper(protocol) + "</NewProtocol>"
-	message += "<NewInternalPort>" + strconv.Itoa(internalPort) + "</NewInternalPort>" +
-		"<NewInternalClient>" + n.ourIP + "</NewInternalClient>" +
-		"<NewEnabled>1</NewEnabled><NewPortMappingDescription>"
-	message += description +
-		"</NewPortMappingDescription><NewLeaseDuration>" + strconv.Itoa(timeout) +
-		"</NewLeaseDuration></u:AddPortMapping>"
-
-	response, err := soapRequest(n.serviceURL, "AddPortMapping", message)
-	if err != nil {
-		return
-	}
-
-	// TODO: check response to see if the port was forwarded
-	// If the port was not wildcard we don't get a reply with the port in
-	// it. Not sure about wildcard yet. miniupnpc just checks for error
-	// codes here.
-	mappedExternalPort = externalPort
-	_ = response
-	return
-}
-
-// DeletePortMapping implements the NAT interface by removing a port forwarding
-// from the UPnP router to the local machine with the given ports and protocol.
-func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
-
-	message := "<u:DeletePortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\">\r\n" +
-		"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) +
-		"</NewExternalPort><NewProtocol>" + strings.ToUpper(protocol) + "</NewProtocol>" +
-		"</u:DeletePortMapping>"
-
-	response, err := soapRequest(n.serviceURL, "DeletePortMapping", message)
-	if err != nil {
-		return
-	}
-
-	// TODO: check response to see if the port was deleted
-	// log.Println(message, response)
-	_ = response
-	return
-}
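
For reference, a minimal sketch of how the NAT helper deleted above was typically driven through its interface (Discover, AddPortMapping, GetExternalAddress, DeletePortMapping). It assumes the pre-removal import path github.com/kaspanet/kaspad/server/serverutils; the port number, mapping description, and lease duration are illustrative assumptions, not values taken from the source.

```go
package main

import (
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/server/serverutils"
)

func main() {
	// Probe the local network for a UPnP-capable gateway.
	nat, err := serverutils.Discover()
	if err != nil {
		log.Fatalf("no UPnP gateway found: %s", err)
	}

	// Forward an external TCP port to the same local port. The port,
	// description, and lease duration here are illustrative only.
	const port = 16111
	if _, err := nat.AddPortMapping("tcp", port, port, "kaspad peer port", 20*60); err != nil {
		log.Fatalf("AddPortMapping failed: %s", err)
	}
	defer func() {
		if err := nat.DeletePortMapping("tcp", port, port); err != nil {
			log.Printf("DeletePortMapping failed: %s", err)
		}
	}()

	// Ask the gateway which address remote peers will see.
	externalIP, err := nat.GetExternalAddress()
	if err != nil {
		log.Fatalf("GetExternalAddress failed: %s", err)
	}
	fmt.Printf("reachable at %s:%d\n", externalIP, port)
}
```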