Mirror of https://github.com/kaspanet/kaspad.git
Limit the amount of inbound RPC connections (#1818)
* Limit the amount of inbound RPC connections.
* Increment/decrement the right variable.
* Implement TestRPCMaxInboundConnections.
* Make go vet happy.
* Increase RPCMaxInboundConnections to 128.
* Set NUM_CLIENTS=128 in the rpc-idle-clients stability test.
* Explain why the P2P server has unlimited inbound connections.
Parent: d922ee1be2
Commit: ce17348175
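At its core, the change below wraps the gRPC stream handler in a mutex-guarded connection counter: each inbound stream must claim a slot before it is served and returns the slot when its stopChan closes, with a limit of 0 meaning unlimited. As a quick orientation before the diff, here is a minimal standalone sketch of that pattern; the connectionLimiter type and its method names are invented for illustration and are not kaspad's API.

package main

import (
	"fmt"
	"sync"
)

// connectionLimiter is a hypothetical, pared-down version of the state this
// commit adds to gRPCServer: a counter guarded by a mutex, where max == 0
// means "unlimited".
type connectionLimiter struct {
	max   int
	count int
	lock  sync.Mutex
}

// acquire claims a slot for a new inbound connection, failing once full.
func (l *connectionLimiter) acquire() error {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.max > 0 && l.count == l.max {
		return fmt.Errorf("limit of %d inbound connections has been exceeded", l.max)
	}
	l.count++
	return nil
}

// release returns a slot once the connection's stream terminates.
func (l *connectionLimiter) release() {
	l.lock.Lock()
	defer l.lock.Unlock()
	l.count--
}

func main() {
	limiter := &connectionLimiter{max: 2}
	for i := 1; i <= 3; i++ {
		if err := limiter.acquire(); err != nil {
			fmt.Println("connection", i, "rejected:", err) // the third is rejected
		} else {
			fmt.Println("connection", i, "accepted")
		}
	}
	limiter.release() // one stream ends, freeing a slot
	fmt.Println("after a release, acquire err:", limiter.acquire())
}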
@@ -9,6 +9,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/peer"
 	"net"
+	"sync"
 	"time"
 )
@@ -17,15 +18,22 @@ type gRPCServer struct {
 	listeningAddresses []string
 	server             *grpc.Server
 	name               string
+
+	maxInboundConnections      int
+	inboundConnectionCount     int
+	inboundConnectionCountLock *sync.Mutex
 }

 // newGRPCServer creates a gRPC server
-func newGRPCServer(listeningAddresses []string, maxMessageSize int, name string) *gRPCServer {
-	log.Debugf("Created new %s GRPC server with maxMessageSize %d", name, maxMessageSize)
+func newGRPCServer(listeningAddresses []string, maxMessageSize int, maxInboundConnections int, name string) *gRPCServer {
+	log.Debugf("Created new %s GRPC server with maxMessageSize %d and maxInboundConnections %d", name, maxMessageSize, maxInboundConnections)
 	return &gRPCServer{
 		server:                     grpc.NewServer(grpc.MaxRecvMsgSize(maxMessageSize), grpc.MaxSendMsgSize(maxMessageSize)),
 		listeningAddresses:         listeningAddresses,
 		name:                       name,
+		maxInboundConnections:      maxInboundConnections,
+		inboundConnectionCount:     0,
+		inboundConnectionCountLock: &sync.Mutex{},
 	}
 }
@@ -86,6 +94,11 @@ func (s *gRPCServer) SetOnConnectedHandler(onConnectedHandler server.OnConnected
 }

 func (s *gRPCServer) handleInboundConnection(ctx context.Context, stream grpcStream) error {
+	err := s.incrementInboundConnectionCountAndLimitIfRequired()
+	if err != nil {
+		return err
+	}
+
 	peerInfo, ok := peer.FromContext(ctx)
 	if !ok {
 		return errors.Errorf("Error getting stream peer info from context")
@@ -97,7 +110,7 @@ func (s *gRPCServer) handleInboundConnection(ctx context.Context, stream grpcStr
 	connection := newConnection(s, tcpAddress, stream, nil)

-	err := s.onConnectedHandler(connection)
+	err = s.onConnectedHandler(connection)
 	if err != nil {
 		return err
 	}
@@ -105,6 +118,25 @@ func (s *gRPCServer) handleInboundConnection(ctx context.Context, stream grpcStr
 	log.Infof("%s Incoming connection from %s", s.name, peerInfo.Addr)

 	<-connection.stopChan
+	s.decrementInboundConnectionCount()
 	return nil
 }

+func (s *gRPCServer) incrementInboundConnectionCountAndLimitIfRequired() error {
+	s.inboundConnectionCountLock.Lock()
+	defer s.inboundConnectionCountLock.Unlock()
+
+	if s.maxInboundConnections > 0 && s.inboundConnectionCount == s.maxInboundConnections {
+		return errors.Errorf("limit of %d inbound connections has been exceeded", s.maxInboundConnections)
+	}
+
+	s.inboundConnectionCount++
+	return nil
+}
+
+func (s *gRPCServer) decrementInboundConnectionCount() {
+	s.inboundConnectionCountLock.Lock()
+	defer s.inboundConnectionCountLock.Unlock()
+
+	s.inboundConnectionCount--
+}
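The guard above does its check and increment under a single mutex hold, which is what keeps concurrent handshakes from oversubscribing the server. Below is a small self-contained demonstration of why that matters, using a made-up number of attempts; the local variables mirror the fields added above.

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Illustrative numbers: the commit uses 128 for the RPC server.
	const maxInbound = 128
	const attempts = 500

	var lock sync.Mutex // plays the role of inboundConnectionCountLock
	count := 0          // plays the role of inboundConnectionCount

	var wg sync.WaitGroup
	for i := 0; i < attempts; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			lock.Lock()
			defer lock.Unlock()
			// The same guard as incrementInboundConnectionCountAndLimitIfRequired:
			// a max of 0 would mean unlimited; equality means the server is full.
			if maxInbound > 0 && count == maxInbound {
				return // connection rejected
			}
			count++ // connection accepted
		}()
	}
	wg.Wait()
	// Always prints 128: check-and-increment happens atomically under the
	// mutex, so racing streams can never push the count past the limit.
	fmt.Printf("accepted %d of %d attempted connections\n", count, attempts)
}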
@@ -20,9 +20,15 @@ type p2pServer struct {

 const p2pMaxMessageSize = 10 * 1024 * 1024 // 10MB

+// p2pMaxInboundConnections is the max amount of inbound connections for the P2P server.
+// Note that inbound connections are not limited by the gRPC server. (A value of 0 means
+// unlimited inbound connections.) The P2P limiting logic is more applicative, and as such
+// is handled in the ConnectionManager instead.
+const p2pMaxInboundConnections = 0
+
 // NewP2PServer creates a new P2PServer
 func NewP2PServer(listeningAddresses []string) (server.P2PServer, error) {
-	gRPCServer := newGRPCServer(listeningAddresses, p2pMaxMessageSize, "P2P")
+	gRPCServer := newGRPCServer(listeningAddresses, p2pMaxMessageSize, p2pMaxInboundConnections, "P2P")
 	p2pServer := &p2pServer{gRPCServer: *gRPCServer}
 	protowire.RegisterP2PServer(gRPCServer.server, p2pServer)
 	return p2pServer, nil
@@ -14,9 +14,12 @@ type rpcServer struct {
 // RPCMaxMessageSize is the max message size for the RPC server to send and receive
 const RPCMaxMessageSize = 1024 * 1024 * 1024 // 1 GB

+// RPCMaxInboundConnections is the max amount of inbound connections for the RPC server
+const RPCMaxInboundConnections = 128
+
 // NewRPCServer creates a new RPCServer
 func NewRPCServer(listeningAddresses []string) (server.Server, error) {
-	gRPCServer := newGRPCServer(listeningAddresses, RPCMaxMessageSize, "RPC")
+	gRPCServer := newGRPCServer(listeningAddresses, RPCMaxMessageSize, RPCMaxInboundConnections, "RPC")
 	rpcServer := &rpcServer{gRPCServer: *gRPCServer}
 	protowire.RegisterRPCServer(gRPCServer.server, rpcServer)
 	return rpcServer, nil
@@ -23,6 +23,7 @@ type RPCClient struct {
 	isConnected          uint32
 	isClosed             uint32
 	isReconnecting       uint32
+	lastDisconnectedTime time.Time

 	timeout time.Duration
 }
@@ -112,14 +113,15 @@ func (c *RPCClient) Reconnect() error {

 	// Attempt to connect until we succeed
 	for {
+		const retryDelay = 10 * time.Second
+		if time.Since(c.lastDisconnectedTime) > retryDelay {
 			err := c.connect()
 			if err == nil {
 				return nil
 			}
 			log.Warnf("Could not automatically reconnect to %s: %s", c.rpcAddress, err)

-			const retryDelay = 10 * time.Second
 			log.Warnf("Retrying in %s", retryDelay)
+		}
 		time.Sleep(retryDelay)
 	}
 }
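The reshaped loop above throttles reconnection: the client only dials once the disconnect is older than retryDelay, and it sleeps between iterations either way, so a server that just dropped us (for example, because RPCMaxInboundConnections was hit) is not hammered immediately. Here is a minimal sketch of that timing behavior; the reconnector type and its failure-counting connect stub are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"time"
)

// reconnector mimics the fields RPCClient uses for throttled reconnects.
type reconnector struct {
	lastDisconnectedTime time.Time
	failuresLeft         int // stub: how many connect() calls fail before success
}

// connect is a stand-in for RPCClient.connect, failing a few times first.
func (r *reconnector) connect() error {
	if r.failuresLeft > 0 {
		r.failuresLeft--
		return errors.New("connection refused")
	}
	return nil
}

// reconnect mirrors the shape of the patched Reconnect loop: the dial is
// attempted only once the disconnect is older than retryDelay, and the
// loop always sleeps retryDelay between iterations.
func (r *reconnector) reconnect() error {
	for {
		const retryDelay = 100 * time.Millisecond // shortened for the demo
		if time.Since(r.lastDisconnectedTime) > retryDelay {
			err := r.connect()
			if err == nil {
				return nil
			}
			fmt.Println("could not reconnect:", err)
			fmt.Println("retrying in", retryDelay)
		}
		time.Sleep(retryDelay)
	}
}

func main() {
	r := &reconnector{lastDisconnectedTime: time.Now(), failuresLeft: 2}
	start := time.Now()
	if err := r.reconnect(); err == nil {
		// The first iteration skips the dial (the disconnect is too recent),
		// then two dials fail, so success arrives after about 300ms here.
		fmt.Println("reconnected after", time.Since(start).Round(time.Millisecond))
	}
}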
@@ -127,6 +129,7 @@ func (c *RPCClient) Reconnect() error {
 func (c *RPCClient) handleClientDisconnected() {
 	atomic.StoreUint32(&c.isConnected, 0)
 	if atomic.LoadUint32(&c.isClosed) == 0 {
+		c.lastDisconnectedTime = time.Now()
 		err := c.Reconnect()
 		if err != nil {
 			panic(err)
@@ -1,7 +1,7 @@
 #!/bin/bash
 rm -rf /tmp/kaspad-temp

-NUM_CLIENTS=1000
+NUM_CLIENTS=128
 kaspad --devnet --appdir=/tmp/kaspad-temp --profile=6061 --rpcmaxwebsockets=$NUM_CLIENTS &
 KASPAD_PID=$!
 KASPAD_KILLED=0
@@ -1,6 +1,8 @@
 package integration

 import (
+	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver"
+	"testing"
 	"time"

 	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
@@ -23,3 +25,59 @@ func newTestRPCClient(rpcAddress string) (*testRPCClient, error) {
 		RPCClient: rpcClient,
 	}, nil
 }
+
+func TestRPCMaxInboundConnections(t *testing.T) {
+	harness, teardown := setupHarness(t, &harnessParams{
+		p2pAddress:              p2pAddress1,
+		rpcAddress:              rpcAddress1,
+		miningAddress:           miningAddress1,
+		miningAddressPrivateKey: miningAddress1PrivateKey,
+	})
+	defer teardown()
+
+	// Close the default RPC client so that it won't interfere with the test
+	err := harness.rpcClient.Close()
+	if err != nil {
+		t.Fatalf("Failed to close the default harness RPCClient: %s", err)
+	}
+
+	// Connect `RPCMaxInboundConnections` clients. We expect this to succeed immediately
+	rpcClients := []*testRPCClient{}
+	doneChan := make(chan error)
+	go func() {
+		for i := 0; i < grpcserver.RPCMaxInboundConnections; i++ {
+			rpcClient, err := newTestRPCClient(harness.rpcAddress)
+			if err != nil {
+				doneChan <- err
+			}
+			rpcClients = append(rpcClients, rpcClient)
+		}
+		doneChan <- nil
+	}()
+	select {
+	case err = <-doneChan:
+		if err != nil {
+			t.Fatalf("newTestRPCClient: %s", err)
+		}
+	case <-time.After(time.Second):
+		t.Fatalf("Timeout for connecting %d RPC connections elapsed", grpcserver.RPCMaxInboundConnections)
+	}
+
+	// Try to connect another client. We expect this to fail
+	// We set a timeout to account for reconnection mechanisms
+	go func() {
+		rpcClient, err := newTestRPCClient(harness.rpcAddress)
+		if err != nil {
+			doneChan <- err
+		}
+		rpcClients = append(rpcClients, rpcClient)
+		doneChan <- nil
+	}()
+	select {
+	case err = <-doneChan:
+		if err == nil {
+			t.Fatalf("newTestRPCClient unexpectedly succeeded")
+		}
+	case <-time.After(time.Second * 15):
+	}
+}
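The test above bounds each blocking connection phase with the goroutine-plus-select idiom: the work runs in a goroutine that reports on doneChan, while select races it against time.After. Below is a self-contained sketch of that idiom; slowOperation and runWithTimeout are invented stand-ins, and, unlike the test, the channel here is buffered so the worker goroutine cannot leak after a timeout.

package main

import (
	"errors"
	"fmt"
	"time"
)

// slowOperation stands in for a blocking call such as newTestRPCClient.
func slowOperation(d time.Duration) error {
	time.Sleep(d)
	return errors.New("finished too late to matter")
}

// runWithTimeout runs op in a goroutine and selects between its result
// and a deadline, mirroring the structure of TestRPCMaxInboundConnections.
func runWithTimeout(op func() error, timeout time.Duration) error {
	doneChan := make(chan error, 1) // buffered: the goroutine can always send
	go func() {
		doneChan <- op()
	}()
	select {
	case err := <-doneChan:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("timeout of %s elapsed", timeout)
	}
}

func main() {
	// Completes within the deadline: we get the operation's own result.
	fmt.Println(runWithTimeout(func() error { return slowOperation(10 * time.Millisecond) }, time.Second))
	// Exceeds the deadline: the select takes the time.After branch.
	fmt.Println(runWithTimeout(func() error { return slowOperation(2 * time.Second) }, 100*time.Millisecond))
}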