Mirror of https://github.com/kaspanet/kaspad.git
Commit 5dbbdef0eb: Merge branch 'dev' into replace-daglabs-dnsseeder
@@ -223,18 +223,9 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
defer onEnd()

listenersThatPropagateSelectedParentChanged :=
m.context.NotificationManager.AllListenersThatPropagateVirtualSelectedParentChainChanged()
if len(listenersThatPropagateSelectedParentChanged) > 0 {
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
includeAcceptedTransactionIDs := false
for _, listener := range listenersThatPropagateSelectedParentChanged {
if listener.IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() {
includeAcceptedTransactionIDs = true
break
}
}
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()

if hasListeners {
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
if err != nil {
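The hunk above replaces the RPC manager's per-listener scan with a single call to the notification manager that reports both whether anyone is subscribed and whether any subscriber wants accepted transaction IDs, so the heavy notification is only built when needed. A minimal caller-side sketch of that shape; the types below are illustrative stand-ins, not the kaspad ones:

```go
package main

import "fmt"

// notificationManager is a hypothetical stand-in; the real kaspad type lives in
// rpccontext and scans its registered listeners.
type notificationManager struct{}

func (nm *notificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, includeAcceptedTransactionIDs bool) {
	// Illustrative values only.
	return true, false
}

// notifyChainChanged mirrors the caller-side shape after the change: one query,
// then an early exit when nobody is listening, so the expensive notification
// (and acceptedTransactionIDs in particular) is never built unnecessarily.
func notifyChainChanged(nm *notificationManager) {
	hasListeners, includeAcceptedTransactionIDs := nm.HasListenersThatPropagateVirtualSelectedParentChainChanged()
	if !hasListeners {
		return
	}
	fmt.Println("building chain-changed notification, includeAcceptedTransactionIDs =", includeAcceptedTransactionIDs)
}

func main() {
	notifyChainChanged(&notificationManager{})
}
```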
@@ -142,16 +142,29 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
return nil
}

// AllListenersThatPropagateVirtualSelectedParentChainChanged returns true if there's any listener that is
// subscribed to VirtualSelectedParentChainChanged notifications.
func (nm *NotificationManager) AllListenersThatPropagateVirtualSelectedParentChainChanged() []*NotificationListener {
var listenersThatPropagate []*NotificationListener
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
// to include AcceptedTransactionIDs.
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {

nm.RLock()
defer nm.RUnlock()

hasListeners = false
hasListenersThatRequireAcceptedTransactionIDs = false

for _, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentChainChangedNotifications {
listenersThatPropagate = append(listenersThatPropagate, listener)
hasListeners = true
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
hasListenersThatRequireAcceptedTransactionIDs = true
break
}
}
}
return listenersThatPropagate

return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
}

// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
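The hunk above removes AllListenersThatPropagateVirtualSelectedParentChainChanged (which returned a slice for the caller to scan again) and adds HasListenersThatPropagateVirtualSelectedParentChainChanged, which answers both questions in one pass under a read lock. Since the added lines are interleaved with removed ones in the diff, here they are assembled into a compilable sketch; the listener struct and the map of listeners are assumptions filled in for illustration:

```go
package main

import (
	"fmt"
	"sync"
)

// notificationListener carries the two flags used by the new predicate; field
// names mirror the diff, the struct itself is illustrative.
type notificationListener struct {
	propagateVirtualSelectedParentChainChangedNotifications                       bool
	includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
}

type notificationManager struct {
	sync.RWMutex
	listeners map[int]*notificationListener
}

// HasListenersThatPropagateVirtualSelectedParentChainChanged reports, in a single
// pass under a read lock, whether anyone is subscribed and whether anyone needs
// accepted transaction IDs, instead of handing a slice back to the caller.
func (nm *notificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
	nm.RLock()
	defer nm.RUnlock()

	for _, listener := range nm.listeners {
		if listener.propagateVirtualSelectedParentChainChangedNotifications {
			hasListeners = true
			// Generating acceptedTransactionIDs is heavy, so note whether anyone needs it.
			if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
				hasListenersThatRequireAcceptedTransactionIDs = true
				break
			}
		}
	}
	return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
}

func main() {
	nm := &notificationManager{listeners: map[int]*notificationListener{
		1: {propagateVirtualSelectedParentChainChangedNotifications: true},
	}}
	fmt.Println(nm.HasListenersThatPropagateVirtualSelectedParentChainChanged())
}
```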
@@ -338,7 +351,11 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored.
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()

if !nl.propagateUTXOsChangedNotifications {
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses =
@@ -353,7 +370,11 @@ func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored.
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()

if !nl.propagateUTXOsChangedNotifications {
return
}
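In the two hunks above, PropagateUTXOsChangedNotifications and StopPropagatingUTXOsChangedNotifications move from NotificationListener to NotificationManager and take the listener as an argument, so the manager's write lock guards the listener's internal address map while it is modified. A minimal sketch of that pattern, assuming a plain string address type in place of *UTXOsChangedNotificationAddress and simplified listener fields:

```go
package main

import (
	"fmt"
	"sync"
)

type address string

type notificationListener struct {
	propagateUTXOsChangedNotifications         bool
	propagateUTXOsChangedNotificationAddresses map[address]struct{}
}

type notificationManager struct {
	sync.RWMutex
}

// PropagateUTXOsChangedNotifications lives on the manager so the same lock that
// guards listener registration also guards mutation of the listener's address set.
func (nm *notificationManager) PropagateUTXOsChangedNotifications(nl *notificationListener, addresses []address) {
	nm.Lock()
	defer nm.Unlock()

	if !nl.propagateUTXOsChangedNotifications {
		nl.propagateUTXOsChangedNotifications = true
		nl.propagateUTXOsChangedNotificationAddresses = make(map[address]struct{}, len(addresses))
	}
	for _, a := range addresses {
		nl.propagateUTXOsChangedNotificationAddresses[a] = struct{}{} // duplicates are naturally ignored
	}
}

// StopPropagatingUTXOsChangedNotifications removes addresses under the same write
// lock; addresses that were never registered are simply ignored.
func (nm *notificationManager) StopPropagatingUTXOsChangedNotifications(nl *notificationListener, addresses []address) {
	nm.Lock()
	defer nm.Unlock()

	if !nl.propagateUTXOsChangedNotifications {
		return
	}
	for _, a := range addresses {
		delete(nl.propagateUTXOsChangedNotificationAddresses, a)
	}
}

func main() {
	nm := &notificationManager{}
	nl := &notificationListener{}
	nm.PropagateUTXOsChangedNotifications(nl, []address{"kaspa:example"})
	nm.StopPropagatingUTXOsChangedNotifications(nl, []address{"kaspa:example"})
	fmt.Println(len(nl.propagateUTXOsChangedNotificationAddresses))
}
```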
@@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
if err != nil {
return nil, err
}
listener.PropagateUTXOsChangedNotifications(addresses)
context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses)

response := appmessage.NewNotifyUTXOsChangedResponseMessage()
return response, nil
@@ -26,7 +26,7 @@ func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router
if err != nil {
return nil, err
}
listener.StopPropagatingUTXOsChangedNotifications(addresses)
context.NotificationManager.StopPropagatingUTXOsChangedNotifications(listener, addresses)

response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
return response, nil
@@ -64,6 +64,12 @@ type consensus struct {
virtualNotUpdated bool
}

// In order to prevent a situation that the consensus lock is held for too much time, we
// release the lock each time we resolve 100 blocks.
// Note: `virtualResolveChunk` should be smaller than `params.FinalityDuration` in order to avoid a situation
// where UpdatePruningPointByVirtual skips a pruning point.
const virtualResolveChunk = 100

func (s *consensus) ValidateAndInsertBlockWithTrustedData(block *externalapi.BlockWithTrustedData, validateUTXO bool) error {
s.lock.Lock()
defer s.lock.Unlock()
@@ -198,15 +204,32 @@ func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock, updat
if updateVirtual {
s.lock.Lock()
if s.virtualNotUpdated {
s.lock.Unlock()
err := s.ResolveVirtual(nil)
if err != nil {
return err
// We enter the loop in locked state
for {
_, isCompletelyResolved, err := s.resolveVirtualChunkNoLock(virtualResolveChunk)
if err != nil {
s.lock.Unlock()
return err
}
if isCompletelyResolved {
// Make sure we enter the block insertion function w/o releasing the lock.
// Otherwise, we might actually enter it in `s.virtualNotUpdated == true` state
_, err = s.validateAndInsertBlockNoLock(block, updateVirtual)
// Finally, unlock for the last iteration and return
s.lock.Unlock()
if err != nil {
return err
}
return nil
}
// Unlock to allow other threads to enter consensus
s.lock.Unlock()
// Lock for the next iteration
s.lock.Lock()
}
return s.validateAndInsertBlockWithLock(block, updateVirtual)
}
defer s.lock.Unlock()
_, err := s.validateAndInsertBlockNoLock(block, updateVirtual)
s.lock.Unlock()
if err != nil {
return err
}
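Instead of calling ResolveVirtual(nil) up front, block insertion now resolves the virtual in chunks of virtualResolveChunk blocks, releasing and re-taking the consensus lock between chunks, and deliberately performs the final insertion while still holding the lock of the last iteration so the block is not inserted in a virtualNotUpdated state. A generic sketch of that "work in chunks, yield the lock between chunks" pattern, with a hypothetical resolver standing in for the consensus object:

```go
package main

import (
	"fmt"
	"sync"
)

const virtualResolveChunk = 100

// chunkedResolver is a hypothetical stand-in for the consensus object.
type chunkedResolver struct {
	lock      sync.Mutex
	remaining int // pretend-work left before the virtual is fully resolved
}

// resolveChunkNoLock assumes the caller already holds the lock, mirroring
// resolveVirtualChunkNoLock in the diff. It reports whether resolution finished.
func (r *chunkedResolver) resolveChunkNoLock(chunk int) (isCompletelyResolved bool, err error) {
	if r.remaining > chunk {
		r.remaining -= chunk
		return false, nil
	}
	r.remaining = 0
	return true, nil
}

// insertAfterResolve resolves in chunks, unlocking between iterations so other
// goroutines can take the lock, and performs the final insertion without
// releasing the lock after the last chunk (otherwise the "not updated" state
// could be observed again).
func (r *chunkedResolver) insertAfterResolve(insert func() error) error {
	r.lock.Lock()
	for {
		isCompletelyResolved, err := r.resolveChunkNoLock(virtualResolveChunk)
		if err != nil {
			r.lock.Unlock()
			return err
		}
		if isCompletelyResolved {
			err = insert() // still holding the lock, as in the diff
			r.lock.Unlock()
			return err
		}
		// Yield the lock so other threads can enter, then take it again.
		r.lock.Unlock()
		r.lock.Lock()
	}
}

func main() {
	r := &chunkedResolver{remaining: 350}
	err := r.insertAfterResolve(func() error { fmt.Println("inserting block"); return nil })
	fmt.Println("done, err =", err)
}
```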
@@ -912,11 +935,7 @@ func (s *consensus) ResolveVirtual(progressReportCallback func(uint64, uint64))
progressReportCallback(virtualDAAScoreStart, virtualDAAScore)
}

// In order to prevent a situation that the consensus lock is held for too much time, we
// release the lock each time we resolve 100 blocks.
// Note: maxBlocksToResolve should be smaller than `params.FinalityDuration` in order to avoid a situation
// where UpdatePruningPointByVirtual skips a pruning point.
_, isCompletelyResolved, err := s.resolveVirtualChunkWithLock(100)
_, isCompletelyResolved, err := s.resolveVirtualChunkWithLock(virtualResolveChunk)
if err != nil {
return err
}
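ResolveVirtual keeps the same loop but now uses the shared virtualResolveChunk constant instead of a magic 100, and still reports progress through its callback, which receives the starting and current virtual DAA scores. A small, self-contained sketch of calling it with a progress callback; the resolver type below is a stand-in that only mirrors the signature shown in the hunk header:

```go
package main

import "fmt"

// resolver is a hypothetical stand-in exposing the same ResolveVirtual signature
// as the hunk header above: a progress callback taking (start, current) DAA scores.
type resolver struct{}

func (r *resolver) ResolveVirtual(progressReportCallback func(uint64, uint64)) error {
	// The real implementation resolves virtualResolveChunk blocks per iteration and
	// reports progress between chunks; here the callback is invoked once.
	if progressReportCallback != nil {
		progressReportCallback(1000, 1100)
	}
	return nil
}

func main() {
	var c resolver
	err := c.ResolveVirtual(func(virtualDAAScoreStart, virtualDAAScore uint64) {
		fmt.Printf("resolving virtual: DAA score %d (started at %d)\n", virtualDAAScore, virtualDAAScoreStart)
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}
```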
@@ -772,78 +772,41 @@ func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPoints(st
if err != nil {
return nil, err
}
currentPruningGhostDAG, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningHash, false)

utxoDiff := utxo.NewMutableUTXODiff()

iterator, err := pm.dagTraversalManager.SelectedChildIterator(stagingArea, currentPruningHash, previousPruningHash, false)
if err != nil {
return nil, err
}
previousPruningGhostDAG, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, previousPruningHash, false)
if err != nil {
return nil, err
defer iterator.Close()

for ok := iterator.First(); ok; ok = iterator.Next() {
child, err := iterator.Get()
if err != nil {
return nil, err
}
chainBlockAcceptanceData, err := pm.acceptanceDataStore.Get(pm.databaseContext, stagingArea, child)
if err != nil {
return nil, err
}
chainBlockHeader, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, child)
if err != nil {
return nil, err
}
for _, blockAcceptanceData := range chainBlockAcceptanceData {
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
if transactionAcceptanceData.IsAccepted {
err = utxoDiff.AddTransaction(transactionAcceptanceData.Transaction, chainBlockHeader.DAAScore())
if err != nil {
return nil, err
}
}
}
}
}

currentPruningCurrentDiffChild := currentPruningHash
previousPruningCurrentDiffChild := previousPruningHash
// We need to use BlueWork because it's the only thing that's monotonic in the whole DAG
// We use the BlueWork to know which point is currently lower on the DAG so we can keep climbing its children,
// that way we keep climbing on the lowest point until they both reach the exact same descendant
currentPruningCurrentDiffChildBlueWork := currentPruningGhostDAG.BlueWork()
previousPruningCurrentDiffChildBlueWork := previousPruningGhostDAG.BlueWork()

var diffHashesFromPrevious []*externalapi.DomainHash
var diffHashesFromCurrent []*externalapi.DomainHash
for {
// if currentPruningCurrentDiffChildBlueWork > previousPruningCurrentDiffChildBlueWork
if currentPruningCurrentDiffChildBlueWork.Cmp(previousPruningCurrentDiffChildBlueWork) == 1 {
diffHashesFromPrevious = append(diffHashesFromPrevious, previousPruningCurrentDiffChild)
previousPruningCurrentDiffChild, err = pm.utxoDiffStore.UTXODiffChild(pm.databaseContext, stagingArea, previousPruningCurrentDiffChild)
if err != nil {
return nil, err
}
diffChildGhostDag, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, previousPruningCurrentDiffChild, false)
if err != nil {
return nil, err
}
previousPruningCurrentDiffChildBlueWork = diffChildGhostDag.BlueWork()
} else if currentPruningCurrentDiffChild.Equal(previousPruningCurrentDiffChild) {
break
} else {
diffHashesFromCurrent = append(diffHashesFromCurrent, currentPruningCurrentDiffChild)
currentPruningCurrentDiffChild, err = pm.utxoDiffStore.UTXODiffChild(pm.databaseContext, stagingArea, currentPruningCurrentDiffChild)
if err != nil {
return nil, err
}
diffChildGhostDag, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningCurrentDiffChild, false)
if err != nil {
return nil, err
}
currentPruningCurrentDiffChildBlueWork = diffChildGhostDag.BlueWork()
}
}
// The order in which we apply the diffs should be from top to bottom, but we traversed from bottom to top
// so we apply the diffs in reverse order.
oldDiff := utxo.NewMutableUTXODiff()
for i := len(diffHashesFromPrevious) - 1; i >= 0; i-- {
utxoDiff, err := pm.utxoDiffStore.UTXODiff(pm.databaseContext, stagingArea, diffHashesFromPrevious[i])
if err != nil {
return nil, err
}
err = oldDiff.WithDiffInPlace(utxoDiff)
if err != nil {
return nil, err
}
}
newDiff := utxo.NewMutableUTXODiff()
for i := len(diffHashesFromCurrent) - 1; i >= 0; i-- {
utxoDiff, err := pm.utxoDiffStore.UTXODiff(pm.databaseContext, stagingArea, diffHashesFromCurrent[i])
if err != nil {
return nil, err
}
err = newDiff.WithDiffInPlace(utxoDiff)
if err != nil {
return nil, err
}
}
return oldDiff.DiffFrom(newDiff.ToImmutable())
return utxoDiff.ToImmutable(), err
}

// finalityScore is the number of finality intervals passed since
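The hunk above rewrites calculateDiffBetweenPreviousAndCurrentPruningPoints: rather than climbing UTXO-diff children from both pruning points and using BlueWork to decide which side to advance, it iterates the selected chain from the previous pruning point to the current one and adds every accepted transaction to a single mutable UTXO diff at the DAA score of the accepting chain block. A heavily simplified sketch of that accumulation loop; the store and iterator types are hypothetical stand-ins for the kaspad model interfaces:

```go
package main

import "fmt"

// Hypothetical, heavily simplified stand-ins for the kaspad stores and types.
type transaction struct{ id string }

type transactionAcceptanceData struct {
	Transaction *transaction
	IsAccepted  bool
}

type blockAcceptanceData struct {
	TransactionAcceptanceData []*transactionAcceptanceData
}

type chainBlock struct {
	acceptanceData []*blockAcceptanceData
	daaScore       uint64
}

// mutableUTXODiff is an illustrative stand-in for utxo.NewMutableUTXODiff().
type mutableUTXODiff struct{ added []string }

func (d *mutableUTXODiff) AddTransaction(tx *transaction, daaScore uint64) error {
	d.added = append(d.added, fmt.Sprintf("%s@%d", tx.id, daaScore))
	return nil
}

// diffBetweenPruningPoints mirrors the new loop's shape: walk the selected chain
// from the previous pruning point up to the current one and accumulate every
// accepted transaction into one diff.
func diffBetweenPruningPoints(chain []*chainBlock) (*mutableUTXODiff, error) {
	utxoDiff := &mutableUTXODiff{}
	for _, child := range chain {
		for _, blockAcceptance := range child.acceptanceData {
			for _, txAcceptance := range blockAcceptance.TransactionAcceptanceData {
				if !txAcceptance.IsAccepted {
					continue
				}
				if err := utxoDiff.AddTransaction(txAcceptance.Transaction, child.daaScore); err != nil {
					return nil, err
				}
			}
		}
	}
	return utxoDiff, nil
}

func main() {
	chain := []*chainBlock{{
		daaScore: 42,
		acceptanceData: []*blockAcceptanceData{{
			TransactionAcceptanceData: []*transactionAcceptanceData{
				{Transaction: &transaction{id: "tx1"}, IsAccepted: true},
				{Transaction: &transaction{id: "tx2"}, IsAccepted: false},
			},
		}},
	}}
	diff, err := diffBetweenPruningPoints(chain)
	fmt.Println(diff.added, err)
}
```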
@@ -297,7 +297,7 @@ var TestnetParams = Params{
Net: appmessage.Testnet,
RPCPort: "16210",
DefaultPort: "16211",
DNSSeeds: []string{"testnet-9-dnsseed.daglabs-dev.com"},
DNSSeeds: []string{"testnet-10-dnsseed.kas.pa"},

// DAG parameters
GenesisBlock: &testnetGenesisBlock,
@@ -22,6 +22,7 @@ type OnDisconnectedHandler func()
// GRPCClient is a gRPC-based RPC client
type GRPCClient struct {
stream protowire.RPC_MessageStreamClient
connection *grpc.ClientConn
onErrorHandler OnErrorHandler
onDisconnectedHandler OnDisconnectedHandler
}
@@ -43,7 +44,12 @@ func Connect(address string) (*GRPCClient, error) {
if err != nil {
return nil, errors.Wrapf(err, "error getting client stream for %s", address)
}
return &GRPCClient{stream: stream}, nil
return &GRPCClient{stream: stream, connection: gRPCConnection}, nil
}

// Close closes the underlying grpc connection
func (c *GRPCClient) Close() error {
return c.connection.Close()
}

// Disconnect disconnects from the RPC server
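The two hunks above make GRPCClient keep the *grpc.ClientConn it dialed, and add a Close method that closes it; previously only the stream was retained, so the connection and its background goroutines were never released. A minimal sketch of the same shape, assuming the google.golang.org/grpc module is available (grpc.WithInsecure() is used only to keep the sketch short) and with a placeholder where the protowire message stream would be opened:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// grpcClient mirrors the fix's shape: keep the *grpc.ClientConn next to the stream
// so Close() can tear the connection down. The stream field is a placeholder here;
// in kaspad it is a protowire.RPC_MessageStreamClient.
type grpcClient struct {
	stream     interface{}
	connection *grpc.ClientConn
}

// connect is a hypothetical, simplified Connect: dial, open the stream (elided),
// and importantly hold on to the connection.
func connect(address string) (*grpcClient, error) {
	gRPCConnection, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	// ...open the message stream here in the real client...
	return &grpcClient{stream: nil, connection: gRPCConnection}, nil
}

// Close closes the underlying gRPC connection, which is what stops the
// connection's background goroutines from leaking.
func (c *grpcClient) Close() error {
	return c.connection.Close()
}

func main() {
	client, err := connect("localhost:16110")
	if err != nil {
		fmt.Println("dial error:", err)
		return
	}
	fmt.Println("close error:", client.Close())
}
```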
@@ -143,6 +143,9 @@ func (c *RPCClient) handleClientDisconnected() {
}

func (c *RPCClient) handleClientError(err error) {
if atomic.LoadUint32(&c.isClosed) == 1 {
return
}
log.Warnf("Received error from client: %s", err)
c.handleClientDisconnected()
}
@@ -159,7 +162,7 @@ func (c *RPCClient) Close() error {
return errors.Errorf("Cannot close a client that had already been closed")
}
c.rpcRouter.router.Close()
return nil
return c.GRPCClient.Close()
}

// Address returns the address the RPC client connected to
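handleClientError now returns early when the client has already been closed, checked through an atomic flag, and RPCClient.Close closes the embedded GRPCClient (and with it the underlying connection) in addition to the router. A small sketch of that closed-flag guard; the CompareAndSwap in Close below is an assumption consistent with the error message shown in the hunk, since the diff does not include that line:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// rpcClient is a hypothetical stand-in showing only the shutdown-related pieces.
type rpcClient struct {
	isClosed   uint32
	closeInner func() error // stands in for closing the router and the embedded GRPCClient
}

// handleClientError ignores errors that arrive after Close(), mirroring the
// atomic.LoadUint32 guard added in the diff.
func (c *rpcClient) handleClientError(err error) {
	if atomic.LoadUint32(&c.isClosed) == 1 {
		return
	}
	fmt.Println("received error from client:", err)
	// ...handle disconnect...
}

// Close flips the flag exactly once (an assumption, see the lead-in) and then
// tears down the underlying resources.
func (c *rpcClient) Close() error {
	if !atomic.CompareAndSwapUint32(&c.isClosed, 0, 1) {
		return errors.New("cannot close a client that had already been closed")
	}
	return c.closeInner()
}

func main() {
	c := &rpcClient{closeInner: func() error { return nil }}
	fmt.Println(c.Close())
	c.handleClientError(errors.New("late error")) // silently ignored after Close
}
```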
@@ -2,6 +2,7 @@ package integration

import (
"github.com/kaspanet/kaspad/infrastructure/config"
"runtime"
"testing"
"time"
@@ -26,6 +27,37 @@ func newTestRPCClient(rpcAddress string) (*testRPCClient, error) {
}, nil
}

func connectAndClose(rpcAddress string) error {
client, err := rpcclient.NewRPCClient(rpcAddress)
if err != nil {
return err
}
defer client.Close()
return nil
}

func TestRPCClientGoroutineLeak(t *testing.T) {
_, teardown := setupHarness(t, &harnessParams{
p2pAddress: p2pAddress1,
rpcAddress: rpcAddress1,
miningAddress: miningAddress1,
miningAddressPrivateKey: miningAddress1PrivateKey,
})
defer teardown()
numGoroutinesBefore := runtime.NumGoroutine()
for i := 1; i < 100; i++ {
err := connectAndClose(rpcAddress1)
if err != nil {
t.Fatalf("Failed to set up an RPC client: %s", err)
}
time.Sleep(10 * time.Millisecond)
if runtime.NumGoroutine() > numGoroutinesBefore+10 {
t.Fatalf("Number of goroutines is increasing for each RPC client open (%d -> %d), which indicates a memory leak",
numGoroutinesBefore, runtime.NumGoroutine())
}
}
}

func TestRPCMaxInboundConnections(t *testing.T) {
harness, teardown := setupHarness(t, &harnessParams{
p2pAddress: p2pAddress1,
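The new TestRPCClientGoroutineLeak repeatedly opens and closes an RPC client and fails if runtime.NumGoroutine() keeps climbing, which is how the connection-close fix above is verified. A standalone sketch of the same leak-detection pattern using only the standard library; the deliberately leaky client below is purely illustrative:

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

// leakyClient starts a goroutine that Close never stops; set stopOnClose to true
// to see the check stay quiet. Purely illustrative.
type leakyClient struct {
	done        chan struct{}
	stopOnClose bool
}

func newLeakyClient(stopOnClose bool) *leakyClient {
	c := &leakyClient{done: make(chan struct{}), stopOnClose: stopOnClose}
	go func() {
		<-c.done // blocks forever unless Close signals it
	}()
	return c
}

func (c *leakyClient) Close() {
	if c.stopOnClose {
		close(c.done)
	}
}

func main() {
	numGoroutinesBefore := runtime.NumGoroutine()
	for i := 1; i < 100; i++ {
		c := newLeakyClient(false) // flip to true and no leak is reported
		c.Close()
		time.Sleep(time.Millisecond)
		if runtime.NumGoroutine() > numGoroutinesBefore+10 {
			fmt.Printf("goroutine count grew from %d to %d: leak detected\n",
				numGoroutinesBefore, runtime.NumGoroutine())
			return
		}
	}
	fmt.Println("no leak detected")
}
```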