kaspad/testing/integration/16_incoming_connections_test.go
Ori Newman 5dbb1da84b
Implement pruning point proof (#1832)
* Calculate GHOSTDAG, reachability etc for each level

* Don't preallocate cache for dag stores except level 0 and reduce the number of connections in the integration test to 32

* Reduce the number of connections in the integration test to 16

* Increase page file

* BuildPruningPointProof

* BuildPruningPointProof

* Add PruningProofManager

* Implement ApplyPruningPointProof

* Add prefix and fix blockAtDepth and fill headersByLevel

* Some bug fixes

* Include all relevant blocks for each level in the proof

* Fix syncAndValidatePruningPointProof to return the right block hash

* Fix block window

* Fix isAncestorOfPruningPoint

* Ban for rule errors on pruning proof

* Find common ancestor for blockAtDepthMAtNextLevel

* Use pruning proof in TestValidateAndInsertImportedPruningPoint

* stage status and finality point for proof blocks

* Uncomment golint

* Change test timeouts

* Calculate merge set for ApplyPruningPointProof

* Increase test timeout

* Add better caching for daa window store

* Return to default timeout

* Add ErrPruningProofMissesBlocksBelowPruningPoint

* Add errDAAWindowBlockNotFound

* Force connection loop next iteration on connection manager stop

* Revert to Test64IncomingConnections

* Remove BlockAtDepth from DAGTraversalManager

* numBullies->16

* Set page file size to 8gb

* Increase p2p max message size

* Test64IncomingConnections->Test16IncomingConnections

* Add comment for PruningProofM

* Add comment in `func (c *ConnectionManager) Stop()`

* Rename isAncestorOfPruningPoint->isAncestorOfSelectedTip

* Revert page file to 16gb

* Improve ExpectedHeaderPruningPoint perf

* Fix comment

* Revert "Improve ExpectedHeaderPruningPoint perf"

This reverts commit bca1080e7140c78d510f51bbea858ae280c2f38e.

* Don't test windows
2021-10-26 09:48:27 +03:00

70 lines
1.8 KiB
Go

package integration

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/app/appmessage"
)
func Test16IncomingConnections(t *testing.T) {
	// Much more than 16 hosts creates a risk of running out of available file descriptors for leveldb
	const numBullies = 16
	harnessesParams := make([]*harnessParams, numBullies+1)
	for i := 0; i < numBullies+1; i++ {
		harnessesParams[i] = &harnessParams{
			p2pAddress:              fmt.Sprintf("127.0.0.1:%d", 12345+i),
			rpcAddress:              fmt.Sprintf("127.0.0.1:%d", 22345+i),
			miningAddress:           miningAddress1,
			miningAddressPrivateKey: miningAddress1PrivateKey,
		}
	}
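
	// Start all nodes; index 0 is the victim, the rest are the bullies,
	// each of which connects to the victim.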
	appHarnesses, teardown := setupHarnesses(t, harnessesParams)
	defer teardown()

	victim, bullies := appHarnesses[0], appHarnesses[1:]

	for _, bully := range bullies {
		connect(t, victim, bully)
	}
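
	// Expect exactly one block-added notification from each bully; a
	// duplicate notification from the same bully fails the test.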
	blockAddedWG := sync.WaitGroup{}
	blockAddedWG.Add(numBullies)
	for _, bully := range bullies {
		blockAdded := false
		onBlockAdded := func(_ *appmessage.BlockAddedNotificationMessage) {
			if blockAdded {
				t.Fatalf("Single bully reported block added twice")
			}
			blockAdded = true
			blockAddedWG.Done()
		}

		err := bully.rpcClient.RegisterForBlockAddedNotifications(onBlockAdded)
		if err != nil {
			t.Fatalf("Error from RegisterForBlockAddedNotifications: %+v", err)
		}
	}
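
	// Mining a single block on the victim should propagate to every bully
	// and trigger each one's block-added notification.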
	_ = mineNextBlock(t, victim)

	select {
	case <-time.After(defaultTimeout):
		t.Fatalf("Timeout waiting for block added notification from the bullies")
	case <-ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }):
	}
}
// ReceiveFromChanWhenDone takes a blocking function and returns a channel that is closed once the function returns.
func ReceiveFromChanWhenDone(callback func()) <-chan struct{} {
	ch := make(chan struct{})
	spawn("ReceiveFromChanWhenDone", func() {
		callback()
		close(ch)
	})
	return ch
}
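
Note that `spawn` is not defined in this file; elsewhere in kaspad each package binds it to a panic-handling goroutine wrapper from the util/panics package. A minimal stand-in sketch of what this test assumes, using a plain goroutine in place of kaspad's actual wrapper:

// spawn launches callback in its own goroutine under the given name.
// This is a sketch of the contract the test relies on, not kaspad's
// real definition (which is assumed to come from
// panics.GoroutineWrapperFunc(log) and to recover and log panics).
func spawn(name string, callback func()) {
	go func() {
		_ = name // the real wrapper uses the name when logging panics
		callback()
	}()
}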