Flakes: Additional logging and timeouts to understand common flakes.
commit ed1bc447c7
parent 68f2cb8c77
@@ -120,19 +120,22 @@ func TestLeaseGrantAndList(t *testing.T) {
 	for _, nc := range nestedCases {
 		t.Run(tc.name+"/"+nc.name, func(t *testing.T) {
+			t.Logf("Creating cluster...")
 			clus := testRunner.NewCluster(t, tc.config)
 			defer clus.Close()
 			cc := clus.Client()
 
+			t.Logf("Created cluster and client")
 			testutils.ExecuteWithTimeout(t, 10*time.Second, func() {
 				createdLeases := []clientv3.LeaseID{}
 				for i := 0; i < nc.leaseCount; i++ {
 					leaseResp, err := cc.Grant(10)
+					t.Logf("Grant returned: resp:%s err:%v", leaseResp.String(), err)
 					require.NoError(t, err)
 					createdLeases = append(createdLeases, leaseResp.ID)
 				}
 
 				resp, err := cc.LeaseList()
+				t.Logf("Lease list returned: resp:%s err:%v", resp.String(), err)
 				require.NoError(t, err)
 				require.Len(t, resp.Leases, nc.leaseCount)
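The lease test body runs inside testutils.ExecuteWithTimeout, so a hung Grant or LeaseList fails the test after 10s instead of stalling the whole job. The helper's implementation is not part of this diff; a minimal sketch of the pattern, assuming it simply races the function against a timer, might look like this (the lowercase name marks it as a stand-in, not etcd's actual code):

```go
package testutils

import (
	"testing"
	"time"
)

// executeWithTimeout runs fn in a goroutine and fails the test if fn has not
// returned within timeout. Sketch only; the real etcd helper may differ.
func executeWithTimeout(t *testing.T, timeout time.Duration, fn func()) {
	t.Helper()
	done := make(chan struct{})
	go func() {
		defer close(done)
		fn()
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		// If fn is truly stuck its goroutine leaks, which is an accepted
		// trade-off in test code: a fast, attributable failure beats a hang.
		t.Fatalf("test did not finish within %v", timeout)
	}
}
```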
@@ -96,7 +96,7 @@ func TestMutexTryLockSingleNode(t *testing.T) {
 	integration2.BeforeTest(t)
 	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-
+	t.Logf("3 nodes cluster created...")
 	var clients []*clientv3.Client
 	testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
 	integration2.CloseClients(t, clients)
@@ -113,35 +113,39 @@ func TestMutexTryLockMultiNode(t *testing.T) {
 }
 
 func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
 	lockedC := make(chan *concurrency.Mutex)
 	notlockedC := make(chan *concurrency.Mutex)
 	stopC := make(chan struct{})
 	defer close(stopC)
 
 	for i := 0; i < lockers; i++ {
-		go func() {
+		go func(i int) {
 			session, err := concurrency.NewSession(chooseClient())
 			if err != nil {
 				t.Error(err)
 			}
 			m := concurrency.NewMutex(session, "test-mutex-try-lock")
-			err = m.TryLock(context.TODO())
+			err = m.TryLock(ctx)
 			if err == nil {
 				select {
 				case lockedC <- m:
 				case <-stopC:
+				case <-ctx.Done():
+					t.Errorf("Thread: %v, Context failed: %v", i, err)
 				}
 			} else if err == concurrency.ErrLocked {
 				select {
 				case notlockedC <- m:
 				case <-stopC:
+				case <-ctx.Done():
+					t.Errorf("Thread: %v, Context failed: %v", i, err)
 				}
 			} else {
-				t.Errorf("Unexpected Error %v", err)
+				t.Errorf("Thread: %v; Unexpected Error %v", i, err)
 			}
-		}()
+		}(i)
 	}
 
-	timerC := time.After(time.Second)
+	timerC := time.After(30 * time.Second)
 	select {
 	case <-lockedC:
 		for i := 0; i < lockers-1; i++ {
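The go func() to go func(i int) change fixes a classic Go pitfall: before Go 1.22, every iteration of the loop shared a single loop variable, so goroutines closing over i could all observe its final value and the per-thread log lines would be meaningless. Passing i as an argument gives each goroutine its own copy. A minimal standalone illustration:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) { // i is a per-goroutine copy, not the shared loop variable
			defer wg.Done()
			fmt.Println(i) // prints 0, 1, 2 in some order
		}(i)
	}
	wg.Wait()
}
```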
@@ -154,7 +158,7 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
 			}
 		}
 	case <-timerC:
-		t.Errorf("timed out waiting for lock")
+		t.Errorf("timed out waiting for lock (30s)")
 	}
 }
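Both select blocks also gain a case <-ctx.Done() arm, so a locker goroutine can no longer block forever on lockedC or notlockedC once the 30-second budget from context.WithTimeout expires; it reports the failure and exits instead. A minimal sketch of that three-way select, with a hypothetical helper name standing in for the inlined test code:

```go
package mutextest

import (
	"context"
	"errors"
)

// sendOrGiveUp is a hypothetical helper showing the pattern from the diff:
// hand off a value, or bail out on an explicit stop signal or on context
// expiry instead of blocking forever.
func sendOrGiveUp(ctx context.Context, out chan<- int, v int, stop <-chan struct{}) error {
	select {
	case out <- v:
		return nil
	case <-stop:
		return errors.New("stopped before send")
	case <-ctx.Done():
		return ctx.Err() // context.DeadlineExceeded once the budget is spent
	}
}
```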
@@ -284,6 +284,8 @@ func TestIssue3699(t *testing.T) {
 	// add node d
 	c.AddMember(t)
 
+	t.Logf("Disturbing cluster till member:3 will become a leader")
+
 	// electing node d as leader makes node a unable to participate
 	leaderID := c.WaitMembersForLeader(t, c.Members)
 	for leaderID != 3 {
@@ -297,11 +299,16 @@ func TestIssue3699(t *testing.T) {
 		leaderID = c.WaitMembersForLeader(t, c.Members)
 	}
 
+	t.Logf("Finally elected member 3 as the leader.")
+
+	t.Logf("Restarting member '0'...")
 	// bring back node a
 	// node a will remain useless as long as d is the leader.
 	if err := c.Members[0].Restart(t); err != nil {
 		t.Fatal(err)
 	}
+	t.Logf("Restarted member '0'.")
+
 	select {
 	// waiting for ReadyNotify can take several seconds
 	case <-time.After(10 * time.Second):
@@ -311,12 +318,13 @@ func TestIssue3699(t *testing.T) {
 	case <-c.Members[0].Server.ReadyNotify():
 	}
 	// must WaitMembersForLeader so goroutines don't leak on terminate
-	c.WaitMembersForLeader(t, c.Members)
+	c.WaitLeader(t)
 
+	t.Logf("Expecting successful put...")
 	// try to participate in Cluster
 	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err := c.Members[0].Client.Put(ctx, "/foo", "bar"); err != nil {
-		t.Fatalf("unexpected error on Set (%v)", err)
+		t.Fatalf("unexpected error on Put (%v)", err)
 	}
 	cancel()
 }