clientv3/integration: test linearizable get with leader election, network partition

Test case that failed my balancer refactor https://github.com/coreos/etcd/pull/8834.
Currently, the kv network partition tests do not specifically test the
isolated-leader case.

This PR moves TestKVSwitchUnavailable to network_partition_test.go
and makes it always isolate the leader.
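
A minimal sketch of the leader-aware isolation the relocated test uses, with
identifiers taken from the diff below (cluster setup, client creation, and
error handling elided):

	// Wait until a leader exists, then cut it off from both followers; the
	// remaining two members still form a quorum and elect a new leader.
	lead := clus.WaitLeader(t)
	clus.Members[lead].InjectPartition(t,
		clus.Members[(lead+1)%3], clus.Members[(lead+2)%3])

The old TestKVSwitchUnavailable always partitioned clus.Members[0], which is
not guaranteed to be the leader at that point.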

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
Gyu-Ho Lee 2017-11-07 19:28:57 -08:00
parent 47728b8caf
commit 706cf20339
2 changed files with 45 additions and 26 deletions

clientv3/integration/kv_test.go

@@ -937,29 +937,3 @@ func TestKVPutAtMostOnce(t *testing.T) {
		t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
	}
}

func TestKVSwitchUnavailable(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
	defer clus.Terminate(t)

	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
	// try to connect with dead node in the endpoint list
	cfg := clientv3.Config{
		Endpoints: []string{
			clus.Members[0].GRPCAddr(),
			clus.Members[1].GRPCAddr(),
		},
		DialTimeout: 1 * time.Second}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()
	timeout := 3 * clus.Members[0].ServerConfig.ReqTimeout()
	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
	if _, err := cli.Get(ctx, "abc"); err != nil {
		t.Fatal(err)
	}
	cancel()
}

clientv3/integration/network_partition_test.go

@@ -146,6 +146,51 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
	}
}

// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures the
// balancer switches to another endpoint when the leader is isolated, so that
// a linearizable get does not fail with "etcdserver: request timed out".
func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)
	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	lead := clus.WaitLeader(t)

	timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{eps[(lead+1)%2]},
		DialTimeout: 1 * time.Second,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// wait for a non-leader to be pinned
	mustWaitPinReady(t, cli)

	// add all eps to the list, so that when the originally pinned one fails
	// the client can switch to the other available eps
	cli.SetEndpoints(eps[lead], eps[(lead+1)%2])

	// isolate leader
	clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3])

	// expect the balancer to switch endpoints while leader election is ongoing
	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
	_, err = cli.Get(ctx, "a")
	cancel()
	if err != nil {
		t.Fatal(err)
	}
}

func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) {
	testBalancerUnderNetworkPartitionWatch(t, true)
}
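
For context, the hunk header above shows the shared helper
testBalancerUnderNetworkPartition, which takes the client operation to exercise
as an op callback; its signature is truncated there. Assuming the parameter is
op func(*clientv3.Client, context.Context) error, a linearizable-get callback
passed to that helper would look roughly like this (an illustrative sketch, not
code from this commit):

	// Issue a linearizable range request and report its error to the helper.
	getOp := func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a")
		return err
	}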