From 706cf203394d456700f338778d0824c991ddfbd9 Mon Sep 17 00:00:00 2001 From: Gyu-Ho Lee Date: Tue, 7 Nov 2017 19:28:57 -0800 Subject: [PATCH] clientv3/integration: test linearizable get with leader election, network partition Test case that failed my balancer refactor https://github.com/coreos/etcd/pull/8834. Currently, kv network partition tests do not specifically test the isolated-leader case. This PR moves TestKVSwitchUnavailable to network_partition_test.go and makes it always isolate the leader. Signed-off-by: Gyu-Ho Lee --- clientv3/integration/kv_test.go | 26 ----------- .../integration/network_partition_test.go | 45 +++++++++++++++++++ 2 files changed, 45 insertions(+), 26 deletions(-) diff --git a/clientv3/integration/kv_test.go b/clientv3/integration/kv_test.go index b347d6c2b..e6cb50346 100644 --- a/clientv3/integration/kv_test.go +++ b/clientv3/integration/kv_test.go @@ -937,29 +937,3 @@ func TestKVPutAtMostOnce(t *testing.T) { t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0]) } } - -func TestKVSwitchUnavailable(t *testing.T) { - defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true}) - defer clus.Terminate(t) - - clus.Members[0].InjectPartition(t, clus.Members[1:]...) 
- // try to connect with dead node in the endpoint list - cfg := clientv3.Config{ - Endpoints: []string{ - clus.Members[0].GRPCAddr(), - clus.Members[1].GRPCAddr(), - }, - DialTimeout: 1 * time.Second} - cli, err := clientv3.New(cfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - timeout := 3 * clus.Members[0].ServerConfig.ReqTimeout() - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - if _, err := cli.Get(ctx, "abc"); err != nil { - t.Fatal(err) - } - cancel() -} diff --git a/clientv3/integration/network_partition_test.go b/clientv3/integration/network_partition_test.go index 50d9d418c..a71c4cea3 100644 --- a/clientv3/integration/network_partition_test.go +++ b/clientv3/integration/network_partition_test.go @@ -146,6 +146,51 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c } } +// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures balancer +// switches endpoint when leader fails and linearizable get requests return +// "etcdserver: request timed out". 
+func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + lead := clus.WaitLeader(t) + + timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout() + + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{eps[(lead+1)%2]}, + DialTimeout: 1 * time.Second, + }) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // wait for non-leader to be pinned + mustWaitPinReady(t, cli) + + // add all eps to list, so that when the original pinned one fails + // the client can switch to other available eps + cli.SetEndpoints(eps[lead], eps[(lead+1)%2]) + + // isolate leader + clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3]) + + // expects balancer endpoint switch while ongoing leader election + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + _, err = cli.Get(ctx, "a") + cancel() + if err != nil { + t.Fatal(err) + } +} + func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) { testBalancerUnderNetworkPartitionWatch(t, true) }