mirror of https://github.com/etcd-io/etcd.git
Merge pull request #8839 from gyuho/test-balancer
clientv3/integration: test linearizable get with leader election, network partition
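The PR distinguishes linearizable from serializable reads: a plain clientv3 Get is linearizable, meaning it is ordered through the Raft leader and can block or time out while a leader election is in progress, whereas a serializable Get is served from any member's local state. A minimal sketch of the two read modes, assuming a reachable member at localhost:2379 (an illustration, not part of the diff) and the github.com/coreos/etcd/clientv3 import path of this era:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumption: a local member, for illustration only
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
	defer cancel()

	// Linearizable (default): ordered through the Raft leader, so it can
	// return "etcdserver: request timed out" during a leader election.
	if _, err := cli.Get(ctx, "a"); err != nil {
		fmt.Println("linearizable get:", err)
	}

	// Serializable: answered from the connected member's local store,
	// with no leader round trip.
	if _, err := cli.Get(ctx, "a", clientv3.WithSerializable()); err != nil {
		fmt.Println("serializable get:", err)
	}
}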
commit dfe0f8c2bc
@@ -937,29 +937,3 @@ func TestKVPutAtMostOnce(t *testing.T) {
 		t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
 	}
 }
-
-func TestKVSwitchUnavailable(t *testing.T) {
-	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
-	defer clus.Terminate(t)
-
-	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
-	// try to connect with dead node in the endpoint list
-	cfg := clientv3.Config{
-		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-		},
-		DialTimeout: 1 * time.Second}
-	cli, err := clientv3.New(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer cli.Close()
-	timeout := 3 * clus.Members[0].ServerConfig.ReqTimeout()
-	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
-	if _, err := cli.Get(ctx, "abc"); err != nil {
-		t.Fatal(err)
-	}
-	cancel()
-}
@@ -146,6 +146,51 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 	}
 }
+
+// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures the
+// balancer switches endpoints when the leader fails and a linearizable get
+// request returns "etcdserver: request timed out".
+func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
+	defer testutil.AfterTest(t)
+
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+		Size:               3,
+		SkipCreatingClient: true,
+	})
+	defer clus.Terminate(t)
+	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+
+	lead := clus.WaitLeader(t)
+
+	timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()
+
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{eps[(lead+1)%2]},
+		DialTimeout: 1 * time.Second,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cli.Close()
+
+	// wait for the non-leader to be pinned
+	mustWaitPinReady(t, cli)
+
+	// add all eps to the list, so that when the original pinned one fails
+	// the client can switch to other available eps
+	cli.SetEndpoints(eps[lead], eps[(lead+1)%2])
+
+	// isolate leader
+	clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3])
+
+	// expect a balancer endpoint switch during the ongoing leader election
+	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+	_, err = cli.Get(ctx, "a")
+	cancel()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
 
 func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) {
 	testBalancerUnderNetworkPartitionWatch(t, true)
 }
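The new test relies on mustWaitPinReady, a helper defined elsewhere in the clientv3/integration package and not shown in this diff. A plausible sketch of it, assuming the usual pattern of issuing one bounded Get so the test only proceeds once the balancer has pinned a ready endpoint (the key name and the 3-second bound are assumptions, not taken from this diff):

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// mustWaitPinReady blocks until the client's balancer has pinned a ready
// endpoint, by issuing a single Get bounded by a short timeout.
// Sketch only: the real helper lives elsewhere in this package.
func mustWaitPinReady(t *testing.T, cli *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	_, err := cli.Get(ctx, "foo") // any key works; only connection readiness matters
	cancel()
	if err != nil {
		t.Fatal(err)
	}
}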