ordering: use default clients to populate etcd data

Switching endpoints on the same client was triggering balancer
reconnect errors; that reconnect behavior belongs to the
clientv3/integration tests, not to the ordering tests.
Anthony Romano 2017-07-31 17:15:21 -07:00
parent 4669aaa9a2
commit 10db0319d1
2 changed files with 14 additions and 29 deletions
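
The change in a nutshell: the old code re-pointed a single client at
one member at a time via SetEndpoints, and every switch churns the gRPC
balancer's connection; the new code seeds data through the integration
harness's per-member default clients, which already hold established
connections. A minimal self-contained sketch of the new pattern
(hypothetical test, not part of the commit; assumes the 2017-era import
paths and that clus.Client(i) returns the harness's pre-built client
for member i):

	package ordering_test

	import (
		"context"
		"testing"

		"github.com/coreos/etcd/integration"
		"github.com/coreos/etcd/pkg/testutil"
	)

	// hypothetical example: seed data through the per-member default
	// clients so no SetEndpoints call (and no balancer reconnect) is
	// ever needed
	func TestSeedViaDefaultClients(t *testing.T) {
		defer testutil.AfterTest(t)
		clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
		defer clus.Terminate(t)

		ctx := context.TODO()
		if _, err := clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
			t.Fatal(err)
		}
		// the default Get is linearizable, so it also guarantees that
		// member 1 has caught up to the current revision of "foo"
		if _, err := clus.Client(1).Get(ctx, "foo"); err != nil {
			t.Fatal(err)
		}
	}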

clientv3/ordering/kv_test.go

@@ -45,15 +45,11 @@ func TestDetectKvOrderViolation(t *testing.T) {
 	cli, err := clientv3.New(cfg)
 	ctx := context.TODO()
 
-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
-	_, err = cli.Put(ctx, "foo", "bar")
-	if err != nil {
+	if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
 		t.Fatal(err)
 	}
 	// ensure that the second member has the current revision for the key foo
-	cli.SetEndpoints(clus.Members[1].GRPCAddr())
-	_, err = cli.Get(ctx, "foo")
-	if err != nil {
+	if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
 		t.Fatal(err)
 	}
@@ -107,23 +103,18 @@ func TestDetectTxnOrderViolation(t *testing.T) {
 	cli, err := clientv3.New(cfg)
 	ctx := context.TODO()
 
-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
-	_, err = cli.Put(ctx, "foo", "bar")
-	if err != nil {
+	if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
 		t.Fatal(err)
 	}
 	// ensure that the second member has the current revision for the key foo
-	cli.SetEndpoints(clus.Members[1].GRPCAddr())
-	_, err = cli.Get(ctx, "foo")
-	if err != nil {
+	if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
 		t.Fatal(err)
 	}
 	// stop third member in order to force the member to have an outdated revision
 	clus.Members[2].Stop(t)
 	time.Sleep(1 * time.Second) // give enough time for operation
-	_, err = cli.Put(ctx, "foo", "buzz")
-	if err != nil {
+	if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil {
 		t.Fatal(err)
 	}
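
The Stop-then-Put sequence above is what manufactures the ordering
violation: member 2 misses the "buzz" update and comes back serving an
older revision of "foo". A rough sketch of how the rest of such a test
detects it, continuing with this test's cli and ctx (the ordering.NewKV
wrapper, its OrderViolationFunc signature, and the member Restart call
are assumed from the era's clientv3/ordering and integration packages,
plus stdlib errors):

	// record the current revision of "foo" while reading through a
	// healthy member
	errOrderViolation := errors.New("detected order violation")
	okv := ordering.NewKV(cli.KV, func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
		return errOrderViolation
	})
	if _, err := okv.Get(ctx, "foo"); err != nil {
		t.Fatal(err)
	}

	// bring the stale member back and force the next read onto it; a
	// serializable Get is answered locally, so the wrapper now sees a
	// lower revision than it just recorded and reports the violation
	if err := clus.Members[2].Restart(t); err != nil {
		t.Fatal(err)
	}
	cli.SetEndpoints(clus.Members[2].GRPCAddr())
	if _, err := okv.Get(ctx, "foo", clientv3.WithSerializable()); err != errOrderViolation {
		t.Fatalf("expected order violation, got %v", err)
	}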

clientv3/ordering/util_test.go

@@ -28,26 +28,21 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-	cfg := clientv3.Config{
-		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-			clus.Members[2].GRPCAddr(),
-		},
+	eps := []string{
+		clus.Members[0].GRPCAddr(),
+		clus.Members[1].GRPCAddr(),
+		clus.Members[2].GRPCAddr(),
 	}
+	cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCAddr()}}
 	cli, err := clientv3.New(cfg)
-	eps := cli.Endpoints()
 	ctx := context.TODO()
 
-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
-	_, err = cli.Put(ctx, "foo", "bar")
-	if err != nil {
+	if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
 		t.Fatal(err)
 	}
 	// ensure that the second member has current revision for key "foo"
-	cli.SetEndpoints(clus.Members[1].GRPCAddr())
-	_, err = cli.Get(ctx, "foo")
-	if err != nil {
+	if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
 		t.Fatal(err)
 	}
@@ -58,8 +53,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	time.Sleep(1 * time.Second) // give enough time for the operation
 	// update to "foo" will not be replicated to the third member due to the partition
-	_, err = cli.Put(ctx, "foo", "buzz")
-	if err != nil {
+	if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil {
 		t.Fatal(err)
 	}
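
A detail worth flagging in this file: eps still collects all three
member addresses even though cfg now starts the client on member 0
alone. Go rejects unused variables, so eps must be consumed later in
the test, outside these hunks; given the test's name, the natural
reading is that the client is eventually handed the full list again so
that switching endpoints can resolve the violation, along the lines of
this hypothetical continuation:

	// after the ordering violation fires against the stale member,
	// restore every endpoint so reads can land on an up-to-date member
	cli.SetEndpoints(eps...)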