Merge pull request #15617 from serathius/robustness-client-refactor

tests: Make using etcdctl explicit in e2e tests
Marek Siarkowicz 2023-04-07 10:51:01 +02:00 committed by GitHub
commit 535ff9638b
6 changed files with 17 additions and 17 deletions
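
The change is mechanical: every e2e helper that used to hand out its etcdctl-backed client through a generic Client() method now exposes it as Etcdctl(), and all call sites are updated accordingly. A minimal sketch of the renamed call site in Go, assuming the repository's e2e and config test-framework packages (the import paths and helper function below are illustrative, not part of the commit):

package example

import (
	"context"

	"go.etcd.io/etcd/tests/v3/framework/config" // assumed import path
	"go.etcd.io/etcd/tests/v3/framework/e2e"    // assumed import path
)

// putViaEtcdctl is a hypothetical call site. Before this commit it would
// have read epc.Client().Put(...); afterwards the etcdctl-backed client is
// requested explicitly via Etcdctl().
func putViaEtcdctl(ctx context.Context, epc *e2e.EtcdProcessCluster) error {
	return epc.Etcdctl().Put(ctx, "k", "v", config.PutOptions{})
}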


@@ -46,7 +46,7 @@ func TestAuthCluster(t *testing.T) {
}
}()
- epcClient := epc.Client()
+ epcClient := epc.Etcdctl()
createUsers(ctx, t, epcClient)
if err := epcClient.AuthEnable(ctx); err != nil {
@@ -58,7 +58,7 @@ func TestAuthCluster(t *testing.T) {
// write more than SnapshotCount keys to single leader to make sure snapshot is created
for i := 0; i <= 10; i++ {
- if err := epc.Client(testUserClientOpts).Put(ctx, fmt.Sprintf("/test/%d", i), "test", config.PutOptions{}); err != nil {
+ if err := epc.Etcdctl(testUserClientOpts).Put(ctx, fmt.Sprintf("/test/%d", i), "test", config.PutOptions{}); err != nil {
t.Fatalf("failed to Put (%v)", err)
}
}
@@ -72,14 +72,14 @@ func TestAuthCluster(t *testing.T) {
endpoints := epc.EndpointsGRPC()
assert.Equal(t, len(endpoints), 2)
for _, endpoint := range epc.EndpointsGRPC() {
- if err := epc.Client(testUserClientOpts, e2e.WithEndpoints([]string{endpoint})).Put(ctx, "/test/key", endpoint, config.PutOptions{}); err != nil {
+ if err := epc.Etcdctl(testUserClientOpts, e2e.WithEndpoints([]string{endpoint})).Put(ctx, "/test/key", endpoint, config.PutOptions{}); err != nil {
t.Fatalf("failed to write to Put to %q (%v)", endpoint, err)
}
}
// verify all nodes have exact same revision and hash
assert.Eventually(t, func() bool {
- hashKvs, err := epc.Client(rootUserClientOpts).HashKV(ctx, 0)
+ hashKvs, err := epc.Etcdctl(rootUserClientOpts).HashKV(ctx, 0)
if err != nil {
t.Logf("failed to get HashKV: %v", err)
return false
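
The flow in this hunk — enable auth through the default client, then write keys with per-user client options — could be read as the following standalone helper (an illustrative sketch only, reusing the calls shown above; it assumes the same framework imports as the sketch near the top plus fmt and testing, and it omits the createUsers step):

// enableAuthAndWrite mirrors the test above: enable auth via the default
// etcdctl client, then issue Puts with caller-supplied client options such
// as the test's testUserClientOpts. Not part of the commit.
func enableAuthAndWrite(ctx context.Context, t *testing.T, epc *e2e.EtcdProcessCluster, userOpts config.ClientOption) {
	if err := epc.Etcdctl().AuthEnable(ctx); err != nil {
		t.Fatalf("failed to enable auth: %v", err)
	}
	for i := 0; i <= 10; i++ {
		if err := epc.Etcdctl(userOpts).Put(ctx, fmt.Sprintf("/test/%d", i), "test", config.PutOptions{}); err != nil {
			t.Fatalf("failed to Put (%v)", err)
		}
	}
}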


@@ -67,7 +67,7 @@ func TestCtlV3AuthCertCNWithWithConcurrentOperation(t *testing.T) {
}
}()
- epcClient := epc.Client()
+ epcClient := epc.Etcdctl()
t.Log("Create users")
createUsers(ctx, t, epcClient)


@@ -83,7 +83,7 @@ func mixVersionsSnapshotTestByAddingMember(t *testing.T, clusterVersion, newInst
for i := 0; i < 20; i++ {
key := fmt.Sprintf("key-%d", i)
value := fmt.Sprintf("value-%d", i)
- err := epc.Client().Put(context.TODO(), key, value, config.PutOptions{})
+ err := epc.Etcdctl().Put(context.TODO(), key, value, config.PutOptions{})
require.NoError(t, err, "failed to put %q, error: %v", key, err)
}
@@ -99,7 +99,7 @@ func mixVersionsSnapshotTestByAddingMember(t *testing.T, clusterVersion, newInst
// verify all nodes have exact same revision and hash
t.Log("Verify all nodes have exact same revision and hash")
assert.Eventually(t, func() bool {
- hashKvs, err := epc.Client().HashKV(context.TODO(), 0)
+ hashKvs, err := epc.Etcdctl().HashKV(context.TODO(), 0)
if err != nil {
t.Logf("failed to get HashKV: %v", err)
return false
@@ -170,7 +170,7 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, clusterVersion e2e.Clu
for i := 0; i < 20; i++ {
key := fmt.Sprintf("key-%d", i)
value := fmt.Sprintf("value-%d", i)
- err := epc.Client().Put(context.TODO(), key, value, config.PutOptions{})
+ err := epc.Etcdctl().Put(context.TODO(), key, value, config.PutOptions{})
require.NoError(t, err, "failed to put %q, error: %v", key, err)
}
@@ -185,7 +185,7 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, clusterVersion e2e.Clu
// verify all nodes have exact same revision and hash
t.Log("Verify all nodes have exact same revision and hash")
assert.Eventually(t, func() bool {
- hashKvs, err := epc.Client().HashKV(context.TODO(), 0)
+ hashKvs, err := epc.Etcdctl().HashKV(context.TODO(), 0)
if err != nil {
t.Logf("failed to get HashKV: %v", err)
return false
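
Both snapshot tests finish with the same consistency check; as a standalone helper it looks roughly like this (a sketch: the timeout and poll interval are illustrative, the hash/revision comparison across members is elided, and github.com/stretchr/testify/assert plus the standard time package are assumed in addition to the imports above):

// verifyHashKVEventually polls HashKV through the renamed Etcdctl() client
// until the call succeeds, retrying on transient errors. The real tests go
// on to compare the returned hashes and revisions across all members.
func verifyHashKVEventually(t *testing.T, epc *e2e.EtcdProcessCluster) {
	assert.Eventually(t, func() bool {
		hashKvs, err := epc.Etcdctl().HashKV(context.TODO(), 0)
		if err != nil {
			t.Logf("failed to get HashKV: %v", err)
			return false
		}
		_ = hashKvs // per-member hash/revision comparison elided in this sketch
		return true
	}, 10*time.Second, 500*time.Millisecond) // illustrative timing
}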


@@ -743,7 +743,7 @@ func (epc *EtcdProcessCluster) CloseProc(ctx context.Context, finder func(EtcdPr
// First remove member from the cluster
- memberCtl := epc.Client(opts...)
+ memberCtl := epc.Etcdctl(opts...)
memberList, err := memberCtl.MemberList(ctx, false)
if err != nil {
return fmt.Errorf("failed to get member list: %w", err)
@@ -795,7 +795,7 @@ func (epc *EtcdProcessCluster) StartNewProc(ctx context.Context, cfg *EtcdProces
epc.Cfg.SetInitialOrDiscovery(serverCfg, initialCluster, "existing")
// First add new member to cluster
- memberCtl := epc.Client(opts...)
+ memberCtl := epc.Etcdctl(opts...)
_, err := memberCtl.MemberAdd(ctx, serverCfg.Name, []string{serverCfg.PeerURL.String()})
if err != nil {
return fmt.Errorf("failed to add new member: %w", err)
@@ -878,7 +878,7 @@ func (epc *EtcdProcessCluster) Stop() (err error) {
return err
}
- func (epc *EtcdProcessCluster) Client(opts ...config.ClientOption) *EtcdctlV3 {
+ func (epc *EtcdProcessCluster) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
etcdctl, err := NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC(), opts...)
if err != nil {
panic(err)
@@ -924,7 +924,7 @@ func (epc *EtcdProcessCluster) WaitLeader(t testing.TB) int {
// WaitMembersForLeader waits until given members agree on the same leader,
// and returns its 'index' in the 'membs' list
func (epc *EtcdProcessCluster) WaitMembersForLeader(ctx context.Context, t testing.TB, membs []EtcdProcess) int {
- cc := epc.Client()
+ cc := epc.Etcdctl()
// ensure leader is up via linearizable get
for {
@@ -949,7 +949,7 @@ func (epc *EtcdProcessCluster) WaitMembersForLeader(ctx context.Context, t testi
default:
}
for i := range membs {
- resp, err := membs[i].Client().Status(ctx)
+ resp, err := membs[i].Etcdctl().Status(ctx)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
// if member[i] has stopped
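
In the cluster helpers the rename is equally mechanical: Etcdctl() still builds an EtcdctlV3 via NewEtcdctl against the cluster's gRPC endpoints, and member management keeps going through it. A sketch of a caller using only the calls visible in this hunk (the helper name is illustrative):

// listMembersViaEtcdctl obtains the cluster's etcdctl-backed client through
// the renamed accessor and issues a membership query with it, the same way
// CloseProc does above before removing a member.
func listMembersViaEtcdctl(ctx context.Context, epc *e2e.EtcdProcessCluster) error {
	memberCtl := epc.Etcdctl()
	memberList, err := memberCtl.MemberList(ctx, false)
	if err != nil {
		return fmt.Errorf("failed to get member list: %w", err)
	}
	_ = memberList // CloseProc inspects this list to find the member to remove
	return nil
}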


@@ -100,7 +100,7 @@ func (p *proxyEtcdProcess) Close() error {
return err
}
- func (p *proxyEtcdProcess) Client(opts ...config.ClientOption) *EtcdctlV3 {
+ func (p *proxyEtcdProcess) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
etcdctl, err := NewEtcdctl(p.etcdProc.Config().Client, p.etcdProc.EndpointsGRPC(), opts...)
if err != nil {
panic(err)


@@ -45,7 +45,7 @@ type EtcdProcess interface {
EndpointsGRPC() []string
EndpointsHTTP() []string
EndpointsMetrics() []string
- Client(opts ...config.ClientOption) *EtcdctlV3
+ Etcdctl(opts ...config.ClientOption) *EtcdctlV3
IsRunning() bool
Wait(ctx context.Context) error
@@ -124,7 +124,7 @@ func (ep *EtcdServerProcess) EndpointsHTTP() []string {
}
func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.MetricsURL} }
- func (epc *EtcdServerProcess) Client(opts ...config.ClientOption) *EtcdctlV3 {
+ func (epc *EtcdServerProcess) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
etcdctl, err := NewEtcdctl(epc.Config().Client, epc.EndpointsGRPC(), opts...)
if err != nil {
panic(err)
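
Since Etcdctl is now part of the EtcdProcess interface, per-member helpers can stay agnostic of the concrete process type; both EtcdServerProcess and the proxy wrapper expose the client the same way. A final sketch of such a caller (helper name illustrative, calls taken from the hunks above):

// memberStatus works against any EtcdProcess implementation, since the
// interface-level Etcdctl method returns the etcdctl-backed client for
// the member's gRPC endpoints.
func memberStatus(ctx context.Context, member e2e.EtcdProcess) error {
	_, err := member.Etcdctl().Status(ctx)
	return err
}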