Mirror of https://github.com/etcd-io/etcd.git
Merge pull request #15592 from serathius/cleanup-endpoints
tests: Cleanup endpoints
This commit is contained in: commit 6f4e5f316e
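In short, this change renames the version-numbered endpoint accessors of the e2e test framework to protocol-based names (EndpointsV2 becomes EndpointsHTTP, EndpointsV3 becomes EndpointsGRPC) and deletes the old aliases from EtcdProcess, EtcdProcessCluster, the proxy process, and the lazy-cluster helpers, updating all test callers. A trimmed sketch of the resulting accessor surface, reconstructed from the hunks below (not the complete interface):

package e2e

// EtcdProcess is a process that serves etcd requests (trimmed here to the
// endpoint accessors touched by this change).
type EtcdProcess interface {
	// EndpointsGRPC returns the gRPC (v3 API) client endpoints; it replaces
	// the removed EndpointsV3 alias.
	EndpointsGRPC() []string
	// EndpointsHTTP returns the HTTP (v2 API) client endpoints; it replaces
	// the removed EndpointsV2 alias.
	EndpointsHTTP() []string
	EndpointsMetrics() []string
}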
@@ -155,7 +155,7 @@ func startEtcd(t *testing.T, ep e2e.EtcdProcess, execPath string) {
 }
 
 func downgradeEnable(t *testing.T, epc *e2e.EtcdProcessCluster, ver *semver.Version) {
-	c, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	c, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 	testutils.ExecuteWithTimeout(t, 20*time.Second, func() {
 		err := c.DowngradeEnable(context.TODO(), ver.String())
@@ -195,7 +195,7 @@ func leader(t *testing.T, epc *e2e.EtcdProcessCluster) e2e.EtcdProcess {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()
 	for i := 0; i < len(epc.Procs); i++ {
-		endpoints := epc.Procs[i].EndpointsV3()
+		endpoints := epc.Procs[i].EndpointsGRPC()
 		cli, err := clientv3.New(clientv3.Config{
 			Endpoints: endpoints,
 			DialTimeout: 3 * time.Second,
@@ -61,7 +61,7 @@ func corruptTest(cx ctlCtx) {
 	time.Sleep(3 * time.Second)
 
 	cx.t.Log("connecting clientv3...")
-	eps := cx.epc.EndpointsV3()
+	eps := cx.epc.EndpointsGRPC()
 	cli1, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[1]}, DialTimeout: 3 * time.Second})
 	if err != nil {
 		cx.t.Fatal(err)
@@ -116,7 +116,7 @@ func TestPeriodicCheckDetectsCorruption(t *testing.T) {
 		}
 	})
 
-	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 
 	for i := 0; i < 10; i++ {
@@ -164,7 +164,7 @@ func TestCompactHashCheckDetectCorruption(t *testing.T) {
 		}
 	})
 
-	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 
 	for i := 0; i < 10; i++ {
@@ -69,9 +69,9 @@ func TestAuthCluster(t *testing.T) {
 	}
 
 	// make sure writes to both endpoints are successful
-	endpoints := epc.EndpointsV3()
+	endpoints := epc.EndpointsGRPC()
 	assert.Equal(t, len(endpoints), 2)
-	for _, endpoint := range epc.EndpointsV3() {
+	for _, endpoint := range epc.EndpointsGRPC() {
 		if err := epc.Client(testUserClientOpts, e2e.WithEndpoints([]string{endpoint})).Put(ctx, "/test/key", endpoint, config.PutOptions{}); err != nil {
 			t.Fatalf("failed to write to Put to %q (%v)", endpoint, err)
 		}
@@ -493,7 +493,7 @@ func authTestRevisionConsistency(cx ctlCtx) {
 
 	// get node0 auth revision
 	node0 := cx.epc.Procs[0]
-	endpoint := node0.EndpointsV3()[0]
+	endpoint := node0.EndpointsGRPC()[0]
 	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, Username: cx.user, Password: cx.pass, DialTimeout: 3 * time.Second})
 	if err != nil {
 		cx.t.Fatal(err)
@@ -575,7 +575,7 @@ func authTestCacheReload(cx ctlCtx) {
 	}
 
 	node0 := cx.epc.Procs[0]
-	endpoint := node0.EndpointsV3()[0]
+	endpoint := node0.EndpointsGRPC()[0]
 
 	// create a client
 	c, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, DialTimeout: 3 * time.Second})
@@ -78,7 +78,7 @@ func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig, envVars
 	var leadIdx int
 	var leaderID uint64
 	var transferee uint64
-	for i, ep := range epc.EndpointsV3() {
+	for i, ep := range epc.EndpointsGRPC() {
 		cli, err := clientv3.New(clientv3.Config{
 			Endpoints: []string{ep},
 			DialTimeout: 3 * time.Second,
@@ -117,17 +117,17 @@ func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig, envVars
 		expectErr bool
 	}{
 		{ // request to non-leader
-			[]string{cx.epc.EndpointsV3()[(leadIdx+1)%3]},
+			[]string{cx.epc.EndpointsGRPC()[(leadIdx+1)%3]},
 			"no leader endpoint given at ",
 			true,
 		},
 		{ // request to leader
-			[]string{cx.epc.EndpointsV3()[leadIdx]},
+			[]string{cx.epc.EndpointsGRPC()[leadIdx]},
 			fmt.Sprintf("Leadership transferred from %s to %s", types.ID(leaderID), types.ID(transferee)),
 			false,
 		},
 		{ // request to all endpoints
-			cx.epc.EndpointsV3(),
+			cx.epc.EndpointsGRPC(),
 			"Leadership transferred",
 			false,
 		},
@@ -186,7 +186,7 @@ func testIssue6361(t *testing.T) {
 	}()
 
 	dialTimeout := 10 * time.Second
-	prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}
+	prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ","), "--dial-timeout", dialTimeout.String()}
 
 	t.Log("Writing some keys...")
 	kvs := []kv{{"foo1", "val1"}, {"foo2", "val2"}, {"foo3", "val3"}}
@@ -111,7 +111,7 @@ func TestCtlV3DialWithHTTPScheme(t *testing.T) {
 }
 
 func dialWithSchemeTest(cx ctlCtx) {
-	cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar")
+	cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsGRPC()), "put", "foo", "bar")
 	if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil {
 		cx.t.Fatal(err)
 	}
@@ -331,7 +331,7 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string {
 // PrefixArgs prefixes etcdctl command.
 // Make sure to unset environment variables after tests.
 func (cx *ctlCtx) PrefixArgs() []string {
-	return cx.prefixArgs(cx.epc.EndpointsV3())
+	return cx.prefixArgs(cx.epc.EndpointsGRPC())
 }
 
 // PrefixArgsUtl returns prefix of the command that is etcdutl
@@ -53,7 +53,7 @@ func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) {
 	}
 	defer dc.Close()
 
-	dcc := MustNewHTTPClient(t, dc.EndpointsV2(), nil)
+	dcc := MustNewHTTPClient(t, dc.EndpointsHTTP(), nil)
 	dkapi := client.NewKeysAPI(dcc)
 	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
@@ -65,14 +65,14 @@ func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) {
 		e2e.WithBasePort(3000),
 		e2e.WithClusterSize(size),
 		e2e.WithIsPeerTLS(peerTLS),
-		e2e.WithDiscovery(dc.EndpointsV2()[0]+"/v2/keys"),
+		e2e.WithDiscovery(dc.EndpointsHTTP()[0]+"/v2/keys"),
 	)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
 	defer c.Close()
 
-	kubectl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(c.EndpointsV3(), ",")}
+	kubectl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(c.EndpointsGRPC(), ",")}
 	if err := e2e.SpawnWithExpect(append(kubectl, "put", "key", "value"), "OK"); err != nil {
 		t.Fatal(err)
 	}
@@ -68,14 +68,14 @@ func testClusterUsingV3Discovery(t *testing.T, discoveryClusterSize, targetClust
 	}
 
 	// step 3: start the etcd cluster
-	epc, err := bootstrapEtcdClusterUsingV3Discovery(t, ds.EndpointsV3(), discoveryToken, targetClusterSize, clientTlsType, isClientAutoTls)
+	epc, err := bootstrapEtcdClusterUsingV3Discovery(t, ds.EndpointsGRPC(), discoveryToken, targetClusterSize, clientTlsType, isClientAutoTls)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
 	defer epc.Close()
 
 	// step 4: sanity test on the etcd cluster
-	etcdctl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsV3(), ",")}
+	etcdctl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ",")}
 	if err := e2e.SpawnWithExpect(append(etcdctl, "put", "key", "value"), "OK"); err != nil {
 		t.Fatal(err)
 	}
@@ -411,7 +411,7 @@ func TestEtcdHealthyWithTinySnapshotCatchupEntries(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		clientId := i
 		g.Go(func() error {
-			cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+			cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 			if err != nil {
 				return err
 			}
@@ -34,7 +34,7 @@ func TestGateway(t *testing.T) {
 	}
 	defer ec.Stop()
 
-	eps := strings.Join(ec.EndpointsV3(), ",")
+	eps := strings.Join(ec.EndpointsGRPC(), ",")
 
 	p := startGateway(t, eps)
 	defer func() {
@@ -42,7 +42,7 @@ func TestWarningApplyDuration(t *testing.T) {
 		}
 	})
 
-	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	require.NoError(t, err)
 	err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{})
 	assert.NoError(t, err, "error on put")
@@ -70,7 +70,7 @@ func TestExperimentalWarningApplyDuration(t *testing.T) {
 		}
 	})
 
-	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	require.NoError(t, err)
 	err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{})
 	assert.NoError(t, err, "error on put")
@@ -135,7 +135,7 @@ func TestEtctlutlMigrate(t *testing.T) {
 			}()
 
 			dialTimeout := 10 * time.Second
-			prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}
+			prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ","), "--dial-timeout", dialTimeout.String()}
 
 			t.Log("Write keys to ensure wal snapshot is created and all v3.5 fields are set...")
 			for i := 0; i < 10; i++ {
@@ -115,13 +115,13 @@ func TestV2DeprecationSnapshotMatches(t *testing.T) {
 	var snapshotCount uint64 = 10
 	epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, lastReleaseData, snapshotCount)
 	oldMemberDataDir := epc.Procs[0].Config().DataDirPath
-	cc1, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc1, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 	members1 := addAndRemoveKeysAndMembers(ctx, t, cc1, snapshotCount)
 	assert.NoError(t, epc.Close())
 	epc = runEtcdAndCreateSnapshot(t, e2e.CurrentVersion, currentReleaseData, snapshotCount)
 	newMemberDataDir := epc.Procs[0].Config().DataDirPath
-	cc2, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc2, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 	members2 := addAndRemoveKeysAndMembers(ctx, t, cc2, snapshotCount)
 	assert.NoError(t, epc.Close())
@@ -152,7 +152,7 @@ func TestV2DeprecationSnapshotRecover(t *testing.T) {
 	}
 	epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, dataDir, 10)
 
-	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 
 	lastReleaseGetResponse, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})
@@ -169,7 +169,7 @@ func TestV2DeprecationSnapshotRecover(t *testing.T) {
 	epc, err = e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
 	assert.NoError(t, err)
 
-	cc, err = e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
+	cc, err = e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
 	assert.NoError(t, err)
 	currentReleaseGetResponse, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})
 	assert.NoError(t, err)
@@ -89,7 +89,7 @@ func TestWatchDelayForPeriodicProgressNotification(t *testing.T) {
 			clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(&tc.config))
 			require.NoError(t, err)
 			defer clus.Close()
-			c := newClient(t, clus.EndpointsV3(), tc.config.Client)
+			c := newClient(t, clus.EndpointsGRPC(), tc.config.Client)
 			require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
 
 			ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
@@ -109,7 +109,7 @@ func TestWatchDelayForManualProgressNotification(t *testing.T) {
 			clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(&tc.config))
 			require.NoError(t, err)
 			defer clus.Close()
-			c := newClient(t, clus.EndpointsV3(), tc.config.Client)
+			c := newClient(t, clus.EndpointsGRPC(), tc.config.Client)
 			require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
 
 			ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
@@ -142,7 +142,7 @@ func TestWatchDelayForEvent(t *testing.T) {
 			clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(&tc.config))
 			require.NoError(t, err)
 			defer clus.Close()
-			c := newClient(t, clus.EndpointsV3(), tc.config.Client)
+			c := newClient(t, clus.EndpointsGRPC(), tc.config.Client)
 			require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
 
 			ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
@@ -702,14 +702,6 @@ func (cfg *EtcdProcessClusterConfig) TlsArgs() (args []string) {
 	return args
 }
 
-func (epc *EtcdProcessCluster) EndpointsV2() []string {
-	return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV2() })
-}
-
-func (epc *EtcdProcessCluster) EndpointsV3() []string {
-	return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV3() })
-}
-
 func (epc *EtcdProcessCluster) EndpointsGRPC() []string {
 	return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsGRPC() })
 }
@@ -887,7 +879,7 @@ func (epc *EtcdProcessCluster) Stop() (err error) {
 }
 
 func (epc *EtcdProcessCluster) Client(opts ...config.ClientOption) *EtcdctlV3 {
-	etcdctl, err := NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3(), opts...)
+	etcdctl, err := NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC(), opts...)
 	if err != nil {
 		panic(err)
 	}
@@ -58,8 +58,6 @@ func NewProxyEtcdProcess(cfg *EtcdServerProcessConfig) (*proxyEtcdProcess, error
 
 func (p *proxyEtcdProcess) Config() *EtcdServerProcessConfig { return p.etcdProc.Config() }
 
-func (p *proxyEtcdProcess) EndpointsV2() []string { return p.EndpointsHTTP() }
-func (p *proxyEtcdProcess) EndpointsV3() []string { return p.EndpointsGRPC() }
 func (p *proxyEtcdProcess) EndpointsHTTP() []string { return p.proxyV2.endpoints() }
 func (p *proxyEtcdProcess) EndpointsGRPC() []string { return p.proxyV3.endpoints() }
 func (p *proxyEtcdProcess) EndpointsMetrics() []string {
@@ -103,7 +101,7 @@ func (p *proxyEtcdProcess) Close() error {
 }
 
 func (p *proxyEtcdProcess) Client(opts ...config.ClientOption) *EtcdctlV3 {
-	etcdctl, err := NewEtcdctl(p.etcdProc.Config().Client, p.etcdProc.EndpointsV3(), opts...)
+	etcdctl, err := NewEtcdctl(p.etcdProc.Config().Client, p.etcdProc.EndpointsGRPC(), opts...)
 	if err != nil {
 		panic(err)
 	}
@@ -96,12 +96,12 @@ type e2eCluster struct {
 }
 
 func (c *e2eCluster) Client(opts ...config.ClientOption) (intf.Client, error) {
-	etcdctl, err := NewEtcdctl(c.Cfg.Client, c.EndpointsV3(), opts...)
+	etcdctl, err := NewEtcdctl(c.Cfg.Client, c.EndpointsGRPC(), opts...)
 	return e2eClient{etcdctl}, err
 }
 
 func (c *e2eCluster) Endpoints() []string {
-	return c.EndpointsV3()
+	return c.EndpointsGRPC()
 }
 
 func (c *e2eCluster) Members() (ms []intf.Member) {
@@ -121,7 +121,7 @@ type e2eMember struct {
 }
 
 func (m e2eMember) Client() intf.Client {
-	etcdctl, err := NewEtcdctl(m.Cfg.Client, m.EndpointsV3())
+	etcdctl, err := NewEtcdctl(m.Cfg.Client, m.EndpointsGRPC())
 	if err != nil {
 		panic(err)
 	}
@@ -42,8 +42,6 @@ var (
 
 // EtcdProcess is a process that serves etcd requests.
 type EtcdProcess interface {
-	EndpointsV2() []string
-	EndpointsV3() []string
 	EndpointsGRPC() []string
 	EndpointsHTTP() []string
 	EndpointsMetrics() []string
@@ -117,8 +115,6 @@ func NewEtcdServerProcess(cfg *EtcdServerProcessConfig) (*EtcdServerProcess, err
 	return ep, nil
 }
 
-func (ep *EtcdServerProcess) EndpointsV2() []string { return ep.EndpointsHTTP() }
-func (ep *EtcdServerProcess) EndpointsV3() []string { return ep.EndpointsGRPC() }
 func (ep *EtcdServerProcess) EndpointsGRPC() []string { return []string{ep.cfg.ClientURL} }
 func (ep *EtcdServerProcess) EndpointsHTTP() []string {
 	if ep.cfg.ClientHTTPURL == "" {
@@ -129,7 +125,7 @@ func (ep *EtcdServerProcess) EndpointsHTTP() []string {
 func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.MetricsURL} }
 
 func (epc *EtcdServerProcess) Client(opts ...config.ClientOption) *EtcdctlV3 {
-	etcdctl, err := NewEtcdctl(epc.Config().Client, epc.EndpointsV3(), opts...)
+	etcdctl, err := NewEtcdctl(epc.Config().Client, epc.EndpointsGRPC(), opts...)
 	if err != nil {
 		panic(err)
 	}
@@ -24,7 +24,7 @@ import (
 
 var lazyCluster = integration.NewLazyCluster()
 
-func exampleEndpoints() []string { return lazyCluster.EndpointsV3() }
+func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() }
 
 func forUnitTestsRunInMockedContext(mocking func(), example func()) {
 	// For integration tests runs in the provided environment
@@ -36,7 +36,7 @@ var lazyCluster = integration.NewLazyClusterWithConfig(
 		WatchProgressNotifyInterval: 200 * time.Millisecond,
 		DisableStrictReconfigCheck: true})
 
-func exampleEndpoints() []string { return lazyCluster.EndpointsV3() }
+func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() }
 
 func forUnitTestsRunInMockedContext(_ func(), example func()) {
 	// For integration tests runs in the provided environment
@@ -34,13 +34,13 @@ import (
 // Prefer dedicated clusters for substancial test-cases.
 
 type LazyCluster interface {
-	// EndpointsV2 - exposes connection points for client v2.
+	// EndpointsHTTP - exposes connection points for http endpoints.
 	// Calls to this method might initialize the cluster.
-	EndpointsV2() []string
+	EndpointsHTTP() []string
 
-	// EndpointsV3 - exposes connection points for client v3.
+	// EndpointsGRPC - exposes connection points for client v3.
 	// Calls to this method might initialize the cluster.
-	EndpointsV3() []string
+	EndpointsGRPC() []string
 
 	// Cluster - calls to this method might initialize the cluster.
 	Cluster() *integration.Cluster
@@ -100,11 +100,11 @@ func (lc *lazyCluster) Terminate() {
 	}
 }
 
-func (lc *lazyCluster) EndpointsV2() []string {
+func (lc *lazyCluster) EndpointsHTTP() []string {
 	return []string{lc.Cluster().Members[0].URL()}
 }
 
-func (lc *lazyCluster) EndpointsV3() []string {
+func (lc *lazyCluster) EndpointsGRPC() []string {
 	return lc.Cluster().Client(0).Endpoints()
 }
 
@@ -238,7 +238,7 @@ func (f goPanicFailpoint) Name() string {
 
 func triggerDefrag(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _ *e2e.EtcdProcessCluster) error {
 	cc, err := clientv3.New(clientv3.Config{
-		Endpoints: member.EndpointsV3(),
+		Endpoints: member.EndpointsGRPC(),
 		Logger: zap.NewNop(),
 		DialKeepAliveTime: 1 * time.Millisecond,
 		DialKeepAliveTimeout: 5 * time.Millisecond,
@@ -247,7 +247,7 @@ func triggerDefrag(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _
 		return fmt.Errorf("failed creating client: %w", err)
 	}
 	defer cc.Close()
-	_, err = cc.Defragment(ctx, member.EndpointsV3()[0])
+	_, err = cc.Defragment(ctx, member.EndpointsGRPC()[0])
 	if err != nil && !strings.Contains(err.Error(), "error reading from server: EOF") {
 		return err
 	}
@@ -256,7 +256,7 @@ func triggerDefrag(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _
 
 func triggerCompact(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _ *e2e.EtcdProcessCluster) error {
 	cc, err := clientv3.New(clientv3.Config{
-		Endpoints: member.EndpointsV3(),
+		Endpoints: member.EndpointsGRPC(),
 		Logger: zap.NewNop(),
 		DialKeepAliveTime: 1 * time.Millisecond,
 		DialKeepAliveTimeout: 5 * time.Millisecond,
@@ -343,7 +343,7 @@ func triggerBlackhole(t *testing.T, ctx context.Context, member e2e.EtcdProcess,
 
 func waitTillSnapshot(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, blackholedMember e2e.EtcdProcess) error {
 	var endpoints []string
-	for _, ep := range clus.EndpointsV3() {
+	for _, ep := range clus.EndpointsGRPC() {
 		if ep == blackholedMember.Config().ClientURL {
 			continue
 		}
@@ -56,7 +56,7 @@ const (
 
 func simulateTraffic(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, config trafficConfig) []porcupine.Operation {
 	mux := sync.Mutex{}
-	endpoints := clus.EndpointsV3()
+	endpoints := clus.EndpointsGRPC()
 
 	ids := identity.NewIdProvider()
 	lm := identity.NewLeaseIdStorage()
@@ -38,7 +38,7 @@ func collectClusterWatchEvents(ctx context.Context, t *testing.T, clus *e2e.Etcd
 	memberMaxRevisionChans := make([]chan int64, len(clus.Procs))
 	for i, member := range clus.Procs {
 		c, err := clientv3.New(clientv3.Config{
-			Endpoints: member.EndpointsV3(),
+			Endpoints: member.EndpointsGRPC(),
 			Logger: zap.NewNop(),
 			DialKeepAliveTime: 1 * time.Millisecond,
 			DialKeepAliveTimeout: 5 * time.Millisecond,
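For reference, a hedged sketch of how a test obtains a gRPC client after this rename. The helpers it relies on (e2e.NewEtcdProcessCluster, e2e.NewEtcdctl, EndpointsGRPC, config.PutOptions, epc.Cfg.Client) all appear in the hunks above; the test name, import paths, and overall body are illustrative assumptions, not part of this commit:

package e2e_test // illustrative placement, not part of this commit

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"go.etcd.io/etcd/tests/v3/framework/config"
	"go.etcd.io/etcd/tests/v3/framework/e2e"
)

func TestPutViaGRPCEndpoints(t *testing.T) {
	// Start a default e2e process cluster (options omitted for brevity).
	epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t)
	require.NoError(t, err)
	defer epc.Close()

	// Formerly epc.EndpointsV3(); the accessor is now named after the protocol it serves.
	cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC())
	require.NoError(t, err)
	require.NoError(t, cc.Put(context.TODO(), "foo", "bar", config.PutOptions{}))
}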