diff --git a/tests/e2e/cluster_test.go b/tests/e2e/cluster_test.go
deleted file mode 100644
index eb39b3afe..000000000
--- a/tests/e2e/cluster_test.go
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
-	"fmt"
-	"net/url"
-	"os"
-	"path"
-	"strings"
-	"testing"
-	"time"
-
-	"go.etcd.io/etcd/server/v3/etcdserver"
-	"go.etcd.io/etcd/tests/v3/integration"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zaptest"
-)
-
-const etcdProcessBasePort = 20000
-
-type clientConnType int
-
-var (
-	fixturesDir = integration.MustAbsPath("../fixtures")
-)
-
-const (
-	clientNonTLS clientConnType = iota
-	clientTLS
-	clientTLSAndNonTLS
-)
-
-func newConfigNoTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{clusterSize: 3,
-		initialToken: "new",
-	}
-}
-
-func newConfigAutoTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:   3,
-		isPeerTLS:     true,
-		isPeerAutoTLS: true,
-		initialToken:  "new",
-	}
-}
-
-func newConfigTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  3,
-		clientTLS:    clientTLS,
-		isPeerTLS:    true,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  3,
-		clientTLS:    clientTLS,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientBoth() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  1,
-		clientTLS:    clientTLSAndNonTLS,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientAutoTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:     1,
-		isClientAutoTLS: true,
-		clientTLS:       clientTLS,
-		initialToken:    "new",
-	}
-}
-
-func newConfigPeerTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  3,
-		isPeerTLS:    true,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientTLSCertAuth() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:           1,
-		clientTLS:             clientTLS,
-		initialToken:          "new",
-		clientCertAuthEnabled: true,
-	}
-}
-
-func newConfigClientTLSCertAuthWithNoCN() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:           1,
-		clientTLS:             clientTLS,
-		initialToken:          "new",
-		clientCertAuthEnabled: true,
-		noCN:                  true,
-	}
-}
-
-func newConfigJWT() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  1,
-		initialToken: "new",
-		authTokenOpts: "jwt,pub-key=" + path.Join(fixturesDir, "server.crt") +
-			",priv-key=" + path.Join(fixturesDir, "server.key.insecure") + ",sign-method=RS256,ttl=1s",
-	}
-}
-
-func configStandalone(cfg etcdProcessClusterConfig) *etcdProcessClusterConfig {
-	ret := cfg
-	ret.clusterSize = 1
-	return &ret
-}
-
-type etcdProcessCluster struct {
-	lg    *zap.Logger
-	cfg   *etcdProcessClusterConfig
-	procs []etcdProcess
-}
-
-type etcdProcessClusterConfig struct {
-	execPath    string
-	dataDirPath string
-	keepDataDir bool
-	envVars     map[string]string
-
-	clusterSize int
-
-	baseScheme string
-	basePort   int
-
-	metricsURLScheme string
-
-	snapshotCount int // default is 10000
-
-	clientTLS             clientConnType
-	clientCertAuthEnabled bool
-	isPeerTLS             bool
-	isPeerAutoTLS         bool
-	isClientAutoTLS       bool
-	isClientCRL           bool
-	noCN                  bool
-
-	cipherSuites []string
-
-	forceNewCluster     bool
-	initialToken        string
-	quotaBackendBytes   int64
-	noStrictReconfig    bool
-	enableV2            bool
-	initialCorruptCheck bool
-	authTokenOpts       string
-	v2deprecation       string
-
-	rollingStart bool
-}
-
-// newEtcdProcessCluster launches a new cluster from etcd processes, returning
-// a new etcdProcessCluster once all nodes are ready to accept client requests.
-func newEtcdProcessCluster(t testing.TB, cfg *etcdProcessClusterConfig) (*etcdProcessCluster, error) {
-	skipInShortMode(t)
-
-	etcdCfgs := cfg.etcdServerProcessConfigs(t)
-	epc := &etcdProcessCluster{
-		cfg:   cfg,
-		lg:    zaptest.NewLogger(t),
-		procs: make([]etcdProcess, cfg.clusterSize),
-	}
-
-	// launch etcd processes
-	for i := range etcdCfgs {
-		proc, err := newEtcdProcess(etcdCfgs[i])
-		if err != nil {
-			epc.Close()
-			return nil, fmt.Errorf("Cannot configure: %v", err)
-		}
-		epc.procs[i] = proc
-	}
-
-	if cfg.rollingStart {
-		if err := epc.RollingStart(); err != nil {
-			return nil, fmt.Errorf("Cannot rolling-start: %v", err)
-		}
-	} else {
-		if err := epc.Start(); err != nil {
-			return nil, fmt.Errorf("Cannot start: %v", err)
-		}
-	}
-	return epc, nil
-}
-
-func (cfg *etcdProcessClusterConfig) clientScheme() string {
-	if cfg.clientTLS == clientTLS {
-		return "https"
-	}
-	return "http"
-}
-
-func (cfg *etcdProcessClusterConfig) peerScheme() string {
-	peerScheme := cfg.baseScheme
-	if peerScheme == "" {
-		peerScheme = "http"
-	}
-	if cfg.isPeerTLS {
-		peerScheme += "s"
-	}
-	return peerScheme
-}
-
-func (cfg *etcdProcessClusterConfig) etcdServerProcessConfigs(tb testing.TB) []*etcdServerProcessConfig {
-	lg := zaptest.NewLogger(tb)
-
-	if cfg.basePort == 0 {
-		cfg.basePort = etcdProcessBasePort
-	}
-	if cfg.execPath == "" {
-		cfg.execPath = binPath
-	}
-	if cfg.snapshotCount == 0 {
-		cfg.snapshotCount = etcdserver.DefaultSnapshotCount
-	}
-
-	etcdCfgs := make([]*etcdServerProcessConfig, cfg.clusterSize)
-	initialCluster := make([]string, cfg.clusterSize)
-	for i := 0; i < cfg.clusterSize; i++ {
-		var curls []string
-		var curl, curltls string
-		port := cfg.basePort + 5*i
-		curlHost := fmt.Sprintf("localhost:%d", port)
-
-		switch cfg.clientTLS {
-		case clientNonTLS, clientTLS:
-			curl = (&url.URL{Scheme: cfg.clientScheme(), Host: curlHost}).String()
-			curls = []string{curl}
-		case clientTLSAndNonTLS:
-			curl = (&url.URL{Scheme: "http", Host: curlHost}).String()
-			curltls = (&url.URL{Scheme: "https", Host: curlHost}).String()
-			curls = []string{curl, curltls}
-		}
-
-		purl := url.URL{Scheme: cfg.peerScheme(), Host: fmt.Sprintf("localhost:%d", port+1)}
-		name := fmt.Sprintf("test-%d", i)
-		dataDirPath := cfg.dataDirPath
-		if cfg.dataDirPath == "" {
-			dataDirPath = tb.TempDir()
-		}
-		initialCluster[i] = fmt.Sprintf("%s=%s", name, purl.String())
-
-		args := []string{
-			"--name", name,
-			"--listen-client-urls", strings.Join(curls, ","),
-			"--advertise-client-urls", strings.Join(curls, ","),
-			"--listen-peer-urls", purl.String(),
-			"--initial-advertise-peer-urls", purl.String(),
-			"--initial-cluster-token", cfg.initialToken,
-			"--data-dir", dataDirPath,
-			"--snapshot-count", fmt.Sprintf("%d", cfg.snapshotCount),
-		}
-		args = addV2Args(args)
-		if cfg.forceNewCluster {
-			args = append(args, "--force-new-cluster")
-		}
-		if cfg.quotaBackendBytes > 0 {
-			args = append(args,
-				"--quota-backend-bytes", fmt.Sprintf("%d", cfg.quotaBackendBytes),
-			)
-		}
-		if cfg.noStrictReconfig {
-			args = append(args, "--strict-reconfig-check=false")
-		}
-		if cfg.enableV2 {
-			args = append(args, "--enable-v2")
-		}
-		if cfg.initialCorruptCheck {
-			args = append(args, "--experimental-initial-corrupt-check")
-		}
-		var murl string
-		if cfg.metricsURLScheme != "" {
-			murl = (&url.URL{
-				Scheme: cfg.metricsURLScheme,
-				Host:   fmt.Sprintf("localhost:%d", port+2),
-			}).String()
-			args = append(args, "--listen-metrics-urls", murl)
-		}
-
-		args = append(args, cfg.tlsArgs()...)
-
-		if cfg.authTokenOpts != "" {
-			args = append(args, "--auth-token", cfg.authTokenOpts)
-		}
-
-		if cfg.v2deprecation != "" {
-			args = append(args, "--v2-deprecation", cfg.v2deprecation)
-		}
-
-		etcdCfgs[i] = &etcdServerProcessConfig{
-			lg:           lg,
-			execPath:     cfg.execPath,
-			args:         args,
-			envVars:      cfg.envVars,
-			tlsArgs:      cfg.tlsArgs(),
-			dataDirPath:  dataDirPath,
-			keepDataDir:  cfg.keepDataDir,
-			name:         name,
-			purl:         purl,
-			acurl:        curl,
-			murl:         murl,
-			initialToken: cfg.initialToken,
-		}
-	}
-
-	initialClusterArgs := []string{"--initial-cluster", strings.Join(initialCluster, ",")}
-	for i := range etcdCfgs {
-		etcdCfgs[i].initialCluster = strings.Join(initialCluster, ",")
-		etcdCfgs[i].args = append(etcdCfgs[i].args, initialClusterArgs...)
-	}
-
-	return etcdCfgs
-}
-
-func (cfg *etcdProcessClusterConfig) tlsArgs() (args []string) {
-	if cfg.clientTLS != clientNonTLS {
-		if cfg.isClientAutoTLS {
-			args = append(args, "--auto-tls")
-		} else {
-			tlsClientArgs := []string{
-				"--cert-file", certPath,
-				"--key-file", privateKeyPath,
-				"--trusted-ca-file", caPath,
-			}
-			args = append(args, tlsClientArgs...)
-
-			if cfg.clientCertAuthEnabled {
-				args = append(args, "--client-cert-auth")
-			}
-		}
-	}
-
-	if cfg.isPeerTLS {
-		if cfg.isPeerAutoTLS {
-			args = append(args, "--peer-auto-tls")
-		} else {
-			tlsPeerArgs := []string{
-				"--peer-cert-file", certPath,
-				"--peer-key-file", privateKeyPath,
-				"--peer-trusted-ca-file", caPath,
-			}
-			args = append(args, tlsPeerArgs...)
-		}
-	}
-
-	if cfg.isClientCRL {
-		args = append(args, "--client-crl-file", crlPath, "--client-cert-auth")
-	}
-
-	if len(cfg.cipherSuites) > 0 {
-		args = append(args, "--cipher-suites", strings.Join(cfg.cipherSuites, ","))
-	}
-
-	return args
-}
-
-func (epc *etcdProcessCluster) EndpointsV2() []string {
-	return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV2() })
-}
-
-func (epc *etcdProcessCluster) EndpointsV3() []string {
-	return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV3() })
-}
-
-func (epc *etcdProcessCluster) endpoints(f func(ep etcdProcess) []string) (ret []string) {
-	for _, p := range epc.procs {
-		ret = append(ret, f(p)...)
-	}
-	return ret
-}
-
-func (epc *etcdProcessCluster) Start() error {
-	return epc.start(func(ep etcdProcess) error { return ep.Start() })
-}
-
-func (epc *etcdProcessCluster) RollingStart() error {
-	return epc.rollingStart(func(ep etcdProcess) error { return ep.Start() })
-}
-
-func (epc *etcdProcessCluster) Restart() error {
-	return epc.start(func(ep etcdProcess) error { return ep.Restart() })
-}
-
-func (epc *etcdProcessCluster) start(f func(ep etcdProcess) error) error {
-	readyC := make(chan error, len(epc.procs))
-	for i := range epc.procs {
-		go func(n int) { readyC <- f(epc.procs[n]) }(i)
-	}
-	for range epc.procs {
-		if err := <-readyC; err != nil {
-			epc.Close()
-			return err
-		}
-	}
-	return nil
-}
-
-func (epc *etcdProcessCluster) rollingStart(f func(ep etcdProcess) error) error {
-	readyC := make(chan error, len(epc.procs))
-	for i := range epc.procs {
-		go func(n int) { readyC <- f(epc.procs[n]) }(i)
-		// make sure the servers do not start at the same time
-		time.Sleep(time.Second)
-	}
-	for range epc.procs {
-		if err := <-readyC; err != nil {
-			epc.Close()
-			return err
-		}
-	}
-	return nil
-}
-
-func (epc *etcdProcessCluster) Stop() (err error) {
-	for _, p := range epc.procs {
-		if p == nil {
-			continue
-		}
-		if curErr := p.Stop(); curErr != nil {
-			if err != nil {
-				err = fmt.Errorf("%v; %v", err, curErr)
-			} else {
-				err = curErr
-			}
-		}
-	}
-	return err
-}
-
-func (epc *etcdProcessCluster) Close() error {
-	epc.lg.Info("closing test cluster...")
-	err := epc.Stop()
-	for _, p := range epc.procs {
-		// p is nil when newEtcdProcess fails in the middle
-		// Close still gets called to clean up test data
-		if p == nil {
-			continue
-		}
-		if cerr := p.Close(); cerr != nil {
-			err = cerr
-		}
-	}
-	epc.lg.Info("closed test cluster.")
-	return err
-}
-
-func (epc *etcdProcessCluster) WithStopSignal(sig os.Signal) (ret os.Signal) {
-	for _, p := range epc.procs {
-		ret = p.WithStopSignal(sig)
-	}
-	return ret
-}
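The helpers deleted above now live in the shared tests/v3/framework/e2e package with exported names. A minimal sketch of a consumer, using only identifiers that appear at call sites later in this diff (e2e.NewConfigNoTLS, e2e.NewEtcdProcessCluster, Close); the test name itself is hypothetical:

	package e2e

	import (
		"testing"

		"go.etcd.io/etcd/tests/v3/framework/e2e"
	)

	func TestClusterLifecycleSketch(t *testing.T) {
		// Launch a 3-node cluster with the default plaintext config.
		epc, err := e2e.NewEtcdProcessCluster(t, e2e.NewConfigNoTLS())
		if err != nil {
			t.Fatalf("could not start etcd process cluster (%v)", err)
		}
		// Close stops all member processes and cleans up test data.
		defer func() {
			if errC := epc.Close(); errC != nil {
				t.Fatalf("error closing etcd processes (%v)", errC)
			}
		}()
	}
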
diff --git a/tests/e2e/ctl_v2_test.go b/tests/e2e/ctl_v2_test.go
index 0aae87bc8..930a96c99 100644
--- a/tests/e2e/ctl_v2_test.go
+++ b/tests/e2e/ctl_v2_test.go
@@ -21,25 +21,27 @@ import (
 	"strings"
 	"testing"
 	"time"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func BeforeTestV2(t testing.TB) {
-	BeforeTest(t)
+	e2e.BeforeTest(t)
 	os.Setenv("ETCDCTL_API", "2")
 	t.Cleanup(func() {
 		os.Unsetenv("ETCDCTL_API")
 	})
 }
 
-func TestCtlV2Set(t *testing.T)          { testCtlV2Set(t, newConfigNoTLS(), false) }
-func TestCtlV2SetQuorum(t *testing.T)    { testCtlV2Set(t, newConfigNoTLS(), true) }
-func TestCtlV2SetClientTLS(t *testing.T) { testCtlV2Set(t, newConfigClientTLS(), false) }
-func TestCtlV2SetPeerTLS(t *testing.T)   { testCtlV2Set(t, newConfigPeerTLS(), false) }
-func TestCtlV2SetTLS(t *testing.T)       { testCtlV2Set(t, newConfigTLS(), false) }
-func testCtlV2Set(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
+func TestCtlV2Set(t *testing.T)          { testCtlV2Set(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2SetQuorum(t *testing.T)    { testCtlV2Set(t, e2e.NewConfigNoTLS(), true) }
+func TestCtlV2SetClientTLS(t *testing.T) { testCtlV2Set(t, e2e.NewConfigClientTLS(), false) }
+func TestCtlV2SetPeerTLS(t *testing.T)   { testCtlV2Set(t, e2e.NewConfigPeerTLS(), false) }
+func TestCtlV2SetTLS(t *testing.T)       { testCtlV2Set(t, e2e.NewConfigTLS(), false) }
+func testCtlV2Set(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) {
 	BeforeTestV2(t)
 
-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, quorum)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -54,13 +56,13 @@ func testCtlV2Set(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
 	}
 }
 
-func TestCtlV2Mk(t *testing.T)       { testCtlV2Mk(t, newConfigNoTLS(), false) }
-func TestCtlV2MkQuorum(t *testing.T) { testCtlV2Mk(t, newConfigNoTLS(), true) }
-func TestCtlV2MkTLS(t *testing.T)    { testCtlV2Mk(t, newConfigTLS(), false) }
-func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
+func TestCtlV2Mk(t *testing.T)       { testCtlV2Mk(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2MkQuorum(t *testing.T) { testCtlV2Mk(t, e2e.NewConfigNoTLS(), true) }
+func TestCtlV2MkTLS(t *testing.T)    { testCtlV2Mk(t, e2e.NewConfigTLS(), false) }
+func testCtlV2Mk(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) {
 	BeforeTestV2(t)
 
-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, quorum)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -78,12 +80,12 @@ func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
 	}
 }
 
-func TestCtlV2Rm(t *testing.T)    { testCtlV2Rm(t, newConfigNoTLS()) }
-func TestCtlV2RmTLS(t *testing.T) { testCtlV2Rm(t, newConfigTLS()) }
-func testCtlV2Rm(t *testing.T, cfg *etcdProcessClusterConfig) {
+func TestCtlV2Rm(t *testing.T)    { testCtlV2Rm(t, e2e.NewConfigNoTLS()) }
+func TestCtlV2RmTLS(t *testing.T) { testCtlV2Rm(t, e2e.NewConfigTLS()) }
+func testCtlV2Rm(t *testing.T, cfg *e2e.EtcdProcessClusterConfig) {
 	BeforeTestV2(t)
 
-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, true)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -101,13 +103,13 @@ func testCtlV2Rm(t *testing.T, cfg *etcdProcessClusterConfig) {
 	}
 }
 
-func TestCtlV2Ls(t *testing.T)       { testCtlV2Ls(t, newConfigNoTLS(), false) }
-func TestCtlV2LsQuorum(t *testing.T) { testCtlV2Ls(t, newConfigNoTLS(), true) }
-func TestCtlV2LsTLS(t *testing.T)    { testCtlV2Ls(t, newConfigTLS(), false) }
-func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
+func TestCtlV2Ls(t *testing.T)       { testCtlV2Ls(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2LsQuorum(t *testing.T) { testCtlV2Ls(t, e2e.NewConfigNoTLS(), true) }
+func TestCtlV2LsTLS(t *testing.T)    { testCtlV2Ls(t, e2e.NewConfigTLS(), false) }
+func testCtlV2Ls(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) {
 	BeforeTestV2(t)
 
-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, quorum)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -122,13 +124,13 @@ func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
 	}
 }
 
-func TestCtlV2Watch(t *testing.T)    { testCtlV2Watch(t, newConfigNoTLS(), false) }
-func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, newConfigTLS(), false) }
+func TestCtlV2Watch(t *testing.T)    { testCtlV2Watch(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, e2e.NewConfigTLS(), false) }
 
-func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
+func testCtlV2Watch(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, noSync bool) {
 	BeforeTestV2(t)
 
-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, true)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -151,8 +153,8 @@ func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
 func TestCtlV2GetRoleUser(t *testing.T) {
 	BeforeTestV2(t)
 
-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -172,7 +174,7 @@ func TestCtlV2GetRoleUser(t *testing.T) {
 	// ensure double grant gives an error; was crashing in 2.3.1
 	regrantArgs := etcdctlPrefixArgs(epc)
 	regrantArgs = append(regrantArgs, "user", "grant", "--roles", "foo", "username")
-	if err := spawnWithExpect(regrantArgs, "duplicate"); err != nil {
+	if err := e2e.SpawnWithExpect(regrantArgs, "duplicate"); err != nil {
 		t.Fatalf("missing duplicate error on double grant role (%v)", err)
 	}
 }
@@ -182,8 +184,8 @@ func TestCtlV2UserListRoot(t *testing.T) { testCtlV2UserList(t, "root") }
 func testCtlV2UserList(t *testing.T, username string) {
 	BeforeTestV2(t)
 
-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -198,8 +200,8 @@ func testCtlV2UserList(t *testing.T, username string) {
 func TestCtlV2RoleList(t *testing.T) {
 	BeforeTestV2(t)
 
-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -232,9 +234,9 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 		t.Fatal(err)
 	}
 
-	etcdCfg := newConfigNoTLS()
-	etcdCfg.snapshotCount = snapCount
-	etcdCfg.enableV2 = true
+	etcdCfg := e2e.NewConfigNoTLS()
+	etcdCfg.SnapshotCount = snapCount
+	etcdCfg.EnableV2 = true
 	t.Log("Starting etcd-1")
 	epc1 := setupEtcdctlTest(t, etcdCfg, false)
 
@@ -259,7 +261,7 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 		}
 	}
 	t.Log("Triggering etcd backup")
-	if err := etcdctlBackup(t, epc1, epc1.procs[0].Config().dataDirPath, backupDir, v3, utl); err != nil {
+	if err := etcdctlBackup(t, epc1, epc1.Procs[0].Config().DataDirPath, backupDir, v3, utl); err != nil {
 		t.Fatal(err)
 	}
 	t.Log("Closing etcd-1 backup")
@@ -271,11 +273,11 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 
 	t.Log("Starting etcd-2 (post backup)")
 	// restart from the backup directory
-	cfg2 := newConfigNoTLS()
-	cfg2.dataDirPath = backupDir
-	cfg2.keepDataDir = true
-	cfg2.forceNewCluster = true
-	cfg2.enableV2 = true
+	cfg2 := e2e.NewConfigNoTLS()
+	cfg2.DataDirPath = backupDir
+	cfg2.KeepDataDir = true
+	cfg2.ForceNewCluster = true
+	cfg2.EnableV2 = true
 	epc2 := setupEtcdctlTest(t, cfg2, false)
 	// Make sure a failing test is not leaking resources (running server).
 	defer epc2.Close()
 
@@ -318,9 +320,9 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 func TestCtlV2AuthWithCommonName(t *testing.T) {
 	BeforeTestV2(t)
 
-	copiedCfg := newConfigClientTLS()
-	copiedCfg.clientCertAuthEnabled = true
-	copiedCfg.enableV2 = true
+	copiedCfg := e2e.NewConfigClientTLS()
+	copiedCfg.ClientCertAuthEnabled = true
+	copiedCfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, copiedCfg, false)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -350,8 +352,8 @@ func TestCtlV2AuthWithCommonName(t *testing.T) {
 func TestCtlV2ClusterHealth(t *testing.T) {
 	BeforeTestV2(t)
 
-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, true)
 	defer cleanupEtcdProcessCluster(epc, t)
 
@@ -361,7 +363,7 @@ func TestCtlV2ClusterHealth(t *testing.T) {
 	}
 
 	// missing members, has quorum
-	epc.procs[0].Stop()
+	epc.Procs[0].Stop()
 
 	for i := 0; i < 3; i++ {
 		err := etcdctlClusterHealth(epc, "cluster is degraded")
@@ -375,129 +377,129 @@ func TestCtlV2ClusterHealth(t *testing.T) {
 	}
 
 	// no quorum
-	epc.procs[1].Stop()
+	epc.Procs[1].Stop()
 	if err := etcdctlClusterHealth(epc, "cluster is unavailable"); err != nil {
 		t.Fatalf("cluster-health expected to be unavailable (%v)", err)
 	}
 
-	epc.procs[0], epc.procs[1] = nil, nil
+	epc.Procs[0], epc.Procs[1] = nil, nil
 }
 
-func etcdctlPrefixArgs(clus *etcdProcessCluster) []string {
+func etcdctlPrefixArgs(clus *e2e.EtcdProcessCluster) []string {
 	endpoints := strings.Join(clus.EndpointsV2(), ",")
-	cmdArgs := []string{ctlBinPath}
+	cmdArgs := []string{e2e.CtlBinPath}
 	cmdArgs = append(cmdArgs, "--endpoints", endpoints)
-	if clus.cfg.clientTLS == clientTLS {
-		cmdArgs = append(cmdArgs, "--ca-file", caPath, "--cert-file", certPath, "--key-file", privateKeyPath)
+	if clus.Cfg.ClientTLS == e2e.ClientTLS {
+		cmdArgs = append(cmdArgs, "--ca-file", e2e.CaPath, "--cert-file", e2e.CertPath, "--key-file", e2e.PrivateKeyPath)
 	}
 	return cmdArgs
 }
 
 func etcductlPrefixArgs(utl bool) []string {
 	if utl {
-		return []string{utlBinPath}
+		return []string{e2e.UtlBinPath}
 	}
-	return []string{ctlBinPath}
+	return []string{e2e.CtlBinPath}
 }
 
-func etcdctlClusterHealth(clus *etcdProcessCluster, val string) error {
+func etcdctlClusterHealth(clus *e2e.EtcdProcessCluster, val string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "cluster-health")
-	return spawnWithExpect(cmdArgs, val)
+	return e2e.SpawnWithExpect(cmdArgs, val)
 }
 
-func etcdctlSet(clus *etcdProcessCluster, key, value string) error {
+func etcdctlSet(clus *e2e.EtcdProcessCluster, key, value string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "set", key, value)
-	return spawnWithExpect(cmdArgs, value)
+	return e2e.SpawnWithExpect(cmdArgs, value)
 }
 
-func etcdctlMk(clus *etcdProcessCluster, key, value string, first bool) error {
+func etcdctlMk(clus *e2e.EtcdProcessCluster, key, value string, first bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "mk", key, value)
 	if first {
-		return spawnWithExpect(cmdArgs, value)
+		return e2e.SpawnWithExpect(cmdArgs, value)
 	}
-	return spawnWithExpect(cmdArgs, "Error: 105: Key already exists")
+	return e2e.SpawnWithExpect(cmdArgs, "Error: 105: Key already exists")
 }
 
-func etcdctlGet(clus *etcdProcessCluster, key, value string, quorum bool) error {
+func etcdctlGet(clus *e2e.EtcdProcessCluster, key, value string, quorum bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "get", key)
 	if quorum {
 		cmdArgs = append(cmdArgs, "--quorum")
 	}
-	return spawnWithExpect(cmdArgs, value)
+	return e2e.SpawnWithExpect(cmdArgs, value)
 }
 
-func etcdctlRm(clus *etcdProcessCluster, key, value string, first bool) error {
+func etcdctlRm(clus *e2e.EtcdProcessCluster, key, value string, first bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "rm", key)
 	if first {
-		return spawnWithExpect(cmdArgs, "PrevNode.Value: "+value)
+		return e2e.SpawnWithExpect(cmdArgs, "PrevNode.Value: "+value)
 	}
-	return spawnWithExpect(cmdArgs, "Error: 100: Key not found")
+	return e2e.SpawnWithExpect(cmdArgs, "Error: 100: Key not found")
 }
 
-func etcdctlLs(clus *etcdProcessCluster, key string, quorum bool) error {
+func etcdctlLs(clus *e2e.EtcdProcessCluster, key string, quorum bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "ls")
 	if quorum {
 		cmdArgs = append(cmdArgs, "--quorum")
 	}
-	return spawnWithExpect(cmdArgs, key)
+	return e2e.SpawnWithExpect(cmdArgs, key)
 }
 
-func etcdctlWatch(clus *etcdProcessCluster, key, value string, noSync bool) <-chan error {
+func etcdctlWatch(clus *e2e.EtcdProcessCluster, key, value string, noSync bool) <-chan error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "watch", "--after-index=1", key)
 	if noSync {
 		cmdArgs = append(cmdArgs, "--no-sync")
 	}
 	errc := make(chan error, 1)
 	go func() {
-		errc <- spawnWithExpect(cmdArgs, value)
+		errc <- e2e.SpawnWithExpect(cmdArgs, value)
 	}()
 	return errc
 }
 
-func etcdctlRoleAdd(clus *etcdProcessCluster, role string) error {
+func etcdctlRoleAdd(clus *e2e.EtcdProcessCluster, role string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "role", "add", role)
-	return spawnWithExpect(cmdArgs, role)
+	return e2e.SpawnWithExpect(cmdArgs, role)
 }
 
-func etcdctlRoleGrant(clus *etcdProcessCluster, role string, perms ...string) error {
+func etcdctlRoleGrant(clus *e2e.EtcdProcessCluster, role string, perms ...string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "role", "grant")
 	cmdArgs = append(cmdArgs, perms...)
 	cmdArgs = append(cmdArgs, role)
-	return spawnWithExpect(cmdArgs, role)
+	return e2e.SpawnWithExpect(cmdArgs, role)
 }
 
-func etcdctlRoleList(clus *etcdProcessCluster, expectedRole string) error {
+func etcdctlRoleList(clus *e2e.EtcdProcessCluster, expectedRole string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "role", "list")
-	return spawnWithExpect(cmdArgs, expectedRole)
+	return e2e.SpawnWithExpect(cmdArgs, expectedRole)
 }
 
-func etcdctlUserAdd(clus *etcdProcessCluster, user, pass string) error {
+func etcdctlUserAdd(clus *e2e.EtcdProcessCluster, user, pass string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "add", user+":"+pass)
-	return spawnWithExpect(cmdArgs, "User "+user+" created")
+	return e2e.SpawnWithExpect(cmdArgs, "User "+user+" created")
 }
 
-func etcdctlUserGrant(clus *etcdProcessCluster, user, role string) error {
+func etcdctlUserGrant(clus *e2e.EtcdProcessCluster, user, role string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "grant", "--roles", role, user)
-	return spawnWithExpect(cmdArgs, "User "+user+" updated")
+	return e2e.SpawnWithExpect(cmdArgs, "User "+user+" updated")
 }
 
-func etcdctlUserGet(clus *etcdProcessCluster, user string) error {
+func etcdctlUserGet(clus *e2e.EtcdProcessCluster, user string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "get", user)
-	return spawnWithExpect(cmdArgs, "User: "+user)
+	return e2e.SpawnWithExpect(cmdArgs, "User: "+user)
 }
 
-func etcdctlUserList(clus *etcdProcessCluster, expectedUser string) error {
+func etcdctlUserList(clus *e2e.EtcdProcessCluster, expectedUser string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "list")
-	return spawnWithExpect(cmdArgs, expectedUser)
+	return e2e.SpawnWithExpect(cmdArgs, expectedUser)
 }
 
-func etcdctlAuthEnable(clus *etcdProcessCluster) error {
+func etcdctlAuthEnable(clus *e2e.EtcdProcessCluster) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "auth", "enable")
-	return spawnWithExpect(cmdArgs, "Authentication Enabled")
+	return e2e.SpawnWithExpect(cmdArgs, "Authentication Enabled")
 }
 
-func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir string, v3 bool, utl bool) error {
+func etcdctlBackup(t testing.TB, clus *e2e.EtcdProcessCluster, dataDir, backupDir string, v3 bool, utl bool) error {
 	cmdArgs := append(etcductlPrefixArgs(utl), "backup", "--data-dir", dataDir, "--backup-dir", backupDir)
 	if v3 {
 		cmdArgs = append(cmdArgs, "--with-v3")
@@ -505,7 +507,7 @@ func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir st
 		cmdArgs = append(cmdArgs, "--with-v3=false")
 	}
 	t.Logf("Running: %v", cmdArgs)
-	proc, err := spawnCmd(cmdArgs, nil)
+	proc, err := e2e.SpawnCmd(cmdArgs, nil)
 	if err != nil {
 		return err
 	}
@@ -516,18 +518,18 @@ func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir st
 	return proc.ProcessError()
 }
 
-func setupEtcdctlTest(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) *etcdProcessCluster {
+func setupEtcdctlTest(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) *e2e.EtcdProcessCluster {
 	if !quorum {
-		cfg = configStandalone(*cfg)
+		cfg = e2e.ConfigStandalone(*cfg)
 	}
-	epc, err := newEtcdProcessCluster(t, cfg)
+	epc, err := e2e.NewEtcdProcessCluster(t, cfg)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
 	return epc
 }
 
-func cleanupEtcdProcessCluster(epc *etcdProcessCluster, t *testing.T) {
+func cleanupEtcdProcessCluster(epc *e2e.EtcdProcessCluster, t *testing.T) {
 	if errC := epc.Close(); errC != nil {
 		t.Fatalf("error closing etcd processes (%v)", errC)
 	}
diff --git a/tests/e2e/ctl_v3_alarm_test.go b/tests/e2e/ctl_v3_alarm_test.go
index 19852c30b..f33654f00 100644
--- a/tests/e2e/ctl_v3_alarm_test.go
+++ b/tests/e2e/ctl_v3_alarm_test.go
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3Alarm(t *testing.T) {
@@ -53,7 +54,7 @@ func alarmTest(cx ctlCtx) {
 	}
 
 	// '/health' handler should return 'false'
-	if err := cURLGet(cx.epc, cURLReq{endpoint: "/health", expected: `{"health":"false","reason":"ALARM NOSPACE"}`}); err != nil {
+	if err := e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: "/health", Expected: `{"health":"false","reason":"ALARM NOSPACE"}`}); err != nil {
 		cx.t.Fatalf("failed get with curl (%v)", err)
 	}
 
@@ -101,5 +102,5 @@ func alarmTest(cx ctlCtx) {
 
 func ctlV3Alarm(cx ctlCtx, cmd string, as ...string) error {
 	cmdArgs := append(cx.PrefixArgs(), "alarm", cmd)
-	return spawnWithExpects(cmdArgs, cx.envMap, as...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, as...)
 }
diff --git a/tests/e2e/ctl_v3_auth_no_proxy_test.go b/tests/e2e/ctl_v3_auth_no_proxy_test.go
index 0b4807c83..18a9661a6 100644
--- a/tests/e2e/ctl_v3_auth_no_proxy_test.go
+++ b/tests/e2e/ctl_v3_auth_no_proxy_test.go
@@ -21,14 +21,16 @@ package e2e
 
 import (
 	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3AuthCertCN(t *testing.T) {
-	testCtl(t, authTestCertCN, withCfg(*newConfigClientTLSCertAuth()))
+	testCtl(t, authTestCertCN, withCfg(*e2e.NewConfigClientTLSCertAuth()))
 }
 
 func TestCtlV3AuthCertCNAndUsername(t *testing.T) {
-	testCtl(t, authTestCertCNAndUsername, withCfg(*newConfigClientTLSCertAuth()))
+	testCtl(t, authTestCertCNAndUsername, withCfg(*e2e.NewConfigClientTLSCertAuth()))
 }
 
 func TestCtlV3AuthCertCNAndUsernameNoPassword(t *testing.T) {
-	testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*newConfigClientTLSCertAuth()))
+	testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*e2e.NewConfigClientTLSCertAuth()))
 }
diff --git a/tests/e2e/ctl_v3_auth_test.go b/tests/e2e/ctl_v3_auth_test.go
index 11db1b389..ed3ffc416 100644
--- a/tests/e2e/ctl_v3_auth_test.go
+++ b/tests/e2e/ctl_v3_auth_test.go
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3AuthEnable(t *testing.T) { testCtl(t, authEnableTest) }
@@ -35,7 +36,7 @@ func TestCtlV3AuthRoleUpdate(t *testing.T)          { testCtl(t, authRoleUpdateT
 func TestCtlV3AuthUserDeleteDuringOps(t *testing.T) { testCtl(t, authUserDeleteDuringOpsTest) }
 func TestCtlV3AuthRoleRevokeDuringOps(t *testing.T) { testCtl(t, authRoleRevokeDuringOpsTest) }
 func TestCtlV3AuthTxn(t *testing.T)                 { testCtl(t, authTestTxn) }
-func TestCtlV3AuthTxnJWT(t *testing.T)              { testCtl(t, authTestTxn, withCfg(*newConfigJWT())) }
+func TestCtlV3AuthTxnJWT(t *testing.T)              { testCtl(t, authTestTxn, withCfg(*e2e.NewConfigJWT())) }
 func TestCtlV3AuthPrefixPerm(t *testing.T)          { testCtl(t, authTestPrefixPerm) }
 func TestCtlV3AuthMemberAdd(t *testing.T)           { testCtl(t, authTestMemberAdd) }
 func TestCtlV3AuthMemberRemove(t *testing.T) {
@@ -46,7 +47,7 @@ func TestCtlV3AuthRevokeWithDelete(t *testing.T)    { testCtl(t, authTestRevokeWith
 func TestCtlV3AuthInvalidMgmt(t *testing.T)         { testCtl(t, authTestInvalidMgmt) }
 func TestCtlV3AuthFromKeyPerm(t *testing.T)         { testCtl(t, authTestFromKeyPerm) }
 func TestCtlV3AuthAndWatch(t *testing.T)            { testCtl(t, authTestWatch) }
-func TestCtlV3AuthAndWatchJWT(t *testing.T)         { testCtl(t, authTestWatch, withCfg(*newConfigJWT())) }
+func TestCtlV3AuthAndWatchJWT(t *testing.T)         { testCtl(t, authTestWatch, withCfg(*e2e.NewConfigJWT())) }
 func TestCtlV3AuthLeaseTestKeepAlive(t *testing.T)  { testCtl(t, authLeaseTestKeepAlive) }
 func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) {
@@ -54,7 +55,7 @@ func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) {
 }
 func TestCtlV3AuthLeaseGrantLeases(t *testing.T) { testCtl(t, authLeaseTestLeaseGrantLeases) }
 func TestCtlV3AuthLeaseGrantLeasesJWT(t *testing.T) {
-	testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(*newConfigJWT()))
+	testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(*e2e.NewConfigJWT()))
 }
 func TestCtlV3AuthLeaseRevoke(t *testing.T) { testCtl(t, authLeaseTestLeaseRevoke) }
@@ -66,9 +67,13 @@ func TestCtlV3AuthDefrag(t *testing.T) { testCtl(t, authTestDefrag) }
 func TestCtlV3AuthEndpointHealth(t *testing.T) { testCtl(t, authTestEndpointHealth, withQuorum()) }
-func TestCtlV3AuthSnapshot(t *testing.T)    { testCtl(t, authTestSnapshot) }
-func TestCtlV3AuthSnapshotJWT(t *testing.T) { testCtl(t, authTestSnapshot, withCfg(*newConfigJWT())) }
-func TestCtlV3AuthJWTExpire(t *testing.T)   { testCtl(t, authTestJWTExpire, withCfg(*newConfigJWT())) }
+func TestCtlV3AuthSnapshot(t *testing.T) { testCtl(t, authTestSnapshot) }
+func TestCtlV3AuthSnapshotJWT(t *testing.T) {
+	testCtl(t, authTestSnapshot, withCfg(*e2e.NewConfigJWT()))
+}
+func TestCtlV3AuthJWTExpire(t *testing.T) {
+	testCtl(t, authTestJWTExpire, withCfg(*e2e.NewConfigJWT()))
+}
 func TestCtlV3AuthRevisionConsistency(t *testing.T) { testCtl(t, authTestRevisionConsistency) }
 
 func authEnableTest(cx ctlCtx) {
@@ -93,7 +98,7 @@ func authEnable(cx ctlCtx) error {
 
 func ctlV3AuthEnable(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "auth", "enable")
-	return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Enabled")
+	return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Enabled")
 }
 
 func authDisableTest(cx ctlCtx) {
@@ -139,12 +144,12 @@ func authDisableTest(cx ctlCtx) {
 
 func ctlV3AuthDisable(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "auth", "disable")
-	return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Disabled")
+	return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Disabled")
 }
 
 func authStatusTest(cx ctlCtx) {
 	cmdArgs := append(cx.PrefixArgs(), "auth", "status")
-	if err := spawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: false", "AuthRevision:"); err != nil {
+	if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: false", "AuthRevision:"); err != nil {
 		cx.t.Fatal(err)
 	}
 
@@ -155,15 +160,15 @@ func authStatusTest(cx ctlCtx) {
 	cx.user, cx.pass = "root", "root"
 	cmdArgs = append(cx.PrefixArgs(), "auth", "status")
 
-	if err := spawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: true", "AuthRevision:"); err != nil {
+	if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: true", "AuthRevision:"); err != nil {
 		cx.t.Fatal(err)
 	}
 
 	cmdArgs = append(cx.PrefixArgs(), "auth", "status", "--write-out", "json")
-	if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "enabled"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "enabled"); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "authRevision"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "authRevision"); err != nil {
 		cx.t.Fatal(err)
 	}
 }
@@ -381,25 +386,25 @@ func authRoleRevokeDuringOpsTest(cx ctlCtx) {
 }
 
 func ctlV3PutFailAuth(cx ctlCtx, key, val string) error {
-	return spawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "authentication failed")
+	return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "authentication failed")
 }
 
 func ctlV3PutFailPerm(cx ctlCtx, key, val string) error {
-	return spawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "permission denied")
+	return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "permission denied")
 }
 
 func authSetupTestUser(cx ctlCtx) {
 	if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
 		cx.t.Fatal(err)
 	}
 	if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role"}, "Role test-role is granted to user test-user", nil); err != nil {
 		cx.t.Fatal(err)
 	}
 	cmd := append(cx.PrefixArgs(), "role", "grant-permission", "test-role", "readwrite", "foo")
-	if err := spawnWithExpectWithEnv(cmd, cx.envMap, "Role test-role updated"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(cmd, cx.envMap, "Role test-role updated"); err != nil {
 		cx.t.Fatal(err)
 	}
 }
@@ -537,7 +542,7 @@ func authTestMemberAdd(cx ctlCtx) {
 	cx.user, cx.pass = "root", "root"
 	authSetupTestUser(cx)
 
-	peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+	peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
 	// ordinary user cannot add a new member
 	cx.user, cx.pass = "test-user", "pass"
 	if err := ctlV3MemberAdd(cx, peerURL, false); err == nil {
@@ -589,7 +594,7 @@ func authTestMemberUpdate(cx ctlCtx) {
 
 	// ordinary user cannot update a member
 	cx.user, cx.pass = "test-user", "pass"
-	peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+	peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
 	memberID := fmt.Sprintf("%x", mr.Members[0].ID)
 	if err = ctlV3MemberUpdate(cx, memberID, peerURL); err == nil {
 		cx.t.Fatalf("ordinary user must not be allowed to update a member")
@@ -611,7 +616,7 @@ func authTestCertCN(cx ctlCtx) {
 	if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
 		cx.t.Fatal(err)
 	}
 	if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role"}, "Role test-role is granted to user example.com", nil); err != nil {
@@ -921,13 +926,13 @@ func authTestRoleGet(cx ctlCtx) {
 		"KV Read:", "foo",
 		"KV Write:", "foo",
 	}
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 
 	// test-user can get the information of test-role because it belongs to the role
 	cx.user, cx.pass = "test-user", "pass"
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 
@@ -935,7 +940,7 @@ func authTestRoleGet(cx ctlCtx) {
 	expected = []string{
 		"Error: etcdserver: permission denied",
 	}
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 }
@@ -952,13 +957,13 @@ func authTestUserGet(cx ctlCtx) {
 		"Roles: test-role",
 	}
 
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 
 	// test-user can get the information of test-user itself
 	cx.user, cx.pass = "test-user", "pass"
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 
@@ -966,7 +971,7 @@ func authTestUserGet(cx ctlCtx) {
 	expected = []string{
 		"Error: etcdserver: permission denied",
 	}
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 }
@@ -977,7 +982,7 @@ func authTestRoleList(cx ctlCtx) {
 	}
 	cx.user, cx.pass = "root", "root"
 	authSetupTestUser(cx)
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "list"), cx.envMap, "test-role"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "list"), cx.envMap, "test-role"); err != nil {
 		cx.t.Fatal(err)
 	}
 }
@@ -1088,7 +1093,7 @@ func certCNAndUsername(cx ctlCtx, noPassword bool) {
 			cx.t.Fatal(err)
 		}
 	}
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, "Role test-role-cn created"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, "Role test-role-cn created"); err != nil {
 		cx.t.Fatal(err)
 	}
 	if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role-cn"}, "Role test-role-cn is granted to user example.com", nil); err != nil {
@@ -1174,7 +1179,7 @@ func authTestRevisionConsistency(cx ctlCtx) {
 	}
 
 	// get node0 auth revision
-	node0 := cx.epc.procs[0]
+	node0 := cx.epc.Procs[0]
 	endpoint := node0.EndpointsV3()[0]
 	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, Username: cx.user, Password: cx.pass, DialTimeout: 3 * time.Second})
 	if err != nil {
diff --git a/tests/e2e/ctl_v3_compact_test.go b/tests/e2e/ctl_v3_compact_test.go
index 4852382c8..f29e580d9 100644
--- a/tests/e2e/ctl_v3_compact_test.go
+++ b/tests/e2e/ctl_v3_compact_test.go
@@ -18,6 +18,8 @@ import (
 	"strconv"
 	"strings"
 	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3Compact(t *testing.T) { testCtl(t, compactTest) }
@@ -71,5 +73,5 @@ func ctlV3Compact(cx ctlCtx, rev int64, physical bool) error {
 	if physical {
 		cmdArgs = append(cmdArgs, "--physical")
 	}
-	return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "compacted revision "+rs)
+	return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "compacted revision "+rs)
 }
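The expect helpers renamed throughout these hunks differ only in arity and environment handling, as their call sites suggest: SpawnWithExpect(args, expected) takes one expected string, SpawnWithExpectWithEnv(args, env, expected) adds an environment map, and SpawnWithExpects(args, env, expected...) matches several strings in order. A hedged usage sketch against a running cluster (the function name, key, and value are illustrative):

	package e2e

	import (
		"testing"

		"go.etcd.io/etcd/tests/v3/framework/e2e"
	)

	func spawnSketch(t *testing.T, clus *e2e.EtcdProcessCluster) {
		args := []string{e2e.CtlBinPath, "--endpoints", clus.EndpointsV3()[0], "put", "foo", "bar"}
		// etcdctl prints "OK" on a successful put; the helper returns an
		// error if the expected string never appears in the output.
		if err := e2e.SpawnWithExpect(args, "OK"); err != nil {
			t.Fatal(err)
		}
	}
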
diff --git a/tests/e2e/ctl_v3_completion_test.go b/tests/e2e/ctl_v3_completion_test.go
index 6dcfef07e..88c030d1a 100644
--- a/tests/e2e/ctl_v3_completion_test.go
+++ b/tests/e2e/ctl_v3_completion_test.go
@@ -22,14 +22,15 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
-func TestCtlV3CompletionBash(t *testing.T) { testShellCompletion(t, ctlBinPath, "bash") }
+func TestCtlV3CompletionBash(t *testing.T) { testShellCompletion(t, e2e.CtlBinPath, "bash") }
 
-func TestUtlV3CompletionBash(t *testing.T) { testShellCompletion(t, utlBinPath, "bash") }
+func TestUtlV3CompletionBash(t *testing.T) { testShellCompletion(t, e2e.UtlBinPath, "bash") }
 
 func testShellCompletion(t *testing.T, binPath, shellName string) {
-	BeforeTest(t)
+	e2e.BeforeTest(t)
 
 	stdout := new(bytes.Buffer)
 	completionCmd := exec.Command(binPath, "completion", shellName)
diff --git a/tests/e2e/ctl_v3_defrag_test.go b/tests/e2e/ctl_v3_defrag_test.go
index f1a63094f..6abf855f4 100644
--- a/tests/e2e/ctl_v3_defrag_test.go
+++ b/tests/e2e/ctl_v3_defrag_test.go
@@ -14,7 +14,11 @@
 
 package e2e
 
-import "testing"
+import (
+	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
+)
 
 func TestCtlV3DefragOnline(t *testing.T) { testCtl(t, defragOnlineTest) }
 
@@ -48,17 +52,17 @@ func defragOnlineTest(cx ctlCtx) {
 
 func ctlV3OnlineDefrag(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "defrag")
-	lines := make([]string, cx.epc.cfg.clusterSize)
+	lines := make([]string, cx.epc.Cfg.ClusterSize)
 	for i := range lines {
 		lines[i] = "Finished defragmenting etcd member"
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }
 
 func ctlV3OfflineDefrag(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgsUtl(), "defrag", "--data-dir", cx.dataDir)
 	lines := []string{"finished defragmenting directory"}
-	return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }
 
 func defragOfflineTest(cx ctlCtx) {
diff --git a/tests/e2e/ctl_v3_elect_test.go b/tests/e2e/ctl_v3_elect_test.go
index 9b7891560..40b15f69b 100644
--- a/tests/e2e/ctl_v3_elect_test.go
+++ b/tests/e2e/ctl_v3_elect_test.go
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/pkg/v3/expect"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3Elect(t *testing.T) {
@@ -72,7 +73,7 @@ func testElect(cx ctlCtx) {
 	if err = blocked.Signal(os.Interrupt); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err = closeWithTimeout(blocked, time.Second); err != nil {
+	if err = e2e.CloseWithTimeout(blocked, time.Second); err != nil {
 		cx.t.Fatal(err)
 	}
 
@@ -80,7 +81,7 @@ func testElect(cx ctlCtx) {
 	if err = holder.Signal(os.Interrupt); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err = closeWithTimeout(holder, time.Second); err != nil {
+	if err = e2e.CloseWithTimeout(holder, time.Second); err != nil {
 		cx.t.Fatal(err)
 	}
 
@@ -98,7 +99,7 @@ func testElect(cx ctlCtx) {
 // ctlV3Elect creates a elect process with a channel listening for when it wins the election.
 func ctlV3Elect(cx ctlCtx, name, proposal string) (*expect.ExpectProcess, <-chan string, error) {
 	cmdArgs := append(cx.PrefixArgs(), "elect", name, proposal)
-	proc, err := spawnCmd(cmdArgs, cx.envMap)
+	proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
 	outc := make(chan string, 1)
 	if err != nil {
 		close(outc)
diff --git a/tests/e2e/ctl_v3_endpoint_test.go b/tests/e2e/ctl_v3_endpoint_test.go
index 33dd7f5c6..8e364e8b4 100644
--- a/tests/e2e/ctl_v3_endpoint_test.go
+++ b/tests/e2e/ctl_v3_endpoint_test.go
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3EndpointHealth(t *testing.T) { testCtl(t, endpointHealthTest, withQuorum()) }
@@ -36,11 +37,11 @@ func endpointHealthTest(cx ctlCtx) {
 
 func ctlV3EndpointHealth(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "endpoint", "health")
-	lines := make([]string, cx.epc.cfg.clusterSize)
+	lines := make([]string, cx.epc.Cfg.ClusterSize)
 	for i := range lines {
 		lines[i] = "is healthy"
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }
 
 func endpointStatusTest(cx ctlCtx) {
@@ -56,7 +57,7 @@ func ctlV3EndpointStatus(cx ctlCtx) error {
 		u, _ := url.Parse(ep)
 		eps = append(eps, u.Host)
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, eps...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, eps...)
 }
 
 func endpointHashKVTest(cx ctlCtx) {
@@ -88,5 +89,5 @@ func ctlV3EndpointHashKV(cx ctlCtx) error {
 		u, _ := url.Parse(ep)
 		ss = append(ss, fmt.Sprintf("%s, %d", u.Host, hresp.Hash))
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, ss...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, ss...)
 }
diff --git a/tests/e2e/ctl_v3_grpc_test.go b/tests/e2e/ctl_v3_grpc_test.go
index b0f824552..6007e241c 100644
--- a/tests/e2e/ctl_v3_grpc_test.go
+++ b/tests/e2e/ctl_v3_grpc_test.go
@@ -25,6 +25,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"go.etcd.io/etcd/client/pkg/v3/testutil"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestAuthority(t *testing.T) {
@@ -80,18 +81,18 @@ func TestAuthority(t *testing.T) {
 	for _, tc := range tcs {
 		for _, clusterSize := range []int{1, 3} {
 			t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
-				BeforeTest(t)
+				e2e.BeforeTest(t)
 
-				cfg := newConfigNoTLS()
-				cfg.clusterSize = clusterSize
+				cfg := e2e.NewConfigNoTLS()
+				cfg.ClusterSize = clusterSize
 				if tc.useTLS {
-					cfg.clientTLS = clientTLS
+					cfg.ClientTLS = e2e.ClientTLS
 				}
-				cfg.isClientAutoTLS = tc.useInsecureTLS
+				cfg.IsClientAutoTLS = tc.useInsecureTLS
 				// Enable debug mode to get logs with http2 headers (including authority)
-				cfg.envVars = map[string]string{"GODEBUG": "http2debug=2"}
+				cfg.EnvVars = map[string]string{"GODEBUG": "http2debug=2"}
 
-				epc, err := newEtcdProcessCluster(t, cfg)
+				epc, err := e2e.NewEtcdProcessCluster(t, cfg)
 				if err != nil {
 					t.Fatalf("could not start etcd process cluster (%v)", err)
 				}
@@ -113,13 +114,13 @@ func TestAuthority(t *testing.T) {
 	}
 }
 
-func templateEndpoints(t *testing.T, pattern string, clus *etcdProcessCluster) []string {
+func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluster) []string {
 	t.Helper()
 	endpoints := []string{}
-	for i := 0; i < clus.cfg.clusterSize; i++ {
+	for i := 0; i < clus.Cfg.ClusterSize; i++ {
 		ent := pattern
 		if strings.Contains(ent, "%d") {
-			ent = fmt.Sprintf(ent, etcdProcessBasePort+i*5)
+			ent = fmt.Sprintf(ent, e2e.EtcdProcessBasePort+i*5)
 		}
 		if strings.Contains(ent, "%") {
 			t.Fatalf("Failed to template pattern, %% symbol left %q", ent)
@@ -129,9 +130,9 @@ func templateEndpoints(t *testing.T, pattern string, clus *etcdProcessCluster) [
 	return endpoints
 }
 
-func assertAuthority(t *testing.T, expectAurhority string, clus *etcdProcessCluster) {
-	logs := []logsExpect{}
-	for _, proc := range clus.procs {
+func assertAuthority(t *testing.T, expectAurhority string, clus *e2e.EtcdProcessCluster) {
+	logs := []e2e.LogsExpect{}
+	for _, proc := range clus.Procs {
 		logs = append(logs, proc.Logs())
 	}
 	line := firstMatch(t, `http2: decoded hpack field header field ":authority"`, logs...)
@@ -141,11 +142,11 @@ func assertAuthority(t *testing.T, expectAurhority string, clus *etcdProcessClus
 	assert.True(t, strings.HasSuffix(line, expectLine), fmt.Sprintf("Got %q expected suffix %q", line, expectLine))
 }
 
-func firstMatch(t *testing.T, expectLine string, logs ...logsExpect) string {
+func firstMatch(t *testing.T, expectLine string, logs ...e2e.LogsExpect) string {
 	t.Helper()
 	match := make(chan string, len(logs))
 	for i := range logs {
-		go func(l logsExpect) {
+		go func(l e2e.LogsExpect) {
 			line, _ := l.Expect(expectLine)
 			match <- line
 		}(logs[i])
@@ -168,11 +169,11 @@ func executeWithTimeout(t *testing.T, timeout time.Duration, f func()) {
 }
 
 type etcdctlV3 struct {
-	cfg       *etcdProcessClusterConfig
+	cfg       *e2e.EtcdProcessClusterConfig
 	endpoints []string
 }
 
-func clusterEtcdctlV3(cfg *etcdProcessClusterConfig, endpoints []string) *etcdctlV3 {
+func clusterEtcdctlV3(cfg *e2e.EtcdProcessClusterConfig, endpoints []string) *etcdctlV3 {
 	return &etcdctlV3{
 		cfg:       cfg,
 		endpoints: endpoints,
@@ -184,28 +185,28 @@ func (ctl *etcdctlV3) Put(key, value string) error {
 }
 
 func (ctl *etcdctlV3) runCmd(args ...string) error {
-	cmdArgs := []string{ctlBinPath + "3"}
+	cmdArgs := []string{e2e.CtlBinPath + "3"}
 	for k, v := range ctl.flags() {
 		cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
 	}
 	cmdArgs = append(cmdArgs, args...)
-	return spawnWithExpect(cmdArgs, "OK")
+	return e2e.SpawnWithExpect(cmdArgs, "OK")
 }
 
 func (ctl *etcdctlV3) flags() map[string]string {
 	fmap := make(map[string]string)
-	if ctl.cfg.clientTLS == clientTLS {
-		if ctl.cfg.isClientAutoTLS {
+	if ctl.cfg.ClientTLS == e2e.ClientTLS {
+		if ctl.cfg.IsClientAutoTLS {
 			fmap["insecure-transport"] = "false"
 			fmap["insecure-skip-tls-verify"] = "true"
-		} else if ctl.cfg.isClientCRL {
-			fmap["cacert"] = caPath
-			fmap["cert"] = revokedCertPath
-			fmap["key"] = revokedPrivateKeyPath
+		} else if ctl.cfg.IsClientCRL {
+			fmap["cacert"] = e2e.CaPath
+			fmap["cert"] = e2e.RevokedCertPath
+			fmap["key"] = e2e.RevokedPrivateKeyPath
 		} else {
-			fmap["cacert"] = caPath
-			fmap["cert"] = certPath
-			fmap["key"] = privateKeyPath
+			fmap["cacert"] = e2e.CaPath
+			fmap["cert"] = e2e.CertPath
+			fmap["key"] = e2e.PrivateKeyPath
 		}
 	}
 	fmap["endpoints"] = strings.Join(ctl.endpoints, ",")
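The endpoint templating above relies on the fixed port layout from the deleted cluster config: member i serves clients on EtcdProcessBasePort+5*i, peers on the next port, and metrics on the one after that. A small sketch of the arithmetic (the helper name is hypothetical):

	// portsOf returns the ports assigned to cluster member i under the
	// framework's layout: base+5*i for clients, +1 for peers, +2 for metrics.
	func portsOf(i int) (client, peer, metrics int) {
		client = e2e.EtcdProcessBasePort + 5*i
		return client, client + 1, client + 2
	}
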
TestCtlV3GetClientAutoTLS(t *testing.T) { + testCtl(t, getTest, withCfg(*e2e.NewConfigClientAutoTLS())) +} +func TestCtlV3GetPeerTLS(t *testing.T) { testCtl(t, getTest, withCfg(*e2e.NewConfigPeerTLS())) } +func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) } +func TestCtlV3GetQuorum(t *testing.T) { testCtl(t, getTest, withQuorum()) } func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) } func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) } @@ -47,18 +53,18 @@ func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) } func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) } func TestCtlV3Del(t *testing.T) { testCtl(t, delTest) } -func TestCtlV3DelNoTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigNoTLS())) } -func TestCtlV3DelClientTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigClientTLS())) } -func TestCtlV3DelPeerTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigPeerTLS())) } +func TestCtlV3DelNoTLS(t *testing.T) { testCtl(t, delTest, withCfg(*e2e.NewConfigNoTLS())) } +func TestCtlV3DelClientTLS(t *testing.T) { testCtl(t, delTest, withCfg(*e2e.NewConfigClientTLS())) } +func TestCtlV3DelPeerTLS(t *testing.T) { testCtl(t, delTest, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) } func TestCtlV3GetRevokedCRL(t *testing.T) { - cfg := etcdProcessClusterConfig{ - clusterSize: 1, - initialToken: "new", - clientTLS: clientTLS, - isClientCRL: true, - clientCertAuthEnabled: true, + cfg := e2e.EtcdProcessClusterConfig{ + ClusterSize: 1, + InitialToken: "new", + ClientTLS: e2e.ClientTLS, + IsClientCRL: true, + ClientCertAuthEnabled: true, } testCtl(t, testGetRevokedCRL, withCfg(cfg)) } @@ -69,7 +75,7 @@ func testGetRevokedCRL(cx ctlCtx) { cx.t.Fatalf("expected reset connection on put, got %v", err) } // test accept - cx.epc.cfg.isClientCRL = false + cx.epc.Cfg.IsClientCRL = false if err := ctlV3Put(cx, "k", "v", ""); err != nil { cx.t.Fatal(err) } @@ -190,7 +196,7 @@ func getFormatTest(cx ctlCtx) { cmdArgs = append(cmdArgs, "--print-value-only") } cmdArgs = append(cmdArgs, "abc") - if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, tt.wstr); err != nil { + if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, tt.wstr); err != nil { cx.t.Errorf("#%d: error (%v), wanted %v", i, err, tt.wstr) } } @@ -228,24 +234,24 @@ func getKeysOnlyTest(cx ctlCtx) { cx.t.Fatal(err) } cmdArgs := append(cx.PrefixArgs(), []string{"get", "--keys-only", "key"}...) - if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "key"); err != nil { + if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "key"); err != nil { cx.t.Fatal(err) } - if err := spawnWithExpects(cmdArgs, cx.envMap, "val"); err == nil { + if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "val"); err == nil { cx.t.Fatalf("got value but passed --keys-only") } } func getCountOnlyTest(cx ctlCtx) { cmdArgs := append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 0"); err != nil { + if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 0"); err != nil { cx.t.Fatal(err) } if err := ctlV3Put(cx, "key", "val", ""); err != nil { cx.t.Fatal(err) } cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) 
- if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 1"); err != nil { + if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 1"); err != nil { cx.t.Fatal(err) } if err := ctlV3Put(cx, "key1", "val", ""); err != nil { @@ -255,21 +261,21 @@ func getCountOnlyTest(cx ctlCtx) { cx.t.Fatal(err) } cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 2"); err != nil { + if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 2"); err != nil { cx.t.Fatal(err) } if err := ctlV3Put(cx, "key2", "val", ""); err != nil { cx.t.Fatal(err) } cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 3"); err != nil { + if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 3"); err != nil { cx.t.Fatal(err) } expected := []string{ "\"Count\" : 3", } cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key3", "--prefix", "--write-out=fields"}...) - if err := spawnWithExpects(cmdArgs, cx.envMap, expected...); err == nil { + if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, expected...); err == nil { cx.t.Fatal(err) } } @@ -348,7 +354,7 @@ func ctlV3Put(cx ctlCtx, key, value, leaseID string, flags ...string) error { if len(flags) != 0 { cmdArgs = append(cmdArgs, flags...) } - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK") + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK") } type kv struct { @@ -365,7 +371,7 @@ func ctlV3Get(cx ctlCtx, args []string, kvs ...kv) error { for _, elem := range kvs { lines = append(lines, elem.key, elem.val) } - return spawnWithExpects(cmdArgs, cx.envMap, lines...) + return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) } // ctlV3GetWithErr runs "get" command expecting no output but error @@ -375,11 +381,11 @@ func ctlV3GetWithErr(cx ctlCtx, args []string, errs []string) error { if !cx.quorum { cmdArgs = append(cmdArgs, "--consistency", "s") } - return spawnWithExpects(cmdArgs, cx.envMap, errs...) + return e2e.SpawnWithExpects(cmdArgs, cx.envMap, errs...) } func ctlV3Del(cx ctlCtx, args []string, num int) error { cmdArgs := append(cx.PrefixArgs(), "del") cmdArgs = append(cmdArgs, args...) 
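// etcdctl del prints the number of keys it deleted; the helper asserts on exactly that count below.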
- return spawnWithExpects(cmdArgs, cx.envMap, fmt.Sprintf("%d", num)) + return e2e.SpawnWithExpects(cmdArgs, cx.envMap, fmt.Sprintf("%d", num)) } diff --git a/tests/e2e/ctl_v3_lease_test.go b/tests/e2e/ctl_v3_lease_test.go index 0dc445202..d13309bc5 100644 --- a/tests/e2e/ctl_v3_lease_test.go +++ b/tests/e2e/ctl_v3_lease_test.go @@ -20,90 +20,92 @@ import ( "strings" "testing" "time" + + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3LeaseGrantTimeToLive(t *testing.T) { testCtl(t, leaseTestGrantTimeToLive) } func TestCtlV3LeaseGrantTimeToLiveNoTLS(t *testing.T) { - testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigNoTLS())) + testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3LeaseGrantTimeToLiveClientTLS(t *testing.T) { - testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigClientTLS())) + testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3LeaseGrantTimeToLiveClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigClientAutoTLS())) + testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3LeaseGrantTimeToLivePeerTLS(t *testing.T) { - testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigPeerTLS())) + testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3LeaseGrantLeases(t *testing.T) { testCtl(t, leaseTestGrantLeaseListed) } func TestCtlV3LeaseGrantLeasesNoTLS(t *testing.T) { - testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigNoTLS())) + testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3LeaseGrantLeasesClientTLS(t *testing.T) { - testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigClientTLS())) + testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3LeaseGrantLeasesClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigClientAutoTLS())) + testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3LeaseGrantLeasesPeerTLS(t *testing.T) { - testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigPeerTLS())) + testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3LeaseTestTimeToLiveExpired(t *testing.T) { testCtl(t, leaseTestTimeToLiveExpired) } func TestCtlV3LeaseTestTimeToLiveExpiredNoTLS(t *testing.T) { - testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigNoTLS())) + testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3LeaseTestTimeToLiveExpiredClientTLS(t *testing.T) { - testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigClientTLS())) + testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3LeaseTestTimeToLiveExpiredClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigClientAutoTLS())) + testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3LeaseTestTimeToLiveExpiredPeerTLS(t *testing.T) { - testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigPeerTLS())) + testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3LeaseKeepAlive(t *testing.T) { testCtl(t, leaseTestKeepAlive) } func TestCtlV3LeaseKeepAliveNoTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*newConfigNoTLS())) + testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3LeaseKeepAliveClientTLS(t *testing.T) { - testCtl(t, 
leaseTestKeepAlive, withCfg(*newConfigClientTLS())) + testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3LeaseKeepAliveClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*newConfigClientAutoTLS())) + testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3LeaseKeepAlivePeerTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*newConfigPeerTLS())) + testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3LeaseKeepAliveOnce(t *testing.T) { testCtl(t, leaseTestKeepAliveOnce) } func TestCtlV3LeaseKeepAliveOnceNoTLS(t *testing.T) { - testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigNoTLS())) + testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3LeaseKeepAliveOnceClientTLS(t *testing.T) { - testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigClientTLS())) + testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3LeaseKeepAliveOnceClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigClientAutoTLS())) + testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3LeaseKeepAliveOncePeerTLS(t *testing.T) { - testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigPeerTLS())) + testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3LeaseRevoke(t *testing.T) { testCtl(t, leaseTestRevoked) } func TestCtlV3LeaseRevokeNoTLS(t *testing.T) { - testCtl(t, leaseTestRevoked, withCfg(*newConfigNoTLS())) + testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3LeaseRevokeClientTLS(t *testing.T) { - testCtl(t, leaseTestRevoked, withCfg(*newConfigClientTLS())) + testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3LeaseRevokeClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestRevoked, withCfg(*newConfigClientAutoTLS())) + testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3LeaseRevokePeerTLS(t *testing.T) { - testCtl(t, leaseTestRevoked, withCfg(*newConfigPeerTLS())) + testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigPeerTLS())) } func leaseTestGrantTimeToLive(cx ctlCtx) { @@ -113,7 +115,7 @@ func leaseTestGrantTimeToLive(cx ctlCtx) { } cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", id, "--keys") - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { cx.t.Fatalf("leaseTestGrantTimeToLive: error (%v)", err) } @@ -146,7 +148,7 @@ func leaseTestGrantLeasesList(cx ctlCtx) error { } cmdArgs := append(cx.PrefixArgs(), "lease", "list") - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return fmt.Errorf("lease list failed (%v)", err) } @@ -177,7 +179,7 @@ func leaseTestTimeToLiveExpire(cx ctlCtx, ttl int) error { time.Sleep(time.Duration(ttl+1) * time.Second) cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", leaseID) exp := fmt.Sprintf("lease %s already expired", leaseID) - if err = spawnWithExpectWithEnv(cmdArgs, cx.envMap, exp); err != nil { + if err = e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, exp); err != nil { return fmt.Errorf("lease not properly expired: (%v)", err) } if err := ctlV3Get(cx, []string{"key"}); err != nil { @@ -247,7 +249,7 @@ func leaseTestRevoke(cx ctlCtx) error { func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) { cmdArgs := append(cx.PrefixArgs(), "lease", "grant", strconv.Itoa(ttl)) - proc, err := 
spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return "", err } @@ -271,7 +273,7 @@ func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) { func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error { cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", leaseID) - proc, err := spawnCmd(cmdArgs, nil) + proc, err := e2e.SpawnCmd(cmdArgs, nil) if err != nil { return err } @@ -285,7 +287,7 @@ func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error { func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error { cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", "--once", leaseID) - proc, err := spawnCmd(cmdArgs, nil) + proc, err := e2e.SpawnCmd(cmdArgs, nil) if err != nil { return err } @@ -298,5 +300,5 @@ func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error { func ctlV3LeaseRevoke(cx ctlCtx, leaseID string) error { cmdArgs := append(cx.PrefixArgs(), "lease", "revoke", leaseID) - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("lease %s revoked", leaseID)) + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("lease %s revoked", leaseID)) } diff --git a/tests/e2e/ctl_v3_lock_test.go b/tests/e2e/ctl_v3_lock_test.go index 5330afb0a..26ea22501 100644 --- a/tests/e2e/ctl_v3_lock_test.go +++ b/tests/e2e/ctl_v3_lock_test.go @@ -22,6 +22,7 @@ import ( "time" "go.etcd.io/etcd/pkg/v3/expect" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3Lock(t *testing.T) { @@ -77,7 +78,7 @@ func testLock(cx ctlCtx) { if err = blocked.Signal(os.Interrupt); err != nil { cx.t.Fatal(err) } - if err = closeWithTimeout(blocked, time.Second); err != nil { + if err = e2e.CloseWithTimeout(blocked, time.Second); err != nil { cx.t.Fatal(err) } @@ -85,7 +86,7 @@ func testLock(cx ctlCtx) { if err = holder.Signal(os.Interrupt); err != nil { cx.t.Fatal(err) } - if err = closeWithTimeout(holder, 200*time.Millisecond+time.Second); err != nil { + if err = e2e.CloseWithTimeout(holder, 200*time.Millisecond+time.Second); err != nil { cx.t.Fatal(err) } @@ -119,7 +120,7 @@ func testLockWithCmd(cx ctlCtx) { // ctlV3Lock creates a lock process with a channel listening for when it acquires the lock. func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, error) { cmdArgs := append(cx.PrefixArgs(), "lock", name) - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) outc := make(chan string, 1) if err != nil { close(outc) @@ -140,5 +141,5 @@ func ctlV3LockWithCmd(cx ctlCtx, execCmd []string, as ...string) error { // use command as lock name cmdArgs := append(cx.PrefixArgs(), "lock", execCmd[0]) cmdArgs = append(cmdArgs, execCmd...) - return spawnWithExpects(cmdArgs, cx.envMap, as...) + return e2e.SpawnWithExpects(cmdArgs, cx.envMap, as...) 
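// A minimal sketch of how the lock helpers compose, mirroring testLock above (helper
// names are from this diff; the exact key delivered on outc is an assumption):
//
//	holder, outc, _ := ctlV3Lock(cx, "mylock") // first caller acquires the lock
//	cx.t.Logf("lock key: %s", <-outc)          // e.g. "mylock/<lease-id>" once held
//	blocked, _, _ := ctlV3Lock(cx, "mylock")   // second caller blocks on the same name
//	_ = blocked.Signal(os.Interrupt)           // interrupt the waiter first
//	_ = holder.Signal(os.Interrupt)            // then the holder, releasing the lock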
} diff --git a/tests/e2e/ctl_v3_make_mirror_test.go b/tests/e2e/ctl_v3_make_mirror_test.go index 491af15bd..deb4b50e4 100644 --- a/tests/e2e/ctl_v3_make_mirror_test.go +++ b/tests/e2e/ctl_v3_make_mirror_test.go @@ -18,6 +18,8 @@ import ( "fmt" "testing" "time" + + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) } @@ -59,16 +61,16 @@ func makeMirrorNoDestPrefixTest(cx ctlCtx) { func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) { // set up another cluster to mirror with - mirrorcfg := newConfigAutoTLS() - mirrorcfg.clusterSize = 1 - mirrorcfg.basePort = 10000 + mirrorcfg := e2e.NewConfigAutoTLS() + mirrorcfg.ClusterSize = 1 + mirrorcfg.BasePort = 10000 mirrorctx := ctlCtx{ t: cx.t, cfg: *mirrorcfg, dialTimeout: 7 * time.Second, } - mirrorepc, err := newEtcdProcessCluster(cx.t, &mirrorctx.cfg) + mirrorepc, err := e2e.NewEtcdProcessCluster(cx.t, &mirrorctx.cfg) if err != nil { cx.t.Fatalf("could not start etcd process cluster (%v)", err) } @@ -82,8 +84,8 @@ func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvEx cmdArgs := append(cx.PrefixArgs(), "make-mirror") cmdArgs = append(cmdArgs, flags...) - cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.basePort)) - proc, err := spawnCmd(cmdArgs, cx.envMap) + cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort)) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { cx.t.Fatal(err) } diff --git a/tests/e2e/ctl_v3_member_test.go b/tests/e2e/ctl_v3_member_test.go index f92526ed6..fe111424d 100644 --- a/tests/e2e/ctl_v3_member_test.go +++ b/tests/e2e/ctl_v3_member_test.go @@ -23,64 +23,69 @@ import ( "testing" "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3MemberList(t *testing.T) { testCtl(t, memberListTest) } func TestCtlV3MemberListWithHex(t *testing.T) { testCtl(t, memberListWithHexTest) } -func TestCtlV3MemberListNoTLS(t *testing.T) { testCtl(t, memberListTest, withCfg(*newConfigNoTLS())) } +func TestCtlV3MemberListNoTLS(t *testing.T) { + testCtl(t, memberListTest, withCfg(*e2e.NewConfigNoTLS())) +} func TestCtlV3MemberListClientTLS(t *testing.T) { - testCtl(t, memberListTest, withCfg(*newConfigClientTLS())) + testCtl(t, memberListTest, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3MemberListClientAutoTLS(t *testing.T) { - testCtl(t, memberListTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, memberListTest, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3MemberListPeerTLS(t *testing.T) { - testCtl(t, memberListTest, withCfg(*newConfigPeerTLS())) + testCtl(t, memberListTest, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3MemberRemove(t *testing.T) { testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig()) } func TestCtlV3MemberRemoveNoTLS(t *testing.T) { - testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigNoTLS())) + testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3MemberRemoveClientTLS(t *testing.T) { - testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigClientTLS())) + testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3MemberRemoveClientAutoTLS(t *testing.T) { testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg( - // default clusterSize is 1 - 
etcdProcessClusterConfig{ - clusterSize: 3, - isClientAutoTLS: true, - clientTLS: clientTLS, - initialToken: "new", + // default ClusterSize is 1 + e2e.EtcdProcessClusterConfig{ + ClusterSize: 3, + IsClientAutoTLS: true, + ClientTLS: e2e.ClientTLS, + InitialToken: "new", })) } func TestCtlV3MemberRemovePeerTLS(t *testing.T) { - testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigPeerTLS())) + testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3MemberAdd(t *testing.T) { testCtl(t, memberAddTest) } -func TestCtlV3MemberAddNoTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*newConfigNoTLS())) } +func TestCtlV3MemberAddNoTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3MemberAddClientTLS(t *testing.T) { - testCtl(t, memberAddTest, withCfg(*newConfigClientTLS())) + testCtl(t, memberAddTest, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3MemberAddClientAutoTLS(t *testing.T) { - testCtl(t, memberAddTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, memberAddTest, withCfg(*e2e.NewConfigClientAutoTLS())) +} +func TestCtlV3MemberAddPeerTLS(t *testing.T) { + testCtl(t, memberAddTest, withCfg(*e2e.NewConfigPeerTLS())) } -func TestCtlV3MemberAddPeerTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*newConfigPeerTLS())) } func TestCtlV3MemberAddForLearner(t *testing.T) { testCtl(t, memberAddForLearnerTest) } func TestCtlV3MemberUpdate(t *testing.T) { testCtl(t, memberUpdateTest) } func TestCtlV3MemberUpdateNoTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*newConfigNoTLS())) + testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3MemberUpdateClientTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*newConfigClientTLS())) + testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3MemberUpdateClientAutoTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3MemberUpdatePeerTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*newConfigPeerTLS())) + testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigPeerTLS())) } func memberListTest(cx ctlCtx) { @@ -91,17 +96,17 @@ func memberListTest(cx ctlCtx) { func ctlV3MemberList(cx ctlCtx) error { cmdArgs := append(cx.PrefixArgs(), "member", "list") - lines := make([]string, cx.cfg.clusterSize) + lines := make([]string, cx.cfg.ClusterSize) for i := range lines { lines[i] = "started" } - return spawnWithExpects(cmdArgs, cx.envMap, lines...) + return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) 
} func getMemberList(cx ctlCtx) (etcdserverpb.MemberListResponse, error) { cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "member", "list") - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return etcdserverpb.MemberListResponse{}, err } @@ -130,7 +135,7 @@ func memberListWithHexTest(cx ctlCtx) { cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "--hex", "member", "list") - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { cx.t.Fatalf("memberListWithHexTest error (%v)", err) } @@ -177,17 +182,17 @@ func memberRemoveTest(cx ctlCtx) { func ctlV3MemberRemove(cx ctlCtx, ep, memberID, clusterID string) error { cmdArgs := append(cx.prefixArgs([]string{ep}), "member", "remove", memberID) - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID)) + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID)) } func memberAddTest(cx ctlCtx) { - if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11), false); err != nil { + if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11), false); err != nil { cx.t.Fatal(err) } } func memberAddForLearnerTest(cx ctlCtx) { - if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11), true); err != nil { + if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11), true); err != nil { cx.t.Fatal(err) } } @@ -197,7 +202,7 @@ func ctlV3MemberAdd(cx ctlCtx, peerURL string, isLearner bool) error { if isLearner { cmdArgs = append(cmdArgs, "--learner") } - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, " added to cluster ") + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, " added to cluster ") } func memberUpdateTest(cx ctlCtx) { @@ -206,7 +211,7 @@ func memberUpdateTest(cx ctlCtx) { cx.t.Fatal(err) } - peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11) + peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11) memberID := fmt.Sprintf("%x", mr.Members[0].ID) if err = ctlV3MemberUpdate(cx, memberID, peerURL); err != nil { cx.t.Fatal(err) @@ -215,5 +220,5 @@ func memberUpdateTest(cx ctlCtx) { func ctlV3MemberUpdate(cx ctlCtx, memberID, peerURL string) error { cmdArgs := append(cx.PrefixArgs(), "member", "update", memberID, fmt.Sprintf("--peer-urls=%s", peerURL)) - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, " updated in cluster ") + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, " updated in cluster ") } diff --git a/tests/e2e/ctl_v3_move_leader_test.go b/tests/e2e/ctl_v3_move_leader_test.go index 05dc49939..c0a6a2492 100644 --- a/tests/e2e/ctl_v3_move_leader_test.go +++ b/tests/e2e/ctl_v3_move_leader_test.go @@ -25,18 +25,19 @@ import ( "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3MoveLeaderSecure(t *testing.T) { - testCtlV3MoveLeader(t, *newConfigTLS()) + testCtlV3MoveLeader(t, *e2e.NewConfigTLS()) } func TestCtlV3MoveLeaderInsecure(t *testing.T) { - testCtlV3MoveLeader(t, *newConfigNoTLS()) + testCtlV3MoveLeader(t, *e2e.NewConfigNoTLS()) } -func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) { - BeforeTest(t) +func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig) { + e2e.BeforeTest(t) 
epc := setupEtcdctlTest(t, &cfg, true) defer func() { @@ -46,11 +47,11 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) { }() var tcfg *tls.Config - if cfg.clientTLS == clientTLS { + if cfg.ClientTLS == e2e.ClientTLS { tinfo := transport.TLSInfo{ - CertFile: certPath, - KeyFile: privateKeyPath, - TrustedCAFile: caPath, + CertFile: e2e.CertPath, + KeyFile: e2e.PrivateKeyPath, + TrustedCAFile: e2e.CaPath, } var err error tcfg, err = tinfo.ClientConfig() @@ -91,7 +92,7 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) { defer os.Unsetenv("ETCDCTL_API") cx := ctlCtx{ t: t, - cfg: *newConfigNoTLS(), + cfg: *e2e.NewConfigNoTLS(), dialTimeout: 7 * time.Second, epc: epc, } @@ -112,7 +113,7 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) { for i, tc := range tests { prefix := cx.prefixArgs(tc.eps) cmdArgs := append(prefix, "move-leader", types.ID(transferee).String()) - if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, tc.expect); err != nil { + if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, tc.expect); err != nil { t.Fatalf("#%d: %v", i, err) } } diff --git a/tests/e2e/ctl_v3_role_test.go b/tests/e2e/ctl_v3_role_test.go index 2ca2152f3..2c15660bc 100644 --- a/tests/e2e/ctl_v3_role_test.go +++ b/tests/e2e/ctl_v3_role_test.go @@ -17,14 +17,18 @@ package e2e import ( "fmt" "testing" + + "go.etcd.io/etcd/tests/v3/framework/e2e" ) -func TestCtlV3RoleAdd(t *testing.T) { testCtl(t, roleAddTest) } -func TestCtlV3RootRoleGet(t *testing.T) { testCtl(t, rootRoleGetTest) } -func TestCtlV3RoleAddNoTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigNoTLS())) } -func TestCtlV3RoleAddClientTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigClientTLS())) } -func TestCtlV3RoleAddPeerTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigPeerTLS())) } -func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) } +func TestCtlV3RoleAdd(t *testing.T) { testCtl(t, roleAddTest) } +func TestCtlV3RootRoleGet(t *testing.T) { testCtl(t, rootRoleGetTest) } +func TestCtlV3RoleAddNoTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*e2e.NewConfigNoTLS())) } +func TestCtlV3RoleAddClientTLS(t *testing.T) { + testCtl(t, roleAddTest, withCfg(*e2e.NewConfigClientTLS())) +} +func TestCtlV3RoleAddPeerTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*e2e.NewConfigPeerTLS())) } +func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) } func TestCtlV3RoleGrant(t *testing.T) { testCtl(t, roleGrantTest) } @@ -140,13 +144,13 @@ func ctlV3RoleMultiExpect(cx ctlCtx, args []string, expStr ...string) error { cmdArgs := append(cx.PrefixArgs(), "role") cmdArgs = append(cmdArgs, args...) - return spawnWithExpects(cmdArgs, cx.envMap, expStr...) + return e2e.SpawnWithExpects(cmdArgs, cx.envMap, expStr...) } func ctlV3Role(cx ctlCtx, args []string, expStr string) error { cmdArgs := append(cx.PrefixArgs(), "role") cmdArgs = append(cmdArgs, args...) - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, expStr) + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expStr) } func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) error { @@ -160,7 +164,7 @@ func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) err cmdArgs = append(cmdArgs, rolename) cmdArgs = append(cmdArgs, grantingPermToArgs(perm)...) 
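// e2e.SpawnCmd below is assumed to return a *expect.ExpectProcess (it is used that way
// in ctlV3Lock above), so helpers like this one can block on proc.Expect(<wanted line>)
// rather than polling command output.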
- proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return err } @@ -186,7 +190,7 @@ func ctlV3RoleRevokePermission(cx ctlCtx, rolename string, key, rangeEnd string, expStr = fmt.Sprintf("Permission of key %s is revoked from role %s", key, rolename) } - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return err } diff --git a/tests/e2e/ctl_v3_snapshot_test.go b/tests/e2e/ctl_v3_snapshot_test.go index 1ee6eb1eb..59e1dc770 100644 --- a/tests/e2e/ctl_v3_snapshot_test.go +++ b/tests/e2e/ctl_v3_snapshot_test.go @@ -26,6 +26,7 @@ import ( "go.etcd.io/etcd/etcdutl/v3/snapshot" "go.etcd.io/etcd/pkg/v3/expect" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3Snapshot(t *testing.T) { testCtl(t, snapshotTest) } @@ -84,7 +85,7 @@ func snapshotCorruptTest(cx ctlCtx) { datadir := cx.t.TempDir() - serr := spawnWithExpectWithEnv( + serr := e2e.SpawnWithExpectWithEnv( append(cx.PrefixArgsUtl(), "snapshot", "restore", "--data-dir", datadir, fpath), @@ -118,7 +119,7 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) { dataDir := cx.t.TempDir() defer os.RemoveAll(dataDir) - serr := spawnWithExpectWithEnv( + serr := e2e.SpawnWithExpectWithEnv( append(cx.PrefixArgsUtl(), "snapshot", "restore", "--data-dir", dataDir, fpath), @@ -131,13 +132,13 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) { func ctlV3SnapshotSave(cx ctlCtx, fpath string) error { cmdArgs := append(cx.PrefixArgs(), "snapshot", "save", fpath) - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("Snapshot saved at %s", fpath)) + return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("Snapshot saved at %s", fpath)) } func getSnapshotStatus(cx ctlCtx, fpath string) (snapshot.Status, error) { cmdArgs := append(cx.PrefixArgsUtl(), "--write-out", "json", "snapshot", "status", fpath) - proc, err := spawnCmd(cmdArgs, nil) + proc, err := e2e.SpawnCmd(cmdArgs, nil) if err != nil { return snapshot.Status{}, err } @@ -172,14 +173,14 @@ func testIssue6361(t *testing.T, etcdutl bool) { os.Setenv("EXPECT_DEBUG", "1") } - BeforeTest(t) + e2e.BeforeTest(t) os.Setenv("ETCDCTL_API", "3") defer os.Unsetenv("ETCDCTL_API") - epc, err := newEtcdProcessCluster(t, &etcdProcessClusterConfig{ - clusterSize: 1, - initialToken: "new", - keepDataDir: true, + epc, err := e2e.NewEtcdProcessCluster(t, &e2e.EtcdProcessClusterConfig{ + ClusterSize: 1, + InitialToken: "new", + KeepDataDir: true, }) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) @@ -191,12 +192,12 @@ func testIssue6361(t *testing.T, etcdutl bool) { }() dialTimeout := 10 * time.Second - prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} + prefixArgs := []string{e2e.CtlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} t.Log("Writing some keys...") kvs := []kv{{"foo1", "val1"}, {"foo2", "val2"}, {"foo3", "val3"}} for i := range kvs { - if err = spawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), "OK"); err != nil { + if err = e2e.SpawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), "OK"); err != nil { t.Fatal(err) } } @@ -204,7 +205,7 @@ func testIssue6361(t *testing.T, etcdutl bool) { fpath := filepath.Join(t.TempDir(), "test.snapshot") t.Log("etcdctl saving snapshot...") - if err = spawnWithExpects(append(prefixArgs, "snapshot", "save", fpath), + if err = 
e2e.SpawnWithExpects(append(prefixArgs, "snapshot", "save", fpath), nil, fmt.Sprintf("Snapshot saved at %s", fpath), ); err != nil { @@ -212,45 +213,45 @@ func testIssue6361(t *testing.T, etcdutl bool) { } t.Log("Stopping the original server...") - if err = epc.procs[0].Stop(); err != nil { + if err = epc.Procs[0].Stop(); err != nil { t.Fatal(err) } newDataDir := filepath.Join(t.TempDir(), "test.data") - uctlBinPath := ctlBinPath + uctlBinPath := e2e.CtlBinPath if etcdutl { - uctlBinPath = utlBinPath + uctlBinPath = e2e.UtlBinPath } t.Log("etcdctl restoring the snapshot...") - err = spawnWithExpect([]string{uctlBinPath, "snapshot", "restore", fpath, "--name", epc.procs[0].Config().name, "--initial-cluster", epc.procs[0].Config().initialCluster, "--initial-cluster-token", epc.procs[0].Config().initialToken, "--initial-advertise-peer-urls", epc.procs[0].Config().purl.String(), "--data-dir", newDataDir}, "added member") + err = e2e.SpawnWithExpect([]string{uctlBinPath, "snapshot", "restore", fpath, "--name", epc.Procs[0].Config().Name, "--initial-cluster", epc.Procs[0].Config().InitialCluster, "--initial-cluster-token", epc.Procs[0].Config().InitialToken, "--initial-advertise-peer-urls", epc.Procs[0].Config().Purl.String(), "--data-dir", newDataDir}, "added member") if err != nil { t.Fatal(err) } t.Log("(Re)starting the etcd member using the restored snapshot...") - epc.procs[0].Config().dataDirPath = newDataDir - for i := range epc.procs[0].Config().args { - if epc.procs[0].Config().args[i] == "--data-dir" { - epc.procs[0].Config().args[i+1] = newDataDir + epc.Procs[0].Config().DataDirPath = newDataDir + for i := range epc.Procs[0].Config().Args { + if epc.Procs[0].Config().Args[i] == "--data-dir" { + epc.Procs[0].Config().Args[i+1] = newDataDir } } - if err = epc.procs[0].Restart(); err != nil { + if err = epc.Procs[0].Restart(); err != nil { t.Fatal(err) } t.Log("Ensuring the restored member has the correct data...") for i := range kvs { - if err = spawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil { + if err = e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil { t.Fatal(err) } } t.Log("Adding new member into the cluster") - clientURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+30) - peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+31) - err = spawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), " added to cluster ") + clientURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+30) + peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+31) + err = e2e.SpawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), " added to cluster ") if err != nil { t.Fatal(err) } @@ -259,12 +260,12 @@ func testIssue6361(t *testing.T, etcdutl bool) { defer os.RemoveAll(newDataDir2) name2 := "infra2" - initialCluster2 := epc.procs[0].Config().initialCluster + fmt.Sprintf(",%s=%s", name2, peerURL) + initialCluster2 := epc.Procs[0].Config().InitialCluster + fmt.Sprintf(",%s=%s", name2, peerURL) t.Log("Starting the new member") // start the new member var nepc *expect.ExpectProcess - nepc, err = spawnCmd([]string{epc.procs[0].Config().execPath, "--name", name2, + nepc, err = e2e.SpawnCmd([]string{epc.Procs[0].Config().ExecPath, "--name", name2, "--listen-client-urls", clientURL, "--advertise-client-urls", clientURL, "--listen-peer-urls", peerURL, "--initial-advertise-peer-urls", peerURL, 
"--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2}, nil) @@ -275,11 +276,11 @@ func testIssue6361(t *testing.T, etcdutl bool) { t.Fatal(err) } - prefixArgs = []string{ctlBinPath, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()} + prefixArgs = []string{e2e.CtlBinPath, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()} t.Log("Ensuring added member has data from incoming snapshot...") for i := range kvs { - if err = spawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil { + if err = e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil { t.Fatal(err) } } @@ -292,12 +293,12 @@ func testIssue6361(t *testing.T, etcdutl bool) { } // For storageVersion to be stored, all fields expected 3.6 fields need to be set. This happens after first WAL snapshot. -// In this test we lower snapshotCount to 1 to ensure WAL snapshot is triggered. +// In this test we lower SnapshotCount to 1 to ensure WAL snapshot is triggered. func TestCtlV3SnapshotVersion(t *testing.T) { - testCtl(t, snapshotVersionTest, withCfg(etcdProcessClusterConfig{snapshotCount: 1})) + testCtl(t, snapshotVersionTest, withCfg(e2e.EtcdProcessClusterConfig{SnapshotCount: 1})) } func TestCtlV3SnapshotVersionEtcdutl(t *testing.T) { - testCtl(t, snapshotVersionTest, withEtcdutl(), withCfg(etcdProcessClusterConfig{snapshotCount: 1})) + testCtl(t, snapshotVersionTest, withEtcdutl(), withCfg(e2e.EtcdProcessClusterConfig{SnapshotCount: 1})) } func snapshotVersionTest(cx ctlCtx) { diff --git a/tests/e2e/ctl_v3_test.go b/tests/e2e/ctl_v3_test.go index 320b81e64..05c75b816 100644 --- a/tests/e2e/ctl_v3_test.go +++ b/tests/e2e/ctl_v3_test.go @@ -26,12 +26,13 @@ import ( "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/pkg/v3/flags" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3Version(t *testing.T) { testCtl(t, versionTest) } func TestClusterVersion(t *testing.T) { - BeforeTest(t) + e2e.BeforeTest(t) tests := []struct { name string @@ -49,18 +50,18 @@ func TestClusterVersion(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - binary := binDir + "/etcd" + binary := e2e.BinDir + "/etcd" if !fileutil.Exist(binary) { t.Skipf("%q does not exist", binary) } - BeforeTest(t) - cfg := newConfigNoTLS() - cfg.execPath = binary - cfg.snapshotCount = 3 - cfg.baseScheme = "unix" // to avoid port conflict - cfg.rollingStart = tt.rollingStart + e2e.BeforeTest(t) + cfg := e2e.NewConfigNoTLS() + cfg.ExecPath = binary + cfg.SnapshotCount = 3 + cfg.BaseScheme = "unix" // to avoid port conflict + cfg.RollingStart = tt.rollingStart - epc, err := newEtcdProcessCluster(t, cfg) + epc, err := e2e.NewEtcdProcessCluster(t, cfg) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) } @@ -90,7 +91,7 @@ func versionTest(cx ctlCtx) { func clusterVersionTest(cx ctlCtx, expected string) { var err error for i := 0; i < 35; i++ { - if err = cURLGet(cx.epc, cURLReq{endpoint: "/version", expected: expected}); err != nil { + if err = e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: "/version", Expected: expected}); err != nil { cx.t.Logf("#%d: v3 is not ready yet (%v)", i, err) time.Sleep(200 * time.Millisecond) continue @@ -104,17 +105,17 @@ func clusterVersionTest(cx ctlCtx, expected string) { func ctlV3Version(cx ctlCtx) error { cmdArgs := append(cx.PrefixArgs(), "version") - return spawnWithExpectWithEnv(cmdArgs, cx.envMap, version.Version) + 
return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, version.Version) } -// TestCtlV3DialWithHTTPScheme ensures that client handles endpoints with HTTPS scheme. +// TestCtlV3DialWithHTTPScheme ensures that the client handles endpoints with an HTTPS scheme. func TestCtlV3DialWithHTTPScheme(t *testing.T) { - testCtl(t, dialWithSchemeTest, withCfg(*newConfigClientTLS())) + testCtl(t, dialWithSchemeTest, withCfg(*e2e.NewConfigClientTLS())) } func dialWithSchemeTest(cx ctlCtx) { cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar") - if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil { + if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil { cx.t.Fatal(err) } } @@ -122,12 +123,12 @@ func dialWithSchemeTest(cx ctlCtx) { type ctlCtx struct { t *testing.T apiPrefix string - cfg etcdProcessClusterConfig + cfg e2e.EtcdProcessClusterConfig quotaBackendBytes int64 corruptFunc func(string) error noStrictReconfig bool - epc *etcdProcessCluster + epc *e2e.EtcdProcessCluster envMap map[string]string @@ -160,7 +161,7 @@ func (cx *ctlCtx) applyOpts(opts []ctlOption) { cx.initialCorruptCheck = true } -func withCfg(cfg etcdProcessClusterConfig) ctlOption { +func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption { return func(cx *ctlCtx) { cx.cfg = cfg } } @@ -213,35 +214,35 @@ func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) { } func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), opts ...ctlOption) { - BeforeTest(t) + e2e.BeforeTest(t) ret := ctlCtx{ t: t, - cfg: *newConfigAutoTLS(), + cfg: *e2e.NewConfigAutoTLS(), dialTimeout: 7 * time.Second, } ret.applyOpts(opts) if !ret.quorum { - ret.cfg = *configStandalone(ret.cfg) + ret.cfg = *e2e.ConfigStandalone(ret.cfg) } if ret.quotaBackendBytes > 0 { - ret.cfg.quotaBackendBytes = ret.quotaBackendBytes + ret.cfg.QuotaBackendBytes = ret.quotaBackendBytes } - ret.cfg.noStrictReconfig = ret.noStrictReconfig + ret.cfg.NoStrictReconfig = ret.noStrictReconfig if ret.initialCorruptCheck { - ret.cfg.initialCorruptCheck = ret.initialCorruptCheck + ret.cfg.InitialCorruptCheck = ret.initialCorruptCheck } if testOfflineFunc != nil { - ret.cfg.keepDataDir = true + ret.cfg.KeepDataDir = true } - epc, err := newEtcdProcessCluster(t, &ret.cfg) + epc, err := e2e.NewEtcdProcessCluster(t, &ret.cfg) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) } ret.epc = epc - ret.dataDir = epc.procs[0].Config().dataDirPath + ret.dataDir = epc.Procs[0].Config().DataDirPath defer func() { if ret.envMap != nil { @@ -288,18 +289,18 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string { fmap := make(map[string]string) fmap["endpoints"] = strings.Join(eps, ",") fmap["dial-timeout"] = cx.dialTimeout.String() - if cx.epc.cfg.clientTLS == clientTLS { - if cx.epc.cfg.isClientAutoTLS { + if cx.epc.Cfg.ClientTLS == e2e.ClientTLS { + if cx.epc.Cfg.IsClientAutoTLS { fmap["insecure-transport"] = "false" fmap["insecure-skip-tls-verify"] = "true" - } else if cx.epc.cfg.isClientCRL { - fmap["cacert"] = caPath - fmap["cert"] = revokedCertPath - fmap["key"] = revokedPrivateKeyPath + } else if cx.epc.Cfg.IsClientCRL { + fmap["cacert"] = e2e.CaPath + fmap["cert"] = e2e.RevokedCertPath + fmap["key"] = e2e.RevokedPrivateKeyPath } else { - fmap["cacert"] = caPath - fmap["cert"] = certPath - fmap["key"] = privateKeyPath + fmap["cacert"] = e2e.CaPath + fmap["cert"] = e2e.CertPath + fmap["key"] = e2e.PrivateKeyPath } } if cx.user != "" { @@ -308,7 +309,7 @@ func (cx *ctlCtx)
prefixArgs(eps []string) []string { useEnv := cx.envMap != nil - cmdArgs := []string{ctlBinPath + "3"} + cmdArgs := []string{e2e.CtlBinPath + "3"} for k, v := range fmap { if useEnv { ek := flags.FlagToEnv("ETCDCTL", k) @@ -331,9 +332,9 @@ func (cx *ctlCtx) PrefixArgs() []string { -// Please not thet 'utl' compatible commands does not consume --endpoints flag. +// Please note that 'utl' compatible commands do not consume the --endpoints flag. func (cx *ctlCtx) PrefixArgsUtl() []string { if cx.etcdutl { - return []string{utlBinPath} + return []string{e2e.UtlBinPath} } - return []string{ctlBinPath} + return []string{e2e.CtlBinPath} } func isGRPCTimedout(err error) bool { @@ -341,7 +342,7 @@ func isGRPCTimedout(err error) bool { } func (cx *ctlCtx) memberToRemove() (ep string, memberID string, clusterID string) { - n1 := cx.cfg.clusterSize + n1 := cx.cfg.ClusterSize if n1 < 2 { cx.t.Fatalf("%d-node is too small to test 'member remove'", n1) } diff --git a/tests/e2e/ctl_v3_txn_test.go b/tests/e2e/ctl_v3_txn_test.go index 6fd4ed16b..af5011607 100644 --- a/tests/e2e/ctl_v3_txn_test.go +++ b/tests/e2e/ctl_v3_txn_test.go @@ -14,19 +14,23 @@ package e2e -import "testing" +import ( + "testing" + + "go.etcd.io/etcd/tests/v3/framework/e2e" +) func TestCtlV3TxnInteractiveSuccess(t *testing.T) { testCtl(t, txnTestSuccess, withInteractive()) } func TestCtlV3TxnInteractiveSuccessNoTLS(t *testing.T) { - testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigNoTLS())) + testCtl(t, txnTestSuccess, withInteractive(), withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3TxnInteractiveSuccessClientTLS(t *testing.T) { - testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigClientTLS())) + testCtl(t, txnTestSuccess, withInteractive(), withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3TxnInteractiveSuccessPeerTLS(t *testing.T) { - testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigPeerTLS())) + testCtl(t, txnTestSuccess, withInteractive(), withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3TxnInteractiveFail(t *testing.T) { testCtl(t, txnTestFail, withInteractive()) @@ -102,7 +106,7 @@ func ctlV3Txn(cx ctlCtx, rqs txnRequests) error { if cx.interactive { cmdArgs = append(cmdArgs, "--interactive") } - proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return err } diff --git a/tests/e2e/ctl_v3_user_test.go b/tests/e2e/ctl_v3_user_test.go index d4e409a17..1bda2045e 100644 --- a/tests/e2e/ctl_v3_user_test.go +++ b/tests/e2e/ctl_v3_user_test.go @@ -14,44 +14,56 @@ package e2e -import "testing" +import ( + "testing" -func TestCtlV3UserAdd(t *testing.T) { testCtl(t, userAddTest) } -func TestCtlV3UserAddNoTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigNoTLS())) } -func TestCtlV3UserAddClientTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigClientTLS())) } -func TestCtlV3UserAddPeerTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigPeerTLS())) } -func TestCtlV3UserAddTimeout(t *testing.T) { testCtl(t, userAddTest, withDialTimeout(0)) } + "go.etcd.io/etcd/tests/v3/framework/e2e" +) + +func TestCtlV3UserAdd(t *testing.T) { testCtl(t, userAddTest) } +func TestCtlV3UserAddNoTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*e2e.NewConfigNoTLS())) } +func TestCtlV3UserAddClientTLS(t *testing.T) { + testCtl(t, userAddTest, withCfg(*e2e.NewConfigClientTLS())) +} +func TestCtlV3UserAddPeerTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*e2e.NewConfigPeerTLS())) } +func TestCtlV3UserAddTimeout(t *testing.T) { testCtl(t, userAddTest, withDialTimeout(0)) } func
TestCtlV3UserAddClientAutoTLS(t *testing.T) { - testCtl(t, userAddTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, userAddTest, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3UserList(t *testing.T) { testCtl(t, userListTest) } -func TestCtlV3UserListNoTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*newConfigNoTLS())) } +func TestCtlV3UserListNoTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3UserListClientTLS(t *testing.T) { - testCtl(t, userListTest, withCfg(*newConfigClientTLS())) + testCtl(t, userListTest, withCfg(*e2e.NewConfigClientTLS())) +} +func TestCtlV3UserListPeerTLS(t *testing.T) { + testCtl(t, userListTest, withCfg(*e2e.NewConfigPeerTLS())) } -func TestCtlV3UserListPeerTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*newConfigPeerTLS())) } func TestCtlV3UserListClientAutoTLS(t *testing.T) { - testCtl(t, userListTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, userListTest, withCfg(*e2e.NewConfigClientAutoTLS())) } func TestCtlV3UserDelete(t *testing.T) { testCtl(t, userDelTest) } -func TestCtlV3UserDeleteNoTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*newConfigNoTLS())) } +func TestCtlV3UserDeleteNoTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3UserDeleteClientTLS(t *testing.T) { - testCtl(t, userDelTest, withCfg(*newConfigClientTLS())) + testCtl(t, userDelTest, withCfg(*e2e.NewConfigClientTLS())) +} +func TestCtlV3UserDeletePeerTLS(t *testing.T) { + testCtl(t, userDelTest, withCfg(*e2e.NewConfigPeerTLS())) } -func TestCtlV3UserDeletePeerTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*newConfigPeerTLS())) } func TestCtlV3UserDeleteClientAutoTLS(t *testing.T) { - testCtl(t, userDelTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, userDelTest, withCfg(*e2e.NewConfigClientAutoTLS())) +} +func TestCtlV3UserPasswd(t *testing.T) { testCtl(t, userPasswdTest) } +func TestCtlV3UserPasswdNoTLS(t *testing.T) { + testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigNoTLS())) } -func TestCtlV3UserPasswd(t *testing.T) { testCtl(t, userPasswdTest) } -func TestCtlV3UserPasswdNoTLS(t *testing.T) { testCtl(t, userPasswdTest, withCfg(*newConfigNoTLS())) } func TestCtlV3UserPasswdClientTLS(t *testing.T) { - testCtl(t, userPasswdTest, withCfg(*newConfigClientTLS())) + testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3UserPasswdPeerTLS(t *testing.T) { - testCtl(t, userPasswdTest, withCfg(*newConfigPeerTLS())) + testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3UserPasswdClientAutoTLS(t *testing.T) { - testCtl(t, userPasswdTest, withCfg(*newConfigClientAutoTLS())) + testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigClientAutoTLS())) } type userCmdDesc struct { @@ -179,7 +191,7 @@ func ctlV3User(cx ctlCtx, args []string, expStr string, stdIn []string) error { cmdArgs := append(cx.PrefixArgs(), "user") cmdArgs = append(cmdArgs, args...) 
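// "user add" and "user passwd" prompt for the password on stdin; the remainder of this
// helper (elided here) is assumed to answer each prompt from stdIn via the expect API, e.g.:
//
//	_, _ = proc.Expect("Password:") // wait for the prompt
//	_ = proc.Send(stdIn[0] + "\r")  // type the answer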
- proc, err := spawnCmd(cmdArgs, cx.envMap) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) if err != nil { return err } diff --git a/tests/e2e/ctl_v3_watch_cov_test.go b/tests/e2e/ctl_v3_watch_cov_test.go index 8d2fd04d6..8214734da 100644 --- a/tests/e2e/ctl_v3_watch_cov_test.go +++ b/tests/e2e/ctl_v3_watch_cov_test.go @@ -20,25 +20,27 @@ package e2e import ( "os" "testing" + + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) } -func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigNoTLS())) } -func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigClientTLS())) } -func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigPeerTLS())) } +func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) } +func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) } +func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) } func TestCtlV3WatchInteractive(t *testing.T) { testCtl(t, watchTest, withInteractive()) } func TestCtlV3WatchInteractiveNoTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*newConfigNoTLS())) + testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3WatchInteractiveClientTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*newConfigClientTLS())) + testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3WatchInteractivePeerTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*newConfigPeerTLS())) + testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigPeerTLS())) } func watchTest(cx ctlCtx) { diff --git a/tests/e2e/ctl_v3_watch_no_cov_test.go b/tests/e2e/ctl_v3_watch_no_cov_test.go index a952aa4a4..85ca863ba 100644 --- a/tests/e2e/ctl_v3_watch_no_cov_test.go +++ b/tests/e2e/ctl_v3_watch_no_cov_test.go @@ -20,25 +20,27 @@ package e2e import ( "os" "testing" + + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) } -func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigNoTLS())) } -func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigClientTLS())) } -func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigPeerTLS())) } +func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) } +func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) } +func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) } func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) } func TestCtlV3WatchInteractive(t *testing.T) { testCtl(t, watchTest, withInteractive()) } func TestCtlV3WatchInteractiveNoTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*newConfigNoTLS())) + testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigNoTLS())) } func TestCtlV3WatchInteractiveClientTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*newConfigClientTLS())) + testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigClientTLS())) } func TestCtlV3WatchInteractivePeerTLS(t *testing.T) { - testCtl(t, watchTest, 
withInteractive(), withCfg(*newConfigPeerTLS())) + testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigPeerTLS())) } func watchTest(cx ctlCtx) { diff --git a/tests/e2e/ctl_v3_watch_test.go b/tests/e2e/ctl_v3_watch_test.go index 0e0f24e94..bec43224e 100644 --- a/tests/e2e/ctl_v3_watch_test.go +++ b/tests/e2e/ctl_v3_watch_test.go @@ -14,7 +14,11 @@ package e2e -import "strings" +import ( + "strings" + + "go.etcd.io/etcd/tests/v3/framework/e2e" +) type kvExec struct { key, val string @@ -35,7 +39,7 @@ func setupWatchArgs(cx ctlCtx, args []string) []string { func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error { cmdArgs := setupWatchArgs(cx, args) - proc, err := spawnCmd(cmdArgs, nil) + proc, err := e2e.SpawnCmd(cmdArgs, nil) if err != nil { return err } @@ -66,7 +70,7 @@ func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error { func ctlV3WatchFailPerm(cx ctlCtx, args []string) error { cmdArgs := setupWatchArgs(cx, args) - proc, err := spawnCmd(cmdArgs, nil) + proc, err := e2e.SpawnCmd(cmdArgs, nil) if err != nil { return err } diff --git a/tests/e2e/etcd_config_test.go b/tests/e2e/etcd_config_test.go index 266cb971b..055e1ead3 100644 --- a/tests/e2e/etcd_config_test.go +++ b/tests/e2e/etcd_config_test.go @@ -22,18 +22,19 @@ import ( "testing" "go.etcd.io/etcd/pkg/v3/expect" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) const exampleConfigFile = "../../etcd.conf.yml.sample" func TestEtcdExampleConfig(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) - proc, err := spawnCmd([]string{binDir + "/etcd", "--config-file", exampleConfigFile}, nil) + proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--config-file", exampleConfigFile}, nil) if err != nil { t.Fatal(err) } - if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil { + if err = e2e.WaitReadyExpectProc(proc, e2e.EtcdServerReadyLines); err != nil { t.Fatal(err) } if err = proc.Stop(); err != nil { @@ -42,11 +43,11 @@ func TestEtcdExampleConfig(t *testing.T) { } func TestEtcdMultiPeer(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) peers, tmpdirs := make([]string, 3), make([]string, 3) for i := range peers { - peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, etcdProcessBasePort+i) + peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i) d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i)) if err != nil { t.Fatal(err) @@ -66,16 +67,16 @@ func TestEtcdMultiPeer(t *testing.T) { }() for i := range procs { args := []string{ - binDir + "/etcd", + e2e.BinDir + "/etcd", "--name", fmt.Sprintf("e%d", i), "--listen-client-urls", "http://0.0.0.0:0", "--data-dir", tmpdirs[i], "--advertise-client-urls", "http://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i), - "--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", etcdProcessBasePort+i), + "--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i), + "--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i), "--initial-cluster", ic, } - p, err := spawnCmd(args, nil) + p, err := e2e.SpawnCmd(args, nil) if err != nil { t.Fatal(err) } @@ -83,7 +84,7 @@ func TestEtcdMultiPeer(t *testing.T) { } for _, p := range procs { - if err := waitReadyExpectProc(p, etcdServerReadyLines); err != nil { + if err := e2e.WaitReadyExpectProc(p, e2e.EtcdServerReadyLines); err != nil { 
t.Fatal(err) } } @@ -91,16 +92,16 @@ func TestEtcdMultiPeer(t *testing.T) { // TestEtcdUnixPeers checks that etcd will boot with unix socket peers. func TestEtcdUnixPeers(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) d, err := ioutil.TempDir("", "e1.etcd") if err != nil { t.Fatal(err) } defer os.RemoveAll(d) - proc, err := spawnCmd( + proc, err := e2e.SpawnCmd( []string{ - binDir + "/etcd", + e2e.BinDir + "/etcd", "--data-dir", d, "--name", "e1", "--listen-peer-urls", "unix://etcd.unix:1", @@ -112,7 +113,7 @@ func TestEtcdUnixPeers(t *testing.T) { if err != nil { t.Fatal(err) } - if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil { + if err = e2e.WaitReadyExpectProc(proc, e2e.EtcdServerReadyLines); err != nil { t.Fatal(err) } if err = proc.Stop(); err != nil { @@ -122,11 +123,11 @@ func TestEtcdUnixPeers(t *testing.T) { // TestEtcdPeerCNAuth checks that the inter peer auth based on CN of cert is working correctly. func TestEtcdPeerCNAuth(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) peers, tmpdirs := make([]string, 3), make([]string, 3) for i := range peers { - peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i) + peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i) d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i)) if err != nil { t.Fatal(err) @@ -148,34 +149,34 @@ func TestEtcdPeerCNAuth(t *testing.T) { // node 0 and 1 have a cert with the correct CN, node 2 doesn't for i := range procs { commonArgs := []string{ - binDir + "/etcd", + e2e.BinDir + "/etcd", "--name", fmt.Sprintf("e%d", i), "--listen-client-urls", "http://0.0.0.0:0", "--data-dir", tmpdirs[i], "--advertise-client-urls", "http://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i), - "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", etcdProcessBasePort+i), + "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i), + "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i), "--initial-cluster", ic, } var args []string if i <= 1 { args = []string{ - "--peer-cert-file", certPath, - "--peer-key-file", privateKeyPath, - "--peer-client-cert-file", certPath, - "--peer-client-key-file", privateKeyPath, - "--peer-trusted-ca-file", caPath, + "--peer-cert-file", e2e.CertPath, + "--peer-key-file", e2e.PrivateKeyPath, + "--peer-client-cert-file", e2e.CertPath, + "--peer-client-key-file", e2e.PrivateKeyPath, + "--peer-trusted-ca-file", e2e.CaPath, "--peer-client-cert-auth", "--peer-cert-allowed-cn", "example.com", } } else { args = []string{ - "--peer-cert-file", certPath2, - "--peer-key-file", privateKeyPath2, - "--peer-client-cert-file", certPath2, - "--peer-client-key-file", privateKeyPath2, - "--peer-trusted-ca-file", caPath, + "--peer-cert-file", e2e.CertPath2, + "--peer-key-file", e2e.PrivateKeyPath2, + "--peer-client-cert-file", e2e.CertPath2, + "--peer-client-key-file", e2e.PrivateKeyPath2, + "--peer-trusted-ca-file", e2e.CaPath, "--peer-client-cert-auth", "--peer-cert-allowed-cn", "example2.com", } @@ -183,7 +184,7 @@ func TestEtcdPeerCNAuth(t *testing.T) { commonArgs = append(commonArgs, args...) 
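// Nodes 0 and 1 only accept peer client certificates whose CN is "example.com"; node 2's
// certificate (e2e.CertPath2) is assumed to carry CN "example2.com", so its handshakes with
// nodes 0 and 1 fail and it reports the "remote error: tls: bad certificate" expected below.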
- p, err := spawnCmd(commonArgs, nil) + p, err := e2e.SpawnCmd(commonArgs, nil) if err != nil { t.Fatal(err) } @@ -193,11 +194,11 @@ func TestEtcdPeerCNAuth(t *testing.T) { for i, p := range procs { var expect []string if i <= 1 { - expect = etcdServerReadyLines + expect = e2e.EtcdServerReadyLines } else { expect = []string{"remote error: tls: bad certificate"} } - if err := waitReadyExpectProc(p, expect); err != nil { + if err := e2e.WaitReadyExpectProc(p, expect); err != nil { t.Fatal(err) } } @@ -205,11 +206,11 @@ func TestEtcdPeerCNAuth(t *testing.T) { // TestEtcdPeerNameAuth checks that the inter peer auth based on cert name validation is working correctly. func TestEtcdPeerNameAuth(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) peers, tmpdirs := make([]string, 3), make([]string, 3) for i := range peers { - peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i) + peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i) d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i)) if err != nil { t.Fatal(err) @@ -231,30 +232,30 @@ func TestEtcdPeerNameAuth(t *testing.T) { // node 0 and 1 have a cert with the correct certificate name, node 2 doesn't for i := range procs { commonArgs := []string{ - binDir + "/etcd", + e2e.BinDir + "/etcd", "--name", fmt.Sprintf("e%d", i), "--listen-client-urls", "http://0.0.0.0:0", "--data-dir", tmpdirs[i], "--advertise-client-urls", "http://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i), - "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", etcdProcessBasePort+i), + "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i), + "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i), "--initial-cluster", ic, } var args []string if i <= 1 { args = []string{ - "--peer-cert-file", certPath, - "--peer-key-file", privateKeyPath, - "--peer-trusted-ca-file", caPath, + "--peer-cert-file", e2e.CertPath, + "--peer-key-file", e2e.PrivateKeyPath, + "--peer-trusted-ca-file", e2e.CaPath, "--peer-client-cert-auth", "--peer-cert-allowed-hostname", "localhost", } } else { args = []string{ - "--peer-cert-file", certPath2, - "--peer-key-file", privateKeyPath2, - "--peer-trusted-ca-file", caPath, + "--peer-cert-file", e2e.CertPath2, + "--peer-key-file", e2e.PrivateKeyPath2, + "--peer-trusted-ca-file", e2e.CaPath, "--peer-client-cert-auth", "--peer-cert-allowed-hostname", "example2.com", } @@ -262,7 +263,7 @@ func TestEtcdPeerNameAuth(t *testing.T) { commonArgs = append(commonArgs, args...) 
- p, err := spawnCmd(commonArgs, nil) + p, err := e2e.SpawnCmd(commonArgs, nil) if err != nil { t.Fatal(err) } @@ -272,43 +273,43 @@ func TestEtcdPeerNameAuth(t *testing.T) { for i, p := range procs { var expect []string if i <= 1 { - expect = etcdServerReadyLines + expect = e2e.EtcdServerReadyLines } else { expect = []string{"client certificate authentication failed"} } - if err := waitReadyExpectProc(p, expect); err != nil { + if err := e2e.WaitReadyExpectProc(p, expect); err != nil { t.Fatal(err) } } } func TestGrpcproxyAndCommonName(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) argsWithNonEmptyCN := []string{ - binDir + "/etcd", + e2e.BinDir + "/etcd", "grpc-proxy", "start", - "--cert", certPath2, - "--key", privateKeyPath2, - "--cacert", caPath, + "--cert", e2e.CertPath2, + "--key", e2e.PrivateKeyPath2, + "--cacert", e2e.CaPath, } argsWithEmptyCN := []string{ - binDir + "/etcd", + e2e.BinDir + "/etcd", "grpc-proxy", "start", - "--cert", certPath3, - "--key", privateKeyPath3, - "--cacert", caPath, + "--cert", e2e.CertPath3, + "--key", e2e.PrivateKeyPath3, + "--cacert", e2e.CaPath, } - err := spawnWithExpect(argsWithNonEmptyCN, "cert has non empty Common Name") + err := e2e.SpawnWithExpect(argsWithNonEmptyCN, "cert has non empty Common Name") if err != nil { t.Errorf("Unexpected error: %s", err) } - p, err := spawnCmd(argsWithEmptyCN, nil) + p, err := e2e.SpawnCmd(argsWithEmptyCN, nil) defer func() { if p != nil { p.Stop() @@ -321,13 +322,13 @@ func TestGrpcproxyAndCommonName(t *testing.T) { } func TestBootstrapDefragFlag(t *testing.T) { - skipInShortMode(t) + e2e.SkipInShortMode(t) - proc, err := spawnCmd([]string{binDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil) + proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil) if err != nil { t.Fatal(err) } - if err = waitReadyExpectProc(proc, []string{"Skipping defragmentation"}); err != nil { + if err = e2e.WaitReadyExpectProc(proc, []string{"Skipping defragmentation"}); err != nil { t.Fatal(err) } if err = proc.Stop(); err != nil { diff --git a/tests/e2e/etcd_corrupt_test.go b/tests/e2e/etcd_corrupt_test.go index 2b0730b55..dc34702b7 100644 --- a/tests/e2e/etcd_corrupt_test.go +++ b/tests/e2e/etcd_corrupt_test.go @@ -26,6 +26,7 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/storage/datadir" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) // TODO: test with embedded etcd in integration package @@ -35,10 +36,10 @@ func TestEtcdCorruptHash(t *testing.T) { // defer os.Setenv("EXPECT_DEBUG", oldenv) // os.Setenv("EXPECT_DEBUG", "1") - cfg := newConfigNoTLS() + cfg := e2e.NewConfigNoTLS() // trigger snapshot so that restart member can load peers from disk - cfg.snapshotCount = 3 + cfg.SnapshotCount = 3 testCtl(t, corruptTest, withQuorum(), withCfg(*cfg), @@ -76,18 +77,18 @@ func corruptTest(cx ctlCtx) { id0 := sresp.Header.GetMemberId() cx.t.Log("stopping etcd[0]...") - cx.epc.procs[0].Stop() + cx.epc.Procs[0].Stop() // corrupting first member by modifying backend offline. 
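+ // (cx.corruptFunc, corruptHash in this file, rewrites bytes in the
+ // member's bbolt backend file; the restarted member should then fail
+ // its startup corruption check instead of rejoining the cluster.)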
- fp := datadir.ToBackendFileName(cx.epc.procs[0].Config().dataDirPath) + fp := datadir.ToBackendFileName(cx.epc.Procs[0].Config().DataDirPath) cx.t.Logf("corrupting backend: %v", fp) if err = cx.corruptFunc(fp); err != nil { cx.t.Fatal(err) } cx.t.Log("restarting etcd[0]") - ep := cx.epc.procs[0] - proc, err := spawnCmd(append([]string{ep.Config().execPath}, ep.Config().args...), cx.envMap) + ep := cx.epc.Procs[0] + proc, err := e2e.SpawnCmd(append([]string{ep.Config().ExecPath}, ep.Config().Args...), cx.envMap) if err != nil { cx.t.Fatal(err) } @@ -95,7 +96,7 @@ func corruptTest(cx ctlCtx) { cx.t.Log("waiting for etcd[0] failure...") // restarting corrupted member should fail - waitReadyExpectProc(proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)}) + e2e.WaitReadyExpectProc(proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)}) } func corruptHash(fpath string) error { diff --git a/tests/e2e/etcd_process.go b/tests/e2e/etcd_process.go deleted file mode 100644 index 6fbb595e0..000000000 --- a/tests/e2e/etcd_process.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "fmt" - "net/url" - "os" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/expect" - "go.uber.org/zap" -) - -var ( - etcdServerReadyLines = []string{"ready to serve client requests"} - binPath string - ctlBinPath string - utlBinPath string -) - -// etcdProcess is a process that serves etcd requests. 
-type etcdProcess interface { - EndpointsV2() []string - EndpointsV3() []string - EndpointsMetrics() []string - - Start() error - Restart() error - Stop() error - Close() error - WithStopSignal(sig os.Signal) os.Signal - Config() *etcdServerProcessConfig - Logs() logsExpect -} - -type logsExpect interface { - Expect(string) (string, error) -} - -type etcdServerProcess struct { - cfg *etcdServerProcessConfig - proc *expect.ExpectProcess - donec chan struct{} // closed when Interact() terminates -} - -type etcdServerProcessConfig struct { - lg *zap.Logger - execPath string - args []string - tlsArgs []string - envVars map[string]string - - dataDirPath string - keepDataDir bool - - name string - - purl url.URL - - acurl string - murl string - - initialToken string - initialCluster string -} - -func newEtcdServerProcess(cfg *etcdServerProcessConfig) (*etcdServerProcess, error) { - if !fileutil.Exist(cfg.execPath) { - return nil, fmt.Errorf("could not find etcd binary: %s", cfg.execPath) - } - if !cfg.keepDataDir { - if err := os.RemoveAll(cfg.dataDirPath); err != nil { - return nil, err - } - } - return &etcdServerProcess{cfg: cfg, donec: make(chan struct{})}, nil -} - -func (ep *etcdServerProcess) EndpointsV2() []string { return []string{ep.cfg.acurl} } -func (ep *etcdServerProcess) EndpointsV3() []string { return ep.EndpointsV2() } -func (ep *etcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.murl} } - -func (ep *etcdServerProcess) Start() error { - if ep.proc != nil { - panic("already started") - } - ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.name)) - proc, err := spawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.execPath}, ep.cfg.args...), ep.cfg.envVars) - if err != nil { - return err - } - ep.proc = proc - err = ep.waitReady() - if err == nil { - ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.name)) - } - return err -} - -func (ep *etcdServerProcess) Restart() error { - ep.cfg.lg.Info("restaring server...", zap.String("name", ep.cfg.name)) - if err := ep.Stop(); err != nil { - return err - } - ep.donec = make(chan struct{}) - err := ep.Start() - if err == nil { - ep.cfg.lg.Info("restared server", zap.String("name", ep.cfg.name)) - } - return err -} - -func (ep *etcdServerProcess) Stop() (err error) { - ep.cfg.lg.Info("stoping server...", zap.String("name", ep.cfg.name)) - if ep == nil || ep.proc == nil { - return nil - } - err = ep.proc.Stop() - if err != nil { - return err - } - ep.proc = nil - <-ep.donec - ep.donec = make(chan struct{}) - if ep.cfg.purl.Scheme == "unix" || ep.cfg.purl.Scheme == "unixs" { - err = os.Remove(ep.cfg.purl.Host + ep.cfg.purl.Path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.name)) - return nil -} - -func (ep *etcdServerProcess) Close() error { - ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.name)) - if err := ep.Stop(); err != nil { - return err - } - if !ep.cfg.keepDataDir { - ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.dataDirPath)) - return os.RemoveAll(ep.cfg.dataDirPath) - } - return nil -} - -func (ep *etcdServerProcess) WithStopSignal(sig os.Signal) os.Signal { - ret := ep.proc.StopSignal - ep.proc.StopSignal = sig - return ret -} - -func (ep *etcdServerProcess) waitReady() error { - defer close(ep.donec) - return waitReadyExpectProc(ep.proc, etcdServerReadyLines) -} - -func (ep *etcdServerProcess) Config() *etcdServerProcessConfig { return ep.cfg } - -func (ep 
*etcdServerProcess) Logs() logsExpect { - if ep.proc == nil { - ep.cfg.lg.Panic("Please grap logs before process is stopped") - } - return ep.proc -} diff --git a/tests/e2e/etcd_release_upgrade_test.go b/tests/e2e/etcd_release_upgrade_test.go index 78caef96f..f52c91f72 100644 --- a/tests/e2e/etcd_release_upgrade_test.go +++ b/tests/e2e/etcd_release_upgrade_test.go @@ -23,24 +23,25 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/fileutil" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) // TestReleaseUpgrade ensures that changes to master branch does not affect // upgrade from latest etcd releases. func TestReleaseUpgrade(t *testing.T) { - lastReleaseBinary := binDir + "/etcd-last-release" + lastReleaseBinary := e2e.BinDir + "/etcd-last-release" if !fileutil.Exist(lastReleaseBinary) { t.Skipf("%q does not exist", lastReleaseBinary) } - BeforeTest(t) + e2e.BeforeTest(t) - copiedCfg := newConfigNoTLS() - copiedCfg.execPath = lastReleaseBinary - copiedCfg.snapshotCount = 3 - copiedCfg.baseScheme = "unix" // to avoid port conflict + copiedCfg := e2e.NewConfigNoTLS() + copiedCfg.ExecPath = lastReleaseBinary + copiedCfg.SnapshotCount = 3 + copiedCfg.BaseScheme = "unix" // to avoid port conflict - epc, err := newEtcdProcessCluster(t, copiedCfg) + epc, err := e2e.NewEtcdProcessCluster(t, copiedCfg) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) } @@ -54,7 +55,7 @@ func TestReleaseUpgrade(t *testing.T) { defer os.Unsetenv("ETCDCTL_API") cx := ctlCtx{ t: t, - cfg: *newConfigNoTLS(), + cfg: *e2e.NewConfigNoTLS(), dialTimeout: 7 * time.Second, quorum: true, epc: epc, @@ -71,17 +72,17 @@ func TestReleaseUpgrade(t *testing.T) { t.Log("Cluster of etcd in old version running") - for i := range epc.procs { + for i := range epc.Procs { t.Logf("Stopping node: %v", i) - if err := epc.procs[i].Stop(); err != nil { + if err := epc.Procs[i].Stop(); err != nil { t.Fatalf("#%d: error closing etcd process (%v)", i, err) } t.Logf("Stopped node: %v", i) - epc.procs[i].Config().execPath = binDir + "/etcd" - epc.procs[i].Config().keepDataDir = true + epc.Procs[i].Config().ExecPath = e2e.BinDir + "/etcd" + epc.Procs[i].Config().KeepDataDir = true t.Logf("Restarting node in the new version: %v", i) - if err := epc.procs[i].Restart(); err != nil { + if err := epc.Procs[i].Restart(); err != nil { t.Fatalf("error restarting etcd process (%v)", err) } @@ -100,7 +101,7 @@ func TestReleaseUpgrade(t *testing.T) { // new cluster version needs more time to upgrade ver := version.Cluster(version.Version) for i := 0; i < 7; i++ { - if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"` + ver}); err != nil { + if err = e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/version", Expected: `"etcdcluster":"` + ver}); err != nil { t.Logf("#%d: %v is not ready yet (%v)", i, ver, err) time.Sleep(time.Second) continue @@ -114,19 +115,19 @@ func TestReleaseUpgrade(t *testing.T) { } func TestReleaseUpgradeWithRestart(t *testing.T) { - lastReleaseBinary := binDir + "/etcd-last-release" + lastReleaseBinary := e2e.BinDir + "/etcd-last-release" if !fileutil.Exist(lastReleaseBinary) { t.Skipf("%q does not exist", lastReleaseBinary) } - BeforeTest(t) + e2e.BeforeTest(t) - copiedCfg := newConfigNoTLS() - copiedCfg.execPath = lastReleaseBinary - copiedCfg.snapshotCount = 10 - copiedCfg.baseScheme = "unix" + copiedCfg := e2e.NewConfigNoTLS() + copiedCfg.ExecPath = lastReleaseBinary + copiedCfg.SnapshotCount = 10 + copiedCfg.BaseScheme = "unix" - epc, err := newEtcdProcessCluster(t, 
copiedCfg) + epc, err := e2e.NewEtcdProcessCluster(t, copiedCfg) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) } @@ -140,7 +141,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) { defer os.Unsetenv("ETCDCTL_API") cx := ctlCtx{ t: t, - cfg: *newConfigNoTLS(), + cfg: *e2e.NewConfigNoTLS(), dialTimeout: 7 * time.Second, quorum: true, epc: epc, @@ -155,19 +156,19 @@ func TestReleaseUpgradeWithRestart(t *testing.T) { } } - for i := range epc.procs { - if err := epc.procs[i].Stop(); err != nil { + for i := range epc.Procs { + if err := epc.Procs[i].Stop(); err != nil { t.Fatalf("#%d: error closing etcd process (%v)", i, err) } } var wg sync.WaitGroup - wg.Add(len(epc.procs)) - for i := range epc.procs { + wg.Add(len(epc.Procs)) + for i := range epc.Procs { go func(i int) { - epc.procs[i].Config().execPath = binDir + "/etcd" - epc.procs[i].Config().keepDataDir = true - if err := epc.procs[i].Restart(); err != nil { + epc.Procs[i].Config().ExecPath = e2e.BinDir + "/etcd" + epc.Procs[i].Config().KeepDataDir = true + if err := epc.Procs[i].Restart(); err != nil { t.Errorf("error restarting etcd process (%v)", err) } wg.Done() diff --git a/tests/e2e/gateway_test.go b/tests/e2e/gateway_test.go index 9f48a5225..938b3a672 100644 --- a/tests/e2e/gateway_test.go +++ b/tests/e2e/gateway_test.go @@ -20,6 +20,7 @@ import ( "testing" "go.etcd.io/etcd/pkg/v3/expect" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) var ( @@ -27,7 +28,7 @@ var ( ) func TestGateway(t *testing.T) { - ec, err := newEtcdProcessCluster(t, newConfigNoTLS()) + ec, err := e2e.NewEtcdProcessCluster(t, e2e.NewConfigNoTLS()) if err != nil { t.Fatal(err) } @@ -41,14 +42,14 @@ func TestGateway(t *testing.T) { os.Setenv("ETCDCTL_API", "3") defer os.Unsetenv("ETCDCTL_API") - err = spawnWithExpect([]string{ctlBinPath, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, "OK\r\n") + err = e2e.SpawnWithExpect([]string{e2e.CtlBinPath, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, "OK\r\n") if err != nil { t.Errorf("failed to finish put request through gateway: %v", err) } } func startGateway(t *testing.T, endpoints string) *expect.ExpectProcess { - p, err := expect.NewExpect(binPath, "gateway", "--endpoints="+endpoints, "start") + p, err := expect.NewExpect(e2e.BinPath, "gateway", "--endpoints="+endpoints, "start") if err != nil { t.Fatal(err) } diff --git a/tests/e2e/main_test.go b/tests/e2e/main_test.go index 41561b550..58d7efb95 100644 --- a/tests/e2e/main_test.go +++ b/tests/e2e/main_test.go @@ -5,61 +5,15 @@ package e2e import ( - "flag" "os" - "runtime" "testing" "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/integration" -) - -var ( - binDir string - certDir string - - certPath string - privateKeyPath string - caPath string - - certPath2 string - privateKeyPath2 string - - certPath3 string - privateKeyPath3 string - - crlPath string - revokedCertPath string - revokedPrivateKeyPath string + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestMain(m *testing.M) { - os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH) - os.Unsetenv("ETCDCTL_API") - - binDirDef := integration.MustAbsPath("../../bin") - certDirDef := fixturesDir - - flag.StringVar(&binDir, "bin-dir", binDirDef, "The directory for store etcd and etcdctl binaries.") - flag.StringVar(&certDir, "cert-dir", certDirDef, "The directory for store certificate files.") - flag.Parse() - - binPath = binDir + "/etcd" - ctlBinPath = binDir + "/etcdctl" - utlBinPath = binDir + "/etcdutl" - certPath = certDir + 
"/server.crt" - privateKeyPath = certDir + "/server.key.insecure" - caPath = certDir + "/ca.crt" - revokedCertPath = certDir + "/server-revoked.crt" - revokedPrivateKeyPath = certDir + "/server-revoked.key.insecure" - crlPath = certDir + "/revoke.crl" - - certPath2 = certDir + "/server2.crt" - privateKeyPath2 = certDir + "/server2.key.insecure" - - certPath3 = certDir + "/server3.crt" - privateKeyPath3 = certDir + "/server3.key.insecure" - + e2e.InitFlags() v := m.Run() if v == 0 && testutil.CheckLeakedGoroutine() { os.Exit(1) diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index ce26f3501..e0628fe88 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -19,19 +19,20 @@ import ( "testing" "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestV3MetricsSecure(t *testing.T) { - cfg := newConfigTLS() - cfg.clusterSize = 1 - cfg.metricsURLScheme = "https" + cfg := e2e.NewConfigTLS() + cfg.ClusterSize = 1 + cfg.MetricsURLScheme = "https" testCtl(t, metricsTest) } func TestV3MetricsInsecure(t *testing.T) { - cfg := newConfigTLS() - cfg.clusterSize = 1 - cfg.metricsURLScheme = "http" + cfg := e2e.NewConfigTLS() + cfg.ClusterSize = 1 + cfg.MetricsURLScheme = "http" testCtl(t, metricsTest) } @@ -62,7 +63,7 @@ func metricsTest(cx ctlCtx) { if err := ctlV3Watch(cx, []string{"k", "--rev", "1"}, []kvExec{{key: "k", val: "v"}}...); err != nil { cx.t.Fatal(err) } - if err := cURLGet(cx.epc, cURLReq{endpoint: test.endpoint, expected: test.expected, metricsURLScheme: cx.cfg.metricsURLScheme}); err != nil { + if err := e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: test.endpoint, Expected: test.expected, MetricsURLScheme: cx.cfg.MetricsURLScheme}); err != nil { cx.t.Fatalf("failed get with curl (%v)", err) } } diff --git a/tests/e2e/utl_migrate_test.go b/tests/e2e/utl_migrate_test.go index e81cf2623..efc97f7f4 100644 --- a/tests/e2e/utl_migrate_test.go +++ b/tests/e2e/utl_migrate_test.go @@ -25,10 +25,11 @@ import ( "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestEtctlutlMigrate(t *testing.T) { - lastReleaseBinary := binDir + "/etcd-last-release" + lastReleaseBinary := e2e.BinDir + "/etcd-last-release" tcs := []struct { name string @@ -103,20 +104,20 @@ func TestEtctlutlMigrate(t *testing.T) { } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - BeforeTest(t) + e2e.BeforeTest(t) if tc.binary != "" && !fileutil.Exist(tc.binary) { t.Skipf("%q does not exist", lastReleaseBinary) } dataDirPath := t.TempDir() - epc, err := newEtcdProcessCluster(t, &etcdProcessClusterConfig{ - execPath: tc.binary, - dataDirPath: dataDirPath, - clusterSize: 1, - initialToken: "new", - keepDataDir: true, - // Set low snapshotCount to ensure wal snapshot is done - snapshotCount: 1, + epc, err := e2e.NewEtcdProcessCluster(t, &e2e.EtcdProcessClusterConfig{ + ExecPath: tc.binary, + DataDirPath: dataDirPath, + ClusterSize: 1, + InitialToken: "new", + KeepDataDir: true, + // Set low SnapshotCount to ensure wal snapshot is done + SnapshotCount: 1, }) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) @@ -128,26 +129,26 @@ func TestEtctlutlMigrate(t *testing.T) { }() dialTimeout := 10 * time.Second - prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} + prefixArgs := []string{e2e.CtlBinPath, "--endpoints", 
strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} t.Log("Write keys to ensure wal snapshot is created and all v3.5 fields are set...") for i := 0; i < 10; i++ { - if err = spawnWithExpect(append(prefixArgs, "put", fmt.Sprintf("%d", i), "value"), "OK"); err != nil { + if err = e2e.SpawnWithExpect(append(prefixArgs, "put", fmt.Sprintf("%d", i), "value"), "OK"); err != nil { t.Fatal(err) } } t.Log("Stopping the server...") - if err = epc.procs[0].Stop(); err != nil { + if err = epc.Procs[0].Stop(); err != nil { t.Fatal(err) } t.Log("etcdutl migrate...") - args := []string{utlBinPath, "migrate", "--data-dir", dataDirPath, "--target-version", tc.targetVersion} + args := []string{e2e.UtlBinPath, "migrate", "--data-dir", dataDirPath, "--target-version", tc.targetVersion} if tc.force { args = append(args, "--force") } - err = spawnWithExpect(args, tc.expectLogsSubString) + err = e2e.SpawnWithExpect(args, tc.expectLogsSubString) if err != nil { t.Fatal(err) } diff --git a/tests/e2e/v2_curl_test.go b/tests/e2e/v2_curl_test.go index 0285a7bef..ce6474408 100644 --- a/tests/e2e/v2_curl_test.go +++ b/tests/e2e/v2_curl_test.go @@ -15,27 +15,27 @@ package e2e import ( - "fmt" - "math/rand" "strings" "testing" + + "go.etcd.io/etcd/tests/v3/framework/e2e" ) -func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, newConfigNoTLS()) } -func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, newConfigAutoTLS()) } -func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, newConfigTLS()) } -func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, newConfigPeerTLS()) } -func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, newConfigClientTLS()) } -func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, newConfigClientBoth()) } -func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) { +func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigNoTLS()) } +func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigAutoTLS()) } +func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigTLS()) } +func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigPeerTLS()) } +func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigClientTLS()) } +func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, e2e.NewConfigClientBoth()) } +func testCurlPutGet(t *testing.T, cfg *e2e.EtcdProcessClusterConfig) { BeforeTestV2(t) // test doesn't use quorum gets, so ensure there are no followers to avoid // stale reads that will break the test - cfg = configStandalone(*cfg) + cfg = e2e.ConfigStandalone(*cfg) - cfg.enableV2 = true - epc, err := newEtcdProcessCluster(t, cfg) + cfg.EnableV2 = true + epc, err := e2e.NewEtcdProcessCluster(t, cfg) if err != nil { t.Fatalf("could not start etcd process cluster (%v)", err) } @@ -49,14 +49,14 @@ func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) { expectPut = `{"action":"set","node":{"key":"/foo","value":"bar","` expectGet = `{"action":"get","node":{"key":"/foo","value":"bar","` ) - if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil { + if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Value: "bar", Expected: expectPut}); err != nil { t.Fatalf("failed put with curl (%v)", err) } - if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet}); err != nil { + if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Expected: expectGet}); err != nil { t.Fatalf("failed get with curl (%v)", 
err) } - if cfg.clientTLS == clientTLSAndNonTLS { - if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet, isTLS: true}); err != nil { + if cfg.ClientTLS == e2e.ClientTLSAndNonTLS { + if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Expected: expectGet, IsTLS: true}); err != nil { t.Fatalf("failed get with curl (%v)", err) } } @@ -65,8 +65,8 @@ func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) { func TestV2CurlIssue5182(t *testing.T) { BeforeTestV2(t) - copied := newConfigNoTLS() - copied.enableV2 = true + copied := e2e.NewConfigNoTLS() + copied.EnableV2 = true epc := setupEtcdctlTest(t, copied, false) defer func() { if err := epc.Close(); err != nil { @@ -75,20 +75,20 @@ func TestV2CurlIssue5182(t *testing.T) { }() expectPut := `{"action":"set","node":{"key":"/foo","value":"bar","` - if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil { + if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Value: "bar", Expected: expectPut}); err != nil { t.Fatal(err) } expectUserAdd := `{"user":"foo","roles":null}` - if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/users/foo", value: `{"user":"foo", "password":"pass"}`, expected: expectUserAdd}); err != nil { + if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/auth/users/foo", Value: `{"user":"foo", "password":"pass"}`, Expected: expectUserAdd}); err != nil { t.Fatal(err) } expectRoleAdd := `{"role":"foo","permissions":{"kv":{"read":["/foo/*"],"write":null}}` - if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/roles/foo", value: `{"role":"foo", "permissions": {"kv": {"read": ["/foo/*"]}}}`, expected: expectRoleAdd}); err != nil { + if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/auth/roles/foo", Value: `{"role":"foo", "permissions": {"kv": {"read": ["/foo/*"]}}}`, Expected: expectRoleAdd}); err != nil { t.Fatal(err) } expectUserUpdate := `{"user":"foo","roles":["foo"]}` - if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/users/foo", value: `{"user": "foo", "grant": ["foo"]}`, expected: expectUserUpdate}); err != nil { + if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/auth/users/foo", Value: `{"user": "foo", "grant": ["foo"]}`, Expected: expectUserUpdate}); err != nil { t.Fatal(err) } @@ -99,13 +99,13 @@ func TestV2CurlIssue5182(t *testing.T) { t.Fatal(err) } - if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "root", password: "a", expected: "bar"}); err != nil { + if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo/", Username: "root", Password: "a", Expected: "bar"}); err != nil { t.Fatal(err) } - if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "foo", password: "pass", expected: "bar"}); err != nil { + if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo/", Username: "foo", Password: "pass", Expected: "bar"}); err != nil { t.Fatal(err) } - if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "foo", password: "", expected: "bar"}); err != nil { + if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo/", Username: "foo", Password: "", Expected: "bar"}); err != nil { if !strings.Contains(err.Error(), `The request requires user authentication`) { t.Fatalf("expected 'The request requires user authentication' error, got %v", err) } @@ -113,88 +113,3 @@ func TestV2CurlIssue5182(t *testing.T) { t.Fatalf("expected 'The request requires user authentication' error") } } - -type cURLReq struct { - username string - password string - - isTLS bool 
- timeout int - - endpoint string - - value string - expected string - header string - - metricsURLScheme string - - ciphers string -} - -// cURLPrefixArgs builds the beginning of a curl command for a given key -// addressed to a random URL in the given cluster. -func cURLPrefixArgs(clus *etcdProcessCluster, method string, req cURLReq) []string { - var ( - cmdArgs = []string{"curl"} - acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl - ) - if req.metricsURLScheme != "https" { - if req.isTLS { - if clus.cfg.clientTLS != clientTLSAndNonTLS { - panic("should not use cURLPrefixArgsUseTLS when serving only TLS or non-TLS") - } - cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath) - acurl = toTLS(clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl) - } else if clus.cfg.clientTLS == clientTLS { - if !clus.cfg.noCN { - cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath) - } else { - cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath3, "--key", privateKeyPath3) - } - } - } - if req.metricsURLScheme != "" { - acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].EndpointsMetrics()[0] - } - ep := acurl + req.endpoint - - if req.username != "" || req.password != "" { - cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.username, req.password), ep) - } else { - cmdArgs = append(cmdArgs, "-L", ep) - } - if req.timeout != 0 { - cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.timeout)) - } - - if req.header != "" { - cmdArgs = append(cmdArgs, "-H", req.header) - } - - if req.ciphers != "" { - cmdArgs = append(cmdArgs, "--ciphers", req.ciphers) - } - - switch method { - case "POST", "PUT": - dt := req.value - if !strings.HasPrefix(dt, "{") { // for non-JSON value - dt = "value=" + dt - } - cmdArgs = append(cmdArgs, "-X", method, "-d", dt) - } - return cmdArgs -} - -func cURLPost(clus *etcdProcessCluster, req cURLReq) error { - return spawnWithExpect(cURLPrefixArgs(clus, "POST", req), req.expected) -} - -func cURLPut(clus *etcdProcessCluster, req cURLReq) error { - return spawnWithExpect(cURLPrefixArgs(clus, "PUT", req), req.expected) -} - -func cURLGet(clus *etcdProcessCluster, req cURLReq) error { - return spawnWithExpect(cURLPrefixArgs(clus, "GET", req), req.expected) -} diff --git a/tests/e2e/v2store_deprecation_test.go b/tests/e2e/v2store_deprecation_test.go index cf6c28200..52dec549b 100644 --- a/tests/e2e/v2store_deprecation_test.go +++ b/tests/e2e/v2store_deprecation_test.go @@ -19,24 +19,25 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func createV2store(t testing.TB, dataDirPath string) { t.Log("Creating not-yet v2-deprecated etcd") - cfg := configStandalone(etcdProcessClusterConfig{enableV2: true, dataDirPath: dataDirPath, snapshotCount: 5}) - epc, err := newEtcdProcessCluster(t, cfg) + cfg := e2e.ConfigStandalone(e2e.EtcdProcessClusterConfig{EnableV2: true, DataDirPath: dataDirPath, SnapshotCount: 5}) + epc, err := e2e.NewEtcdProcessCluster(t, cfg) assert.NoError(t, err) defer func() { assert.NoError(t, epc.Stop()) }() - // We need to exceed 'snapshotCount' such that v2 snapshot is dumped. + // We need to exceed 'SnapshotCount' such that v2 snapshot is dumped. 
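+ // (SnapshotCount is 5 in the config above, so the 10 v2 puts below
+ // should cross the snapshot threshold at least once.)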
for i := 0; i < 10; i++ { - if err := cURLPut(epc, cURLReq{ - endpoint: "/v2/keys/foo", value: "bar" + fmt.Sprint(i), - expected: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)}); err != nil { + if err := e2e.CURLPut(epc, e2e.CURLReq{ + Endpoint: "/v2/keys/foo", Value: "bar" + fmt.Sprint(i), + Expected: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)}); err != nil { t.Fatalf("failed put with curl (%v)", err) } } @@ -45,17 +46,17 @@ func createV2store(t testing.TB, dataDirPath string) { func assertVerifyCanStartV2deprecationNotYet(t testing.TB, dataDirPath string) { t.Log("verify: possible to start etcd with --v2-deprecation=not-yet mode") - cfg := configStandalone(etcdProcessClusterConfig{enableV2: true, dataDirPath: dataDirPath, v2deprecation: "not-yet", keepDataDir: true}) - epc, err := newEtcdProcessCluster(t, cfg) + cfg := e2e.ConfigStandalone(e2e.EtcdProcessClusterConfig{EnableV2: true, DataDirPath: dataDirPath, V2deprecation: "not-yet", KeepDataDir: true}) + epc, err := e2e.NewEtcdProcessCluster(t, cfg) assert.NoError(t, err) defer func() { assert.NoError(t, epc.Stop()) }() - if err := cURLGet(epc, cURLReq{ - endpoint: "/v2/keys/foo", - expected: `{"action":"get","node":{"key":"/foo","value":"bar9","modifiedIndex":13,"createdIndex":13}}`}); err != nil { + if err := e2e.CURLGet(epc, e2e.CURLReq{ + Endpoint: "/v2/keys/foo", + Expected: `{"action":"get","node":{"key":"/foo","value":"bar9","modifiedIndex":13,"createdIndex":13}}`}); err != nil { t.Fatalf("failed get with curl (%v)", err) } @@ -63,7 +64,7 @@ func assertVerifyCanStartV2deprecationNotYet(t testing.TB, dataDirPath string) { func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath string) { t.Log("Verify its infeasible to start etcd with --v2-deprecation=write-only mode") - proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath}, nil) + proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath}, nil) assert.NoError(t, err) _, err = proc.Expect("detected disallowed custom content in v2store for stage --v2-deprecation=write-only") @@ -71,7 +72,7 @@ func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath str } func TestV2Deprecation(t *testing.T) { - BeforeTest(t) + e2e.BeforeTest(t) dataDirPath := t.TempDir() t.Run("create-storev2-data", func(t *testing.T) { @@ -89,8 +90,8 @@ func TestV2Deprecation(t *testing.T) { } func TestV2DeprecationWriteOnlyNoV2Api(t *testing.T) { - BeforeTest(t) - proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"}, nil) + e2e.BeforeTest(t) + proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"}, nil) assert.NoError(t, err) _, err = proc.Expect("--enable-v2 and --v2-deprecation=write-only are mutually exclusive") diff --git a/tests/e2e/v3_cipher_suite_test.go b/tests/e2e/v3_cipher_suite_test.go index 694de13b4..4b804c015 100644 --- a/tests/e2e/v3_cipher_suite_test.go +++ b/tests/e2e/v3_cipher_suite_test.go @@ -22,14 +22,15 @@ import ( "testing" "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestV3CurlCipherSuitesValid(t *testing.T) { testV3CurlCipherSuites(t, true) } func TestV3CurlCipherSuitesMismatch(t *testing.T) { testV3CurlCipherSuites(t, false) } func testV3CurlCipherSuites(t *testing.T, valid bool) { - cc := newConfigClientTLS() - cc.clusterSize = 1 - cc.cipherSuites = []string{ + cc 
:= e2e.NewConfigClientTLS() + cc.ClusterSize = 1 + cc.CipherSuites = []string{ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", @@ -45,11 +46,11 @@ func testV3CurlCipherSuites(t *testing.T, valid bool) { } func cipherSuiteTestValid(cx ctlCtx) { - if err := cURLGet(cx.epc, cURLReq{ - endpoint: "/metrics", - expected: fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version), - metricsURLScheme: cx.cfg.metricsURLScheme, - ciphers: "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + if err := e2e.CURLGet(cx.epc, e2e.CURLReq{ + Endpoint: "/metrics", + Expected: fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version), + MetricsURLScheme: cx.cfg.MetricsURLScheme, + Ciphers: "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 }); err != nil { cx.t.Fatalf("failed get with curl (%v)", err) } @@ -58,11 +59,11 @@ func cipherSuiteTestValid(cx ctlCtx) { func cipherSuiteTestMismatch(cx ctlCtx) { var err error for _, exp := range []string{"alert handshake failure", "failed setting cipher list"} { - err = cURLGet(cx.epc, cURLReq{ - endpoint: "/metrics", - expected: exp, - metricsURLScheme: cx.cfg.metricsURLScheme, - ciphers: "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA + err = e2e.CURLGet(cx.epc, e2e.CURLReq{ + Endpoint: "/metrics", + Expected: exp, + MetricsURLScheme: cx.cfg.MetricsURLScheme, + Ciphers: "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA }) if err == nil { break diff --git a/tests/e2e/v3_curl_lease_test.go b/tests/e2e/v3_curl_lease_test.go index 80d21e610..82c297d00 100644 --- a/tests/e2e/v3_curl_lease_test.go +++ b/tests/e2e/v3_curl_lease_test.go @@ -19,26 +19,27 @@ import ( "testing" pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/tests/v3/framework/e2e" ) func TestV3CurlLeaseGrantNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseGrant, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlLeaseGrant, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } func TestV3CurlLeaseRevokeNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseRevoke, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlLeaseRevoke, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } func TestV3CurlLeaseLeasesNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseLeases, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlLeaseLeases, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } func TestV3CurlLeaseKeepAliveNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseKeepAlive, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlLeaseKeepAlive, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } @@ -49,7 +50,7 @@ type v3cURLTest struct { } func testV3CurlLeaseGrant(cx ctlCtx) { - leaseID := randomLeaseID() + leaseID := e2e.RandomLeaseID() tests := []v3cURLTest{ { @@ -73,13 +74,13 @@ func testV3CurlLeaseGrant(cx ctlCtx) { expected: `"grantedTTL"`, }, } - if err := cURLWithExpected(cx, tests); err != nil { + if err := CURLWithExpected(cx, tests); err != nil { cx.t.Fatalf("testV3CurlLeaseGrant: %v", err) } } func testV3CurlLeaseRevoke(cx ctlCtx) { - leaseID := randomLeaseID() + leaseID := e2e.RandomLeaseID() tests := []v3cURLTest{ { @@ -93,13 +94,13 @@ func testV3CurlLeaseRevoke(cx ctlCtx) { expected: `"revision":"`, }, } - if err := 
cURLWithExpected(cx, tests); err != nil { + if err := CURLWithExpected(cx, tests); err != nil { cx.t.Fatalf("testV3CurlLeaseRevoke: %v", err) } } func testV3CurlLeaseLeases(cx ctlCtx) { - leaseID := randomLeaseID() + leaseID := e2e.RandomLeaseID() tests := []v3cURLTest{ { @@ -113,13 +114,13 @@ func testV3CurlLeaseLeases(cx ctlCtx) { expected: gwLeaseIDExpected(leaseID), }, } - if err := cURLWithExpected(cx, tests); err != nil { + if err := CURLWithExpected(cx, tests); err != nil { cx.t.Fatalf("testV3CurlLeaseGrant: %v", err) } } func testV3CurlLeaseKeepAlive(cx ctlCtx) { - leaseID := randomLeaseID() + leaseID := e2e.RandomLeaseID() tests := []v3cURLTest{ { @@ -133,7 +134,7 @@ func testV3CurlLeaseKeepAlive(cx ctlCtx) { expected: gwLeaseIDExpected(leaseID), }, } - if err := cURLWithExpected(cx, tests); err != nil { + if err := CURLWithExpected(cx, tests); err != nil { cx.t.Fatalf("testV3CurlLeaseGrant: %v", err) } } @@ -144,7 +145,7 @@ func gwLeaseIDExpected(leaseID int64) string { func gwLeaseTTLWithKeys(cx ctlCtx, leaseID int64) string { d := &pb.LeaseTimeToLiveRequest{ID: leaseID, Keys: true} - s, err := dataMarshal(d) + s, err := e2e.DataMarshal(d) if err != nil { cx.t.Fatalf("gwLeaseTTLWithKeys: error (%v)", err) } @@ -153,7 +154,7 @@ func gwLeaseTTLWithKeys(cx ctlCtx, leaseID int64) string { func gwLeaseKeepAlive(cx ctlCtx, leaseID int64) string { d := &pb.LeaseKeepAliveRequest{ID: leaseID} - s, err := dataMarshal(d) + s, err := e2e.DataMarshal(d) if err != nil { cx.t.Fatalf("gwLeaseKeepAlive: Marshal error (%v)", err) } @@ -162,7 +163,7 @@ func gwLeaseKeepAlive(cx ctlCtx, leaseID int64) string { func gwLeaseGrant(cx ctlCtx, leaseID int64, ttl int64) string { d := &pb.LeaseGrantRequest{ID: leaseID, TTL: ttl} - s, err := dataMarshal(d) + s, err := e2e.DataMarshal(d) if err != nil { cx.t.Fatalf("gwLeaseGrant: Marshal error (%v)", err) } @@ -171,7 +172,7 @@ func gwLeaseGrant(cx ctlCtx, leaseID int64, ttl int64) string { func gwLeaseRevoke(cx ctlCtx, leaseID int64) string { d := &pb.LeaseRevokeRequest{ID: leaseID} - s, err := dataMarshal(d) + s, err := e2e.DataMarshal(d) if err != nil { cx.t.Fatalf("gwLeaseRevoke: Marshal error (%v)", err) } @@ -180,7 +181,7 @@ func gwLeaseRevoke(cx ctlCtx, leaseID int64) string { func gwKVPutLease(cx ctlCtx, k string, v string, leaseID int64) string { d := pb.PutRequest{Key: []byte(k), Value: []byte(v), Lease: leaseID} - s, err := dataMarshal(d) + s, err := e2e.DataMarshal(d) if err != nil { cx.t.Fatalf("gwKVPutLease: Marshal error (%v)", err) } diff --git a/tests/e2e/v3_curl_test.go b/tests/e2e/v3_curl_test.go index ce4ab5a0c..17fe6590b 100644 --- a/tests/e2e/v3_curl_test.go +++ b/tests/e2e/v3_curl_test.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/testutil" epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" + "go.etcd.io/etcd/tests/v3/framework/e2e" "github.com/grpc-ecosystem/grpc-gateway/runtime" ) @@ -35,27 +36,27 @@ var apiPrefix = []string{"/v3"} func TestV3CurlPutGetNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } func TestV3CurlPutGetAutoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigAutoTLS())) + testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigAutoTLS())) } } func TestV3CurlPutGetAllTLS(t *testing.T) { for _, p := range 
apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigTLS())) + testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigTLS())) } } func TestV3CurlPutGetPeerTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigPeerTLS())) + testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigPeerTLS())) } } func TestV3CurlPutGetClientTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigClientTLS())) + testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigClientTLS())) } } func TestV3CurlWatch(t *testing.T) { @@ -75,7 +76,7 @@ func TestV3CurlAuth(t *testing.T) { } func TestV3CurlAuthClientTLSCertAuth(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlAuth, withApiPrefix(p), withCfg(*newConfigClientTLSCertAuthWithNoCN())) + testCtl(t, testV3CurlAuth, withApiPrefix(p), withCfg(*e2e.NewConfigClientTLSCertAuthWithNoCN())) } } @@ -103,14 +104,14 @@ func testV3CurlPutGet(cx ctlCtx) { p := cx.apiPrefix - if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putData), expected: expectPut}); err != nil { + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putData), Expected: expectPut}); err != nil { cx.t.Fatalf("failed testV3CurlPutGet put with curl using prefix (%s) (%v)", p, err) } - if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/range"), value: string(rangeData), expected: expectGet}); err != nil { + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/range"), Value: string(rangeData), Expected: expectGet}); err != nil { cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err) } - if cx.cfg.clientTLS == clientTLSAndNonTLS { - if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/range"), value: string(rangeData), expected: expectGet, isTLS: true}); err != nil { + if cx.cfg.ClientTLS == e2e.ClientTLSAndNonTLS { + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/range"), Value: string(rangeData), Expected: expectGet, IsTLS: true}); err != nil { cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err) } } @@ -134,11 +135,11 @@ func testV3CurlWatch(cx ctlCtx) { wstr := `{"create_request" : ` + string(wreq) + "}" p := cx.apiPrefix - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "revision"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Expected: "revision"}); err != nil { cx.t.Fatalf("failed testV3CurlWatch put with curl using prefix (%s) (%v)", p, err) } // expects "bar", timeout after 2 seconds since stream waits forever - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/watch"), value: wstr, expected: `"YmFy"`, timeout: 2}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/watch"), Value: wstr, Expected: `"YmFy"`, Timeout: 2}); err != nil { cx.t.Fatalf("failed testV3CurlWatch watch with curl using prefix (%s) (%v)", p, err) } } @@ -171,13 +172,13 @@ func testV3CurlTxn(cx ctlCtx) { } expected := `"succeeded":true,"responses":[{"response_put":{"header":{"revision":"2"}}}]` p := cx.apiPrefix - if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/txn"), value: string(jsonDat), expected: expected}); err != nil { + if err := 
e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/txn"), Value: string(jsonDat), Expected: expected}); err != nil { cx.t.Fatalf("failed testV3CurlTxn txn with curl using prefix (%s) (%v)", p, err) } // was crashing etcd server malformed := `{"compare":[{"result":0,"target":1,"key":"Zm9v","TargetUnion":null}],"success":[{"Request":{"RequestPut":{"key":"Zm9v","value":"YmFy"}}}]}` - if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/txn"), value: malformed, expected: "error"}); err != nil { + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/txn"), Value: malformed, Expected: "error"}); err != nil { cx.t.Fatalf("failed testV3CurlTxn put with curl using prefix (%s) (%v)", p, err) } @@ -194,7 +195,7 @@ func testV3CurlAuth(cx ctlCtx) { user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: pwds[i], Options: options[i]}) testutil.AssertNil(cx.t, err) - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/add"), value: string(user), expected: "revision"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/user/add"), Value: string(user), Expected: "revision"}); err != nil { cx.t.Fatalf("failed testV3CurlAuth add user %v with curl (%v)", usernames[i], err) } } @@ -203,7 +204,7 @@ func testV3CurlAuth(cx ctlCtx) { rolereq, err := json.Marshal(&pb.AuthRoleAddRequest{Name: string("root")}) testutil.AssertNil(cx.t, err) - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/role/add"), value: string(rolereq), expected: "revision"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/role/add"), Value: string(rolereq), Expected: "revision"}); err != nil { cx.t.Fatalf("failed testV3CurlAuth create role with curl using prefix (%s) (%v)", p, err) } @@ -212,13 +213,13 @@ func testV3CurlAuth(cx ctlCtx) { grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"}) testutil.AssertNil(cx.t, err) - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/grant"), value: string(grantroleroot), expected: "revision"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/user/grant"), Value: string(grantroleroot), Expected: "revision"}); err != nil { cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err) } } // enable auth - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/enable"), value: string("{}"), expected: "revision"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/enable"), Value: string("{}"), Expected: "revision"}); err != nil { cx.t.Fatalf("failed testV3CurlAuth enable auth with curl using prefix (%s) (%v)", p, err) } @@ -228,7 +229,7 @@ func testV3CurlAuth(cx ctlCtx) { testutil.AssertNil(cx.t, err) // fail put no auth - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "error"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Expected: "error"}); err != nil { cx.t.Fatalf("failed testV3CurlAuth no auth put with curl using prefix (%s) (%v)", p, err) } @@ -242,8 +243,8 @@ func testV3CurlAuth(cx ctlCtx) { lineFunc = func(txt string) bool { return true } ) - cmdArgs = cURLPrefixArgs(cx.epc, "POST", cURLReq{endpoint: path.Join(p, "/auth/authenticate"), value: string(authreq)}) - proc, err := spawnCmd(cmdArgs, cx.envMap) + cmdArgs = 
e2e.CURLPrefixArgs(cx.epc, "POST", e2e.CURLReq{Endpoint: path.Join(p, "/auth/authenticate"), Value: string(authreq)}) + proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) testutil.AssertNil(cx.t, err) defer proc.Close() @@ -261,7 +262,7 @@ func testV3CurlAuth(cx ctlCtx) { authHeader = "Authorization: " + token // put with auth - if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), header: authHeader, expected: "revision"}); err != nil { + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Header: authHeader, Expected: "revision"}); err != nil { cx.t.Fatalf("failed testV3CurlAuth auth put with curl using prefix (%s) and user (%v) (%v)", p, usernames[i], err) } } @@ -269,7 +270,7 @@ func testV3CurlAuth(cx ctlCtx) { func TestV3CurlCampaignNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlCampaign, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlCampaign, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } @@ -281,11 +282,11 @@ func testV3CurlCampaign(cx ctlCtx) { if err != nil { cx.t.Fatal(err) } - cargs := cURLPrefixArgs(cx.epc, "POST", cURLReq{ - endpoint: path.Join(cx.apiPrefix, "/election/campaign"), - value: string(cdata), + cargs := e2e.CURLPrefixArgs(cx.epc, "POST", e2e.CURLReq{ + Endpoint: path.Join(cx.apiPrefix, "/election/campaign"), + Value: string(cdata), }) - lines, err := spawnWithExpectLines(cargs, cx.envMap, `"leader":{"name":"`) + lines, err := e2e.SpawnWithExpectLines(cargs, cx.envMap, `"leader":{"name":"`) if err != nil { cx.t.Fatalf("failed post campaign request (%s) (%v)", cx.apiPrefix, err) } @@ -320,10 +321,10 @@ func testV3CurlCampaign(cx ctlCtx) { if err != nil { cx.t.Fatal(err) } - if err = cURLPost(cx.epc, cURLReq{ - endpoint: path.Join(cx.apiPrefix, "/election/proclaim"), - value: string(pdata), - expected: `"revision":`, + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{ + Endpoint: path.Join(cx.apiPrefix, "/election/proclaim"), + Value: string(pdata), + Expected: `"revision":`, }); err != nil { cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err) } @@ -331,7 +332,7 @@ func testV3CurlCampaign(cx ctlCtx) { func TestV3CurlProclaimMissiongLeaderKeyNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlProclaimMissiongLeaderKey, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlProclaimMissiongLeaderKey, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } @@ -340,10 +341,10 @@ func testV3CurlProclaimMissiongLeaderKey(cx ctlCtx) { if err != nil { cx.t.Fatal(err) } - if err = cURLPost(cx.epc, cURLReq{ - endpoint: path.Join(cx.apiPrefix, "/election/proclaim"), - value: string(pdata), - expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`, + if err = e2e.CURLPost(cx.epc, e2e.CURLReq{ + Endpoint: path.Join(cx.apiPrefix, "/election/proclaim"), + Value: string(pdata), + Expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`, }); err != nil { cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err) } @@ -351,15 +352,15 @@ func testV3CurlProclaimMissiongLeaderKey(cx ctlCtx) { func TestV3CurlResignMissiongLeaderKeyNoTLS(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlResignMissiongLeaderKey, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlResignMissiongLeaderKey, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } func 
testV3CurlResignMissiongLeaderKey(cx ctlCtx) { - if err := cURLPost(cx.epc, cURLReq{ - endpoint: path.Join(cx.apiPrefix, "/election/resign"), - value: `{}`, - expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`, + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{ + Endpoint: path.Join(cx.apiPrefix, "/election/resign"), + Value: `{}`, + Expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`, }); err != nil { cx.t.Fatalf("failed post resign request (%s) (%v)", cx.apiPrefix, err) } @@ -367,14 +368,14 @@ func testV3CurlResignMissiongLeaderKey(cx ctlCtx) { func TestV3CurlMaintenanceAlarmMissiongAlarm(t *testing.T) { for _, p := range apiPrefix { - testCtl(t, testV3CurlMaintenanceAlarmMissiongAlarm, withApiPrefix(p), withCfg(*newConfigNoTLS())) + testCtl(t, testV3CurlMaintenanceAlarmMissiongAlarm, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) } } func testV3CurlMaintenanceAlarmMissiongAlarm(cx ctlCtx) { - if err := cURLPost(cx.epc, cURLReq{ - endpoint: path.Join(cx.apiPrefix, "/maintenance/alarm"), - value: `{"action": "ACTIVATE"}`, + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{ + Endpoint: path.Join(cx.apiPrefix, "/maintenance/alarm"), + Value: `{"action": "ACTIVATE"}`, }); err != nil { cx.t.Fatalf("failed post maintenance alarm (%s) (%v)", cx.apiPrefix, err) } @@ -391,11 +392,11 @@ type campaignResponse struct { } `json:"leader,omitempty"` } -func cURLWithExpected(cx ctlCtx, tests []v3cURLTest) error { +func CURLWithExpected(cx ctlCtx, tests []v3cURLTest) error { p := cx.apiPrefix for _, t := range tests { value := fmt.Sprintf("%v", t.value) - if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, t.endpoint), value: value, expected: t.expected}); err != nil { + if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, t.endpoint), Value: value, Expected: t.expected}); err != nil { return fmt.Errorf("prefix (%s) endpoint (%s): error (%v), wanted %v", p, t.endpoint, err, t.expected) } } diff --git a/tests/framework/e2e/cluster.go b/tests/framework/e2e/cluster.go new file mode 100644 index 000000000..d6bc14341 --- /dev/null +++ b/tests/framework/e2e/cluster.go @@ -0,0 +1,475 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
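+
+// cluster.go carries the cluster-bootstrap helpers that the test files
+// above consume through the tests/v3/framework/e2e import
+// (e2e.NewEtcdProcessCluster, e2e.NewConfigNoTLS, and friends).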
+ +package e2e + +import ( + "fmt" + "net/url" + "os" + "path" + "strings" + "testing" + "time" + + "go.etcd.io/etcd/server/v3/etcdserver" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" +) + +const EtcdProcessBasePort = 20000 + +type ClientConnType int + +const ( + ClientNonTLS ClientConnType = iota + ClientTLS + ClientTLSAndNonTLS +) + +func NewConfigNoTLS() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ClusterSize: 3, + InitialToken: "new", + } +} + +func NewConfigAutoTLS() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 3, + IsPeerTLS: true, + IsPeerAutoTLS: true, + InitialToken: "new", + } +} + +func NewConfigTLS() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 3, + ClientTLS: ClientTLS, + IsPeerTLS: true, + InitialToken: "new", + } +} + +func NewConfigClientTLS() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 3, + ClientTLS: ClientTLS, + InitialToken: "new", + } +} + +func NewConfigClientBoth() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 1, + ClientTLS: ClientTLSAndNonTLS, + InitialToken: "new", + } +} + +func NewConfigClientAutoTLS() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 1, + IsClientAutoTLS: true, + ClientTLS: ClientTLS, + InitialToken: "new", + } +} + +func NewConfigPeerTLS() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 3, + IsPeerTLS: true, + InitialToken: "new", + } +} + +func NewConfigClientTLSCertAuth() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 1, + ClientTLS: ClientTLS, + InitialToken: "new", + ClientCertAuthEnabled: true, + } +} + +func NewConfigClientTLSCertAuthWithNoCN() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 1, + ClientTLS: ClientTLS, + InitialToken: "new", + ClientCertAuthEnabled: true, + NoCN: true, + } +} + +func NewConfigJWT() *EtcdProcessClusterConfig { + return &EtcdProcessClusterConfig{ + ClusterSize: 1, + InitialToken: "new", + AuthTokenOpts: "jwt,pub-key=" + path.Join(FixturesDir, "server.crt") + + ",priv-key=" + path.Join(FixturesDir, "server.key.insecure") + ",sign-method=RS256,ttl=1s", + } +} + +func ConfigStandalone(cfg EtcdProcessClusterConfig) *EtcdProcessClusterConfig { + ret := cfg + ret.ClusterSize = 1 + return &ret +} + +type EtcdProcessCluster struct { + lg *zap.Logger + Cfg *EtcdProcessClusterConfig + Procs []EtcdProcess +} + +type EtcdProcessClusterConfig struct { + ExecPath string + DataDirPath string + KeepDataDir bool + EnvVars map[string]string + + ClusterSize int + + BaseScheme string + BasePort int + + MetricsURLScheme string + + SnapshotCount int // default is 10000 + + ClientTLS ClientConnType + ClientCertAuthEnabled bool + IsPeerTLS bool + IsPeerAutoTLS bool + IsClientAutoTLS bool + IsClientCRL bool + NoCN bool + + CipherSuites []string + + ForceNewCluster bool + InitialToken string + QuotaBackendBytes int64 + NoStrictReconfig bool + EnableV2 bool + InitialCorruptCheck bool + AuthTokenOpts string + V2deprecation string + + RollingStart bool +} + +// NewEtcdProcessCluster launches a new cluster from etcd processes, returning +// a new EtcdProcessCluster once all nodes are ready to accept client requests. 
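+//
+// A minimal usage sketch (error handling and cleanup as in the tests above):
+//
+//	epc, err := NewEtcdProcessCluster(t, NewConfigNoTLS())
+//	if err != nil {
+//		t.Fatalf("could not start etcd process cluster (%v)", err)
+//	}
+//	defer epc.Close()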
+func NewEtcdProcessCluster(t testing.TB, cfg *EtcdProcessClusterConfig) (*EtcdProcessCluster, error) { + SkipInShortMode(t) + + etcdCfgs := cfg.EtcdServerProcessConfigs(t) + epc := &EtcdProcessCluster{ + Cfg: cfg, + lg: zaptest.NewLogger(t), + Procs: make([]EtcdProcess, cfg.ClusterSize), + } + + // launch etcd processes + for i := range etcdCfgs { + proc, err := NewEtcdProcess(etcdCfgs[i]) + if err != nil { + epc.Close() + return nil, fmt.Errorf("Cannot configure: %v", err) + } + epc.Procs[i] = proc + } + + if cfg.RollingStart { + if err := epc.RollingStart(); err != nil { + return nil, fmt.Errorf("Cannot rolling-start: %v", err) + } + } else { + if err := epc.Start(); err != nil { + return nil, fmt.Errorf("Cannot start: %v", err) + } + } + return epc, nil +} + +func (cfg *EtcdProcessClusterConfig) ClientScheme() string { + if cfg.ClientTLS == ClientTLS { + return "https" + } + return "http" +} + +func (cfg *EtcdProcessClusterConfig) PeerScheme() string { + peerScheme := cfg.BaseScheme + if peerScheme == "" { + peerScheme = "http" + } + if cfg.IsPeerTLS { + peerScheme += "s" + } + return peerScheme +} + +func (cfg *EtcdProcessClusterConfig) EtcdServerProcessConfigs(tb testing.TB) []*EtcdServerProcessConfig { + lg := zaptest.NewLogger(tb) + + if cfg.BasePort == 0 { + cfg.BasePort = EtcdProcessBasePort + } + if cfg.ExecPath == "" { + cfg.ExecPath = BinPath + } + if cfg.SnapshotCount == 0 { + cfg.SnapshotCount = etcdserver.DefaultSnapshotCount + } + + etcdCfgs := make([]*EtcdServerProcessConfig, cfg.ClusterSize) + initialCluster := make([]string, cfg.ClusterSize) + for i := 0; i < cfg.ClusterSize; i++ { + var curls []string + var curl, curltls string + port := cfg.BasePort + 5*i + curlHost := fmt.Sprintf("localhost:%d", port) + + switch cfg.ClientTLS { + case ClientNonTLS, ClientTLS: + curl = (&url.URL{Scheme: cfg.ClientScheme(), Host: curlHost}).String() + curls = []string{curl} + case ClientTLSAndNonTLS: + curl = (&url.URL{Scheme: "http", Host: curlHost}).String() + curltls = (&url.URL{Scheme: "https", Host: curlHost}).String() + curls = []string{curl, curltls} + } + + purl := url.URL{Scheme: cfg.PeerScheme(), Host: fmt.Sprintf("localhost:%d", port+1)} + name := fmt.Sprintf("test-%d", i) + dataDirPath := cfg.DataDirPath + if cfg.DataDirPath == "" { + dataDirPath = tb.TempDir() + } + initialCluster[i] = fmt.Sprintf("%s=%s", name, purl.String()) + + args := []string{ + "--name", name, + "--listen-client-urls", strings.Join(curls, ","), + "--advertise-client-urls", strings.Join(curls, ","), + "--listen-peer-urls", purl.String(), + "--initial-advertise-peer-urls", purl.String(), + "--initial-cluster-token", cfg.InitialToken, + "--data-dir", dataDirPath, + "--snapshot-count", fmt.Sprintf("%d", cfg.SnapshotCount), + } + args = AddV2Args(args) + if cfg.ForceNewCluster { + args = append(args, "--force-new-cluster") + } + if cfg.QuotaBackendBytes > 0 { + args = append(args, + "--quota-backend-bytes", fmt.Sprintf("%d", cfg.QuotaBackendBytes), + ) + } + if cfg.NoStrictReconfig { + args = append(args, "--strict-reconfig-check=false") + } + if cfg.EnableV2 { + args = append(args, "--enable-v2") + } + if cfg.InitialCorruptCheck { + args = append(args, "--experimental-initial-corrupt-check") + } + var murl string + if cfg.MetricsURLScheme != "" { + murl = (&url.URL{ + Scheme: cfg.MetricsURLScheme, + Host: fmt.Sprintf("localhost:%d", port+2), + }).String() + args = append(args, "--listen-metrics-urls", murl) + } + + args = append(args, cfg.TlsArgs()...) 
+ + if cfg.AuthTokenOpts != "" { + args = append(args, "--auth-token", cfg.AuthTokenOpts) + } + + if cfg.V2deprecation != "" { + args = append(args, "--v2-deprecation", cfg.V2deprecation) + } + + etcdCfgs[i] = &EtcdServerProcessConfig{ + lg: lg, + ExecPath: cfg.ExecPath, + Args: args, + EnvVars: cfg.EnvVars, + TlsArgs: cfg.TlsArgs(), + DataDirPath: dataDirPath, + KeepDataDir: cfg.KeepDataDir, + Name: name, + Purl: purl, + Acurl: curl, + Murl: murl, + InitialToken: cfg.InitialToken, + } + } + + initialClusterArgs := []string{"--initial-cluster", strings.Join(initialCluster, ",")} + for i := range etcdCfgs { + etcdCfgs[i].InitialCluster = strings.Join(initialCluster, ",") + etcdCfgs[i].Args = append(etcdCfgs[i].Args, initialClusterArgs...) + } + + return etcdCfgs +} + +func (cfg *EtcdProcessClusterConfig) TlsArgs() (args []string) { + if cfg.ClientTLS != ClientNonTLS { + if cfg.IsClientAutoTLS { + args = append(args, "--auto-tls") + } else { + tlsClientArgs := []string{ + "--cert-file", CertPath, + "--key-file", PrivateKeyPath, + "--trusted-ca-file", CaPath, + } + args = append(args, tlsClientArgs...) + + if cfg.ClientCertAuthEnabled { + args = append(args, "--client-cert-auth") + } + } + } + + if cfg.IsPeerTLS { + if cfg.IsPeerAutoTLS { + args = append(args, "--peer-auto-tls") + } else { + tlsPeerArgs := []string{ + "--peer-cert-file", CertPath, + "--peer-key-file", PrivateKeyPath, + "--peer-trusted-ca-file", CaPath, + } + args = append(args, tlsPeerArgs...) + } + } + + if cfg.IsClientCRL { + args = append(args, "--client-crl-file", CrlPath, "--client-cert-auth") + } + + if len(cfg.CipherSuites) > 0 { + args = append(args, "--cipher-suites", strings.Join(cfg.CipherSuites, ",")) + } + + return args +} + +func (epc *EtcdProcessCluster) EndpointsV2() []string { + return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV2() }) +} + +func (epc *EtcdProcessCluster) EndpointsV3() []string { + return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV3() }) +} + +func (epc *EtcdProcessCluster) Endpoints(f func(ep EtcdProcess) []string) (ret []string) { + for _, p := range epc.Procs { + ret = append(ret, f(p)...) 
+ } + return ret +} + +func (epc *EtcdProcessCluster) Start() error { + return epc.start(func(ep EtcdProcess) error { return ep.Start() }) +} + +func (epc *EtcdProcessCluster) RollingStart() error { + return epc.rollingStart(func(ep EtcdProcess) error { return ep.Start() }) +} + +func (epc *EtcdProcessCluster) Restart() error { + return epc.start(func(ep EtcdProcess) error { return ep.Restart() }) +} + +func (epc *EtcdProcessCluster) start(f func(ep EtcdProcess) error) error { + readyC := make(chan error, len(epc.Procs)) + for i := range epc.Procs { + go func(n int) { readyC <- f(epc.Procs[n]) }(i) + } + for range epc.Procs { + if err := <-readyC; err != nil { + epc.Close() + return err + } + } + return nil +} + +func (epc *EtcdProcessCluster) rollingStart(f func(ep EtcdProcess) error) error { + readyC := make(chan error, len(epc.Procs)) + for i := range epc.Procs { + go func(n int) { readyC <- f(epc.Procs[n]) }(i) + // make sure the servers do not start at the same time + time.Sleep(time.Second) + } + for range epc.Procs { + if err := <-readyC; err != nil { + epc.Close() + return err + } + } + return nil +} + +func (epc *EtcdProcessCluster) Stop() (err error) { + for _, p := range epc.Procs { + if p == nil { + continue + } + if curErr := p.Stop(); curErr != nil { + if err != nil { + err = fmt.Errorf("%v; %v", err, curErr) + } else { + err = curErr + } + } + } + return err +} + +func (epc *EtcdProcessCluster) Close() error { + epc.lg.Info("closing test cluster...") + err := epc.Stop() + for _, p := range epc.Procs { + // p is nil when NewEtcdProcess fails in the middle + // Close still gets called to clean up test data + if p == nil { + continue + } + if cerr := p.Close(); cerr != nil { + err = cerr + } + } + epc.lg.Info("closed test cluster.") + return err +} + +func (epc *EtcdProcessCluster) WithStopSignal(sig os.Signal) (ret os.Signal) { + for _, p := range epc.Procs { + ret = p.WithStopSignal(sig) + } + return ret +} diff --git a/tests/e2e/cluster_direct_test.go b/tests/framework/e2e/cluster_direct.go similarity index 87% rename from tests/e2e/cluster_direct_test.go rename to tests/framework/e2e/cluster_direct.go index e5b91270a..c773dbe0f 100644 --- a/tests/e2e/cluster_direct_test.go +++ b/tests/framework/e2e/cluster_direct.go @@ -17,6 +17,6 @@ package e2e -func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) { - return newEtcdServerProcess(cfg) +func NewEtcdProcess(cfg *EtcdServerProcessConfig) (EtcdProcess, error) { + return NewEtcdServerProcess(cfg) } diff --git a/tests/e2e/cluster_proxy_test.go b/tests/framework/e2e/cluster_proxy.go similarity index 80% rename from tests/e2e/cluster_proxy_test.go rename to tests/framework/e2e/cluster_proxy.go index fd7924835..192079c48 100644 --- a/tests/e2e/cluster_proxy_test.go +++ b/tests/framework/e2e/cluster_proxy.go @@ -31,17 +31,17 @@ import ( ) type proxyEtcdProcess struct { - etcdProc etcdProcess + etcdProc EtcdProcess proxyV2 *proxyV2Proc proxyV3 *proxyV3Proc } -func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) { - return newProxyEtcdProcess(cfg) +func NewEtcdProcess(cfg *EtcdServerProcessConfig) (EtcdProcess, error) { + return NewProxyEtcdProcess(cfg) } -func newProxyEtcdProcess(cfg *etcdServerProcessConfig) (*proxyEtcdProcess, error) { - ep, err := newEtcdServerProcess(cfg) +func NewProxyEtcdProcess(cfg *EtcdServerProcessConfig) (*proxyEtcdProcess, error) { + ep, err := NewEtcdServerProcess(cfg) if err != nil { return nil, err } @@ -53,7 +53,7 @@ func newProxyEtcdProcess(cfg 
*etcdServerProcessConfig) (*proxyEtcdProcess, error return pep, nil } -func (p *proxyEtcdProcess) Config() *etcdServerProcessConfig { return p.etcdProc.Config() } +func (p *proxyEtcdProcess) Config() *EtcdServerProcessConfig { return p.etcdProc.Config() } func (p *proxyEtcdProcess) EndpointsV2() []string { return p.proxyV2.endpoints() } func (p *proxyEtcdProcess) EndpointsV3() []string { return p.proxyV3.endpoints() } @@ -115,7 +115,7 @@ func (p *proxyEtcdProcess) WithStopSignal(sig os.Signal) os.Signal { return p.etcdProc.WithStopSignal(sig) } -func (p *proxyEtcdProcess) Logs() logsExpect { +func (p *proxyEtcdProcess) Logs() LogsExpect { return p.etcdProc.Logs() } @@ -136,7 +136,7 @@ func (pp *proxyProc) start() error { if pp.proc != nil { panic("already started") } - proc, err := spawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil) + proc, err := SpawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil) if err != nil { return err } @@ -146,7 +146,7 @@ func (pp *proxyProc) start() error { func (pp *proxyProc) waitReady(readyStr string) error { defer close(pp.donec) - return waitReadyExpectProc(pp.proc, []string{readyStr}) + return WaitReadyExpectProc(pp.proc, []string{readyStr}) } func (pp *proxyProc) Stop() error { @@ -176,8 +176,8 @@ type proxyV2Proc struct { dataDir string } -func proxyListenURL(cfg *etcdServerProcessConfig, portOffset int) string { - u, err := url.Parse(cfg.acurl) +func proxyListenURL(cfg *EtcdServerProcessConfig, portOffset int) string { + u, err := url.Parse(cfg.Acurl) if err != nil { panic(err) } @@ -187,22 +187,22 @@ func proxyListenURL(cfg *etcdServerProcessConfig, portOffset int) string { return u.String() } -func newProxyV2Proc(cfg *etcdServerProcessConfig) *proxyV2Proc { +func newProxyV2Proc(cfg *EtcdServerProcessConfig) *proxyV2Proc { listenAddr := proxyListenURL(cfg, 2) name := fmt.Sprintf("testname-proxy-%p", cfg) - dataDir := path.Join(cfg.dataDirPath, name+".etcd") + dataDir := path.Join(cfg.DataDirPath, name+".etcd") args := []string{ "--name", name, "--proxy", "on", "--listen-client-urls", listenAddr, - "--initial-cluster", cfg.name + "=" + cfg.purl.String(), + "--initial-cluster", cfg.Name + "=" + cfg.Purl.String(), "--data-dir", dataDir, } return &proxyV2Proc{ proxyProc: proxyProc{ lg: cfg.lg, - execPath: cfg.execPath, - args: append(args, cfg.tlsArgs...), + execPath: cfg.ExecPath, + args: append(args, cfg.TlsArgs...), ep: listenAddr, donec: make(chan struct{}), }, @@ -239,33 +239,33 @@ type proxyV3Proc struct { proxyProc } -func newProxyV3Proc(cfg *etcdServerProcessConfig) *proxyV3Proc { +func newProxyV3Proc(cfg *EtcdServerProcessConfig) *proxyV3Proc { listenAddr := proxyListenURL(cfg, 3) args := []string{ "grpc-proxy", "start", "--listen-addr", strings.Split(listenAddr, "/")[2], - "--endpoints", cfg.acurl, + "--endpoints", cfg.Acurl, // pass-through member RPCs "--advertise-client-url", "", - "--data-dir", cfg.dataDirPath, + "--data-dir", cfg.DataDirPath, } murl := "" - if cfg.murl != "" { + if cfg.Murl != "" { murl = proxyListenURL(cfg, 4) args = append(args, "--metrics-addr", murl) } tlsArgs := []string{} - for i := 0; i < len(cfg.tlsArgs); i++ { - switch cfg.tlsArgs[i] { + for i := 0; i < len(cfg.TlsArgs); i++ { + switch cfg.TlsArgs[i] { case "--cert-file": - tlsArgs = append(tlsArgs, "--cert-file", cfg.tlsArgs[i+1]) + tlsArgs = append(tlsArgs, "--cert-file", cfg.TlsArgs[i+1]) i++ case "--key-file": - tlsArgs = append(tlsArgs, "--key-file", cfg.tlsArgs[i+1]) + tlsArgs = append(tlsArgs, "--key-file", 
cfg.TlsArgs[i+1]) i++ case "--trusted-ca-file": - tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.tlsArgs[i+1]) + tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.TlsArgs[i+1]) i++ case "--auto-tls": tlsArgs = append(tlsArgs, "--auto-tls", "--insecure-skip-tls-verify") @@ -273,21 +273,21 @@ func newProxyV3Proc(cfg *etcdServerProcessConfig) *proxyV3Proc { i++ // skip arg case "--client-cert-auth", "--peer-auto-tls": default: - tlsArgs = append(tlsArgs, cfg.tlsArgs[i]) + tlsArgs = append(tlsArgs, cfg.TlsArgs[i]) } // Configure certificates for connection proxy ---> server. // This certificate must NOT have CN set. tlsArgs = append(tlsArgs, - "--cert", path.Join(fixturesDir, "client-nocn.crt"), - "--key", path.Join(fixturesDir, "client-nocn.key.insecure"), - "--cacert", path.Join(fixturesDir, "ca.crt"), - "--client-crl-file", path.Join(fixturesDir, "revoke.crl")) + "--cert", path.Join(FixturesDir, "client-nocn.crt"), + "--key", path.Join(FixturesDir, "client-nocn.key.insecure"), + "--cacert", path.Join(FixturesDir, "ca.crt"), + "--client-crl-file", path.Join(FixturesDir, "revoke.crl")) } return &proxyV3Proc{ proxyProc{ lg: cfg.lg, - execPath: cfg.execPath, + execPath: cfg.ExecPath, args: append(args, tlsArgs...), ep: listenAddr, murl: murl, diff --git a/tests/framework/e2e/curl.go b/tests/framework/e2e/curl.go new file mode 100644 index 000000000..284b49aaa --- /dev/null +++ b/tests/framework/e2e/curl.go @@ -0,0 +1,106 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "math/rand" + "strings" +) + +type CURLReq struct { + Username string + Password string + + IsTLS bool + Timeout int + + Endpoint string + + Value string + Expected string + Header string + + MetricsURLScheme string + + Ciphers string +} + +// CURLPrefixArgs builds the beginning of a curl command for a given key +// addressed to a random URL in the given cluster. 
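A usage sketch of the request type above (hypothetical endpoint and payload; the key is base64-encoded as the v3 JSON gateway expects, and Expected is matched as a substring of curl's output via SpawnWithExpect):

if err := e2e.CURLPost(epc, e2e.CURLReq{
	Endpoint: "/v3/kv/range",
	Value:    `{"key": "Zm9v"}`, // base64("foo")
	Expected: `"header"`,        // every gateway response carries a header
	Timeout:  5,                 // seconds; becomes curl's -m flag
}); err != nil {
	t.Fatalf("range via curl failed: %v", err)
}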
+func CURLPrefixArgs(clus *EtcdProcessCluster, method string, req CURLReq) []string {
+	var (
+		cmdArgs = []string{"curl"}
+		acurl   = clus.Procs[rand.Intn(clus.Cfg.ClusterSize)].Config().Acurl
+	)
+	if req.MetricsURLScheme != "https" {
+		if req.IsTLS {
+			if clus.Cfg.ClientTLS != ClientTLSAndNonTLS {
+				panic("should not set IsTLS when serving only TLS or only non-TLS")
+			}
+			cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath)
+			acurl = ToTLS(clus.Procs[rand.Intn(clus.Cfg.ClusterSize)].Config().Acurl)
+		} else if clus.Cfg.ClientTLS == ClientTLS {
+			if !clus.Cfg.NoCN {
+				cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath)
+			} else {
+				cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath3, "--key", PrivateKeyPath3)
+			}
+		}
+	}
+	if req.MetricsURLScheme != "" {
+		acurl = clus.Procs[rand.Intn(clus.Cfg.ClusterSize)].EndpointsMetrics()[0]
+	}
+	ep := acurl + req.Endpoint
+
+	if req.Username != "" || req.Password != "" {
+		cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.Username, req.Password), ep)
+	} else {
+		cmdArgs = append(cmdArgs, "-L", ep)
+	}
+	if req.Timeout != 0 {
+		cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.Timeout))
+	}
+
+	if req.Header != "" {
+		cmdArgs = append(cmdArgs, "-H", req.Header)
+	}
+
+	if req.Ciphers != "" {
+		cmdArgs = append(cmdArgs, "--ciphers", req.Ciphers)
+	}
+
+	switch method {
+	case "POST", "PUT":
+		dt := req.Value
+		if !strings.HasPrefix(dt, "{") { // for non-JSON value
+			dt = "value=" + dt
+		}
+		cmdArgs = append(cmdArgs, "-X", method, "-d", dt)
+	}
+	return cmdArgs
+}
+
+func CURLPost(clus *EtcdProcessCluster, req CURLReq) error {
+	return SpawnWithExpect(CURLPrefixArgs(clus, "POST", req), req.Expected)
+}
+
+func CURLPut(clus *EtcdProcessCluster, req CURLReq) error {
+	return SpawnWithExpect(CURLPrefixArgs(clus, "PUT", req), req.Expected)
+}
+
+func CURLGet(clus *EtcdProcessCluster, req CURLReq) error {
+	return SpawnWithExpect(CURLPrefixArgs(clus, "GET", req), req.Expected)
+}
diff --git a/tests/framework/e2e/etcd_process.go b/tests/framework/e2e/etcd_process.go
new file mode 100644
index 000000000..e31f906ae
--- /dev/null
+++ b/tests/framework/e2e/etcd_process.go
@@ -0,0 +1,178 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+
+	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/pkg/v3/expect"
+	"go.uber.org/zap"
+)
+
+var (
+	EtcdServerReadyLines = []string{"ready to serve client requests"}
+	BinPath              string
+	CtlBinPath           string
+	UtlBinPath           string
+)
+
+// EtcdProcess is a process that serves etcd requests.
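The EtcdProcess interface defined next is the seam between the framework's two build flavors: cluster_direct.go returns a bare server process from NewEtcdProcess, while cluster_proxy.go wraps the same process in v2 and gRPC proxies. A sketch of a helper written once against the interface (hypothetical, not part of this change):

// restartAll restarts every process in a cluster, whichever flavor it is.
func restartAll(procs []e2e.EtcdProcess) error {
	for _, p := range procs {
		if err := p.Restart(); err != nil {
			return fmt.Errorf("restart %s: %w", p.Config().Name, err)
		}
	}
	return nil
}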
+type EtcdProcess interface {
+	EndpointsV2() []string
+	EndpointsV3() []string
+	EndpointsMetrics() []string
+
+	Start() error
+	Restart() error
+	Stop() error
+	Close() error
+	WithStopSignal(sig os.Signal) os.Signal
+	Config() *EtcdServerProcessConfig
+	Logs() LogsExpect
+}
+
+type LogsExpect interface {
+	Expect(string) (string, error)
+}
+
+type EtcdServerProcess struct {
+	cfg   *EtcdServerProcessConfig
+	proc  *expect.ExpectProcess
+	donec chan struct{} // closed when Interact() terminates
+}
+
+type EtcdServerProcessConfig struct {
+	lg       *zap.Logger
+	ExecPath string
+	Args     []string
+	TlsArgs  []string
+	EnvVars  map[string]string
+
+	DataDirPath string
+	KeepDataDir bool
+
+	Name string
+
+	Purl url.URL
+
+	Acurl string
+	Murl  string
+
+	InitialToken   string
+	InitialCluster string
+}
+
+func NewEtcdServerProcess(cfg *EtcdServerProcessConfig) (*EtcdServerProcess, error) {
+	if !fileutil.Exist(cfg.ExecPath) {
+		return nil, fmt.Errorf("could not find etcd binary: %s", cfg.ExecPath)
+	}
+	if !cfg.KeepDataDir {
+		if err := os.RemoveAll(cfg.DataDirPath); err != nil {
+			return nil, err
+		}
+	}
+	return &EtcdServerProcess{cfg: cfg, donec: make(chan struct{})}, nil
+}
+
+func (ep *EtcdServerProcess) EndpointsV2() []string      { return []string{ep.cfg.Acurl} }
+func (ep *EtcdServerProcess) EndpointsV3() []string      { return ep.EndpointsV2() }
+func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.Murl} }
+
+func (ep *EtcdServerProcess) Start() error {
+	if ep.proc != nil {
+		panic("already started")
+	}
+	ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.Name))
+	proc, err := SpawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.ExecPath}, ep.cfg.Args...), ep.cfg.EnvVars)
+	if err != nil {
+		return err
+	}
+	ep.proc = proc
+	err = ep.waitReady()
+	if err == nil {
+		ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.Name))
+	}
+	return err
+}
+
+func (ep *EtcdServerProcess) Restart() error {
+	ep.cfg.lg.Info("restarting server...", zap.String("name", ep.cfg.Name))
+	if err := ep.Stop(); err != nil {
+		return err
+	}
+	ep.donec = make(chan struct{})
+	err := ep.Start()
+	if err == nil {
+		ep.cfg.lg.Info("restarted server", zap.String("name", ep.cfg.Name))
+	}
+	return err
+}
+
+func (ep *EtcdServerProcess) Stop() (err error) {
+	if ep == nil || ep.proc == nil {
+		return nil
+	}
+	ep.cfg.lg.Info("stopping server...", zap.String("name", ep.cfg.Name))
+	err = ep.proc.Stop()
+	if err != nil {
+		return err
+	}
+	ep.proc = nil
+	<-ep.donec
+	ep.donec = make(chan struct{})
+	if ep.cfg.Purl.Scheme == "unix" || ep.cfg.Purl.Scheme == "unixs" {
+		err = os.Remove(ep.cfg.Purl.Host + ep.cfg.Purl.Path)
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+	ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.Name))
+	return nil
+}
+
+func (ep *EtcdServerProcess) Close() error {
+	ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.Name))
+	if err := ep.Stop(); err != nil {
+		return err
+	}
+	if !ep.cfg.KeepDataDir {
+		ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.DataDirPath))
+		return os.RemoveAll(ep.cfg.DataDirPath)
+	}
+	return nil
+}
+
+func (ep *EtcdServerProcess) WithStopSignal(sig os.Signal) os.Signal {
+	ret := ep.proc.StopSignal
+	ep.proc.StopSignal = sig
+	return ret
+}
+
+func (ep *EtcdServerProcess) waitReady() error {
+	defer close(ep.donec)
+	return WaitReadyExpectProc(ep.proc, EtcdServerReadyLines)
+}
+
+func (ep *EtcdServerProcess) Config() *EtcdServerProcessConfig { return ep.cfg }
+
+func (ep *EtcdServerProcess) Logs() LogsExpect {
+	if ep.proc == nil {
+		ep.cfg.lg.Panic("Please grab logs before the process is stopped")
+	}
+	return ep.proc
+}
diff --git a/tests/e2e/etcd_spawn_cov.go b/tests/framework/e2e/etcd_spawn_cov.go
similarity index 89%
rename from tests/e2e/etcd_spawn_cov.go
rename to tests/framework/e2e/etcd_spawn_cov.go
index 9b24ac9d0..7c4ff8c0f 100644
--- a/tests/e2e/etcd_spawn_cov.go
+++ b/tests/framework/e2e/etcd_spawn_cov.go
@@ -26,7 +26,7 @@ import (
 
 	"go.etcd.io/etcd/client/pkg/v3/fileutil"
 	"go.etcd.io/etcd/pkg/v3/expect"
-	"go.etcd.io/etcd/tests/v3/integration"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.uber.org/zap"
 )
 
@@ -36,11 +36,11 @@ var (
 	coverDir = integration.MustAbsPath(os.Getenv("COVERDIR"))
 )
 
-func spawnCmd(args []string) (*expect.ExpectProcess, error) {
-	return spawnCmdWithLogger(zap.NewNop(), args)
+func SpawnCmd(args []string) (*expect.ExpectProcess, error) {
+	return SpawnCmdWithLogger(zap.NewNop(), args)
 }
 
-func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
+func SpawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
 	cmd := args[0]
 	env := make([]string, 0)
 	switch {
@@ -51,7 +51,7 @@ func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, e
 	case strings.HasSuffix(cmd, "/etcdutl"):
 		cmd = cmd + "_test"
 	case strings.HasSuffix(cmd, "/etcdctl3"):
-		cmd = ctlBinPath + "_test"
+		cmd = CtlBinPath + "_test"
 		env = append(env, "ETCDCTL_API=3")
 	}
 
diff --git a/tests/e2e/etcd_spawn_nocov.go b/tests/framework/e2e/etcd_spawn_nocov.go
similarity index 88%
rename from tests/e2e/etcd_spawn_nocov.go
rename to tests/framework/e2e/etcd_spawn_nocov.go
index 2ed551394..a9343ccea 100644
--- a/tests/e2e/etcd_spawn_nocov.go
+++ b/tests/framework/e2e/etcd_spawn_nocov.go
@@ -28,11 +28,11 @@ import (
 
 const noOutputLineCount = 0 // regular binaries emit no extra lines
 
-func spawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
-	return spawnCmdWithLogger(zap.NewNop(), args, envVars)
+func SpawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
+	return SpawnCmdWithLogger(zap.NewNop(), args, envVars)
 }
 
-func spawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
+func SpawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
 	wd, err := os.Getwd()
 	if err != nil {
 		return nil, err
@@ -41,7 +41,7 @@ func spawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string
 	if strings.HasSuffix(args[0], "/etcdctl3") {
 		env = append(env, "ETCDCTL_API=3")
 		lg.Info("spawning process with ETCDCTL_API=3", zap.Strings("args", args), zap.String("working-dir", wd), zap.Strings("environment-variables", env))
-		return expect.NewExpectWithEnv(ctlBinPath, args[1:], env)
+		return expect.NewExpectWithEnv(CtlBinPath, args[1:], env)
 	}
 	lg.Info("spawning process", zap.Strings("args", args), zap.String("working-dir", wd), zap.Strings("environment-variables", env))
 	return expect.NewExpectWithEnv(args[0], args[1:], env)
diff --git a/tests/framework/e2e/flags.go b/tests/framework/e2e/flags.go
new file mode 100644
index 000000000..9a2c933d2
--- /dev/null
+++ b/tests/framework/e2e/flags.go
@@ -0,0 +1,72 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+	"flag"
+	"os"
+	"runtime"
+
+	"go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+var (
+	BinDir  string
+	CertDir string
+
+	CertPath       string
+	PrivateKeyPath string
+	CaPath         string
+
+	CertPath2       string
+	PrivateKeyPath2 string
+
+	CertPath3       string
+	PrivateKeyPath3 string
+
+	CrlPath               string
+	RevokedCertPath       string
+	RevokedPrivateKeyPath string
+
+	FixturesDir = integration.MustAbsPath("../fixtures")
+)
+
+func InitFlags() {
+	os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH)
+	os.Unsetenv("ETCDCTL_API")
+
+	binDirDef := integration.MustAbsPath("../../bin")
+	certDirDef := FixturesDir
+
+	flag.StringVar(&BinDir, "bin-dir", binDirDef, "The directory for storing etcd and etcdctl binaries.")
+	flag.StringVar(&CertDir, "cert-dir", certDirDef, "The directory for storing certificate files.")
+	flag.Parse()
+
+	BinPath = BinDir + "/etcd"
+	CtlBinPath = BinDir + "/etcdctl"
+	UtlBinPath = BinDir + "/etcdutl"
+	CertPath = CertDir + "/server.crt"
+	PrivateKeyPath = CertDir + "/server.key.insecure"
+	CaPath = CertDir + "/ca.crt"
+	RevokedCertPath = CertDir + "/server-revoked.crt"
+	RevokedPrivateKeyPath = CertDir + "/server-revoked.key.insecure"
+	CrlPath = CertDir + "/revoke.crl"
+
+	CertPath2 = CertDir + "/server2.crt"
+	PrivateKeyPath2 = CertDir + "/server2.key.insecure"
+
+	CertPath3 = CertDir + "/server3.crt"
+	PrivateKeyPath3 = CertDir + "/server3.key.insecure"
+}
diff --git a/tests/e2e/testing.go b/tests/framework/e2e/testing.go
similarity index 98%
rename from tests/e2e/testing.go
rename to tests/framework/e2e/testing.go
index 00e98114e..e1447146c 100644
--- a/tests/e2e/testing.go
+++ b/tests/framework/e2e/testing.go
@@ -24,7 +24,7 @@ import (
 )
 
 func BeforeTest(t testing.TB) {
-	skipInShortMode(t)
+	SkipInShortMode(t)
 	testutil.RegisterLeakDetection(t)
 
 	os.Setenv(verify.ENV_VERIFY, verify.ENV_VERIFY_ALL_VALUE)
diff --git a/tests/e2e/util.go b/tests/framework/e2e/util.go
similarity index 75%
rename from tests/e2e/util.go
rename to tests/framework/e2e/util.go
index 2aa45bc95..a3b903863 100644
--- a/tests/e2e/util.go
+++ b/tests/framework/e2e/util.go
@@ -26,7 +26,7 @@ import (
 	"go.etcd.io/etcd/pkg/v3/expect"
 )
 
-func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
+func WaitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
 	matchSet := func(l string) bool {
 		for _, s := range readyStrs {
 			if strings.Contains(l, s) {
@@ -39,21 +39,21 @@ func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error
 	return err
 }
 
-func spawnWithExpect(args []string, expected string) error {
-	return spawnWithExpects(args, nil, []string{expected}...)
+func SpawnWithExpect(args []string, expected string) error {
+	return SpawnWithExpects(args, nil, []string{expected}...)
 }
 
-func spawnWithExpectWithEnv(args []string, envVars map[string]string, expected string) error {
-	return spawnWithExpects(args, envVars, []string{expected}...)
+func SpawnWithExpectWithEnv(args []string, envVars map[string]string, expected string) error {
+	return SpawnWithExpects(args, envVars, []string{expected}...)
} -func spawnWithExpects(args []string, envVars map[string]string, xs ...string) error { - _, err := spawnWithExpectLines(args, envVars, xs...) +func SpawnWithExpects(args []string, envVars map[string]string, xs ...string) error { + _, err := SpawnWithExpectLines(args, envVars, xs...) return err } -func spawnWithExpectLines(args []string, envVars map[string]string, xs ...string) ([]string, error) { - proc, err := spawnCmd(args, envVars) +func SpawnWithExpectLines(args []string, envVars map[string]string, xs ...string) ([]string, error) { + proc, err := SpawnCmd(args, envVars) if err != nil { return nil, err } @@ -84,11 +84,11 @@ func spawnWithExpectLines(args []string, envVars map[string]string, xs ...string return lines, perr } -func randomLeaseID() int64 { +func RandomLeaseID() int64 { return rand.New(rand.NewSource(time.Now().UnixNano())).Int63() } -func dataMarshal(data interface{}) (d string, e error) { +func DataMarshal(data interface{}) (d string, e error) { m, err := json.Marshal(data) if err != nil { return "", err @@ -96,7 +96,7 @@ func dataMarshal(data interface{}) (d string, e error) { return string(m), nil } -func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error { +func CloseWithTimeout(p *expect.ExpectProcess, d time.Duration) error { errc := make(chan error, 1) go func() { errc <- p.Close() }() select { @@ -105,15 +105,15 @@ func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error { case <-time.After(d): p.Stop() // retry close after stopping to collect SIGQUIT data, if any - closeWithTimeout(p, time.Second) + CloseWithTimeout(p, time.Second) } return fmt.Errorf("took longer than %v to Close process %+v", d, p) } -func toTLS(s string) string { +func ToTLS(s string) string { return strings.Replace(s, "http://", "https://", 1) } -func skipInShortMode(t testing.TB) { +func SkipInShortMode(t testing.TB) { testutil.SkipTestIfShortMode(t, "e2e tests are not running in --short mode") } diff --git a/tests/e2e/v2_test.go b/tests/framework/e2e/v2.go similarity index 92% rename from tests/e2e/v2_test.go rename to tests/framework/e2e/v2.go index fe44b3f96..4610841fe 100644 --- a/tests/e2e/v2_test.go +++ b/tests/framework/e2e/v2.go @@ -17,4 +17,4 @@ package e2e -func addV2Args(args []string) []string { return args } +func AddV2Args(args []string) []string { return args } diff --git a/tests/e2e/v2v3_test.go b/tests/framework/e2e/v2v3.go similarity index 94% rename from tests/e2e/v2v3_test.go rename to tests/framework/e2e/v2v3.go index 75043b7df..a42bb936d 100644 --- a/tests/e2e/v2v3_test.go +++ b/tests/framework/e2e/v2v3.go @@ -17,6 +17,6 @@ package e2e -func addV2Args(args []string) []string { +func AddV2Args(args []string) []string { return append(args, "--experimental-enable-v2v3", "v2/") } diff --git a/tests/integration/bridge.go b/tests/framework/integration/bridge.go similarity index 100% rename from tests/integration/bridge.go rename to tests/framework/integration/bridge.go diff --git a/tests/integration/cluster.go b/tests/framework/integration/cluster.go similarity index 66% rename from tests/integration/cluster.go rename to tests/framework/integration/cluster.go index 528bcb902..55d9e28c4 100644 --- a/tests/integration/cluster.go +++ b/tests/framework/integration/cluster.go @@ -67,31 +67,31 @@ import ( const ( // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss. 
 	RequestWaitTimeout = 5 * time.Second
-	tickDuration       = 10 * time.Millisecond
-	requestTimeout     = 20 * time.Second
+	TickDuration       = 10 * time.Millisecond
+	RequestTimeout     = 20 * time.Second
 
-	clusterName  = "etcd"
-	basePort     = 21000
+	ClusterName  = "etcd"
+	BasePort     = 21000
 	URLScheme    = "unix"
 	URLSchemeTLS = "unixs"
 
-	baseGRPCPort = 30000
+	BaseGRPCPort = 30000
 )
 
 var (
-	electionTicks = 10
+	ElectionTicks = 10
 
-	// integration test uses unique ports, counting up, to listen for each
+	// LocalListenCount lets integration tests use unique ports, counting up, to listen for each
 	// member, ensuring restarted members can listen on the same port again.
-	localListenCount = int64(0)
+	LocalListenCount = int64(0)
 
-	testTLSInfo = transport.TLSInfo{
+	TestTLSInfo = transport.TLSInfo{
 		KeyFile:        MustAbsPath("../fixtures/server.key.insecure"),
 		CertFile:       MustAbsPath("../fixtures/server.crt"),
 		TrustedCAFile:  MustAbsPath("../fixtures/ca.crt"),
 		ClientCertAuth: true,
 	}
 
-	testTLSInfoWithSpecificUsage = transport.TLSInfo{
+	TestTLSInfoWithSpecificUsage = transport.TLSInfo{
 		KeyFile:        MustAbsPath("../fixtures/server-serverusage.key.insecure"),
 		CertFile:       MustAbsPath("../fixtures/server-serverusage.crt"),
 		ClientKeyFile:  MustAbsPath("../fixtures/client-clientusage.key.insecure"),
@@ -100,33 +100,33 @@ var (
 		ClientCertAuth: true,
 	}
 
-	testTLSInfoIP = transport.TLSInfo{
+	TestTLSInfoIP = transport.TLSInfo{
 		KeyFile:        MustAbsPath("../fixtures/server-ip.key.insecure"),
 		CertFile:       MustAbsPath("../fixtures/server-ip.crt"),
 		TrustedCAFile:  MustAbsPath("../fixtures/ca.crt"),
 		ClientCertAuth: true,
 	}
 
-	testTLSInfoExpired = transport.TLSInfo{
+	TestTLSInfoExpired = transport.TLSInfo{
 		KeyFile:        MustAbsPath("./fixtures-expired/server.key.insecure"),
 		CertFile:       MustAbsPath("./fixtures-expired/server.crt"),
 		TrustedCAFile:  MustAbsPath("./fixtures-expired/ca.crt"),
 		ClientCertAuth: true,
 	}
 
-	testTLSInfoExpiredIP = transport.TLSInfo{
+	TestTLSInfoExpiredIP = transport.TLSInfo{
 		KeyFile:        MustAbsPath("./fixtures-expired/server-ip.key.insecure"),
 		CertFile:       MustAbsPath("./fixtures-expired/server-ip.crt"),
 		TrustedCAFile:  MustAbsPath("./fixtures-expired/ca.crt"),
 		ClientCertAuth: true,
 	}
 
-	defaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s",
+	DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s",
 		MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure"))
 
-	// uniqueNumber is used to generate unique port numbers
+	// UniqueNumber is used to generate unique port numbers
 	// Should only be accessed via atomic package methods.
- uniqueNumber int32 + UniqueNumber int32 ) type ClusterConfig struct { @@ -171,33 +171,33 @@ type ClusterConfig struct { WatchProgressNotifyInterval time.Duration } -type cluster struct { - cfg *ClusterConfig - Members []*member - lastMemberNum int +type Cluster struct { + Cfg *ClusterConfig + Members []*Member + LastMemberNum int } -func (c *cluster) generateMemberName() string { - c.lastMemberNum++ - return fmt.Sprintf("m%v", c.lastMemberNum-1) +func (c *Cluster) generateMemberName() string { + c.LastMemberNum++ + return fmt.Sprintf("m%v", c.LastMemberNum-1) } -func schemeFromTLSInfo(tls *transport.TLSInfo) string { +func SchemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return URLScheme } return URLSchemeTLS } -func (c *cluster) fillClusterForMembers() error { - if c.cfg.DiscoveryURL != "" { - // cluster will be discovered +func (c *Cluster) fillClusterForMembers() error { + if c.Cfg.DiscoveryURL != "" { + // Cluster will be discovered return nil } addrs := make([]string, 0) for _, m := range c.Members { - scheme := schemeFromTLSInfo(m.PeerTLSInfo) + scheme := SchemeFromTLSInfo(m.PeerTLSInfo) for _, l := range m.PeerListeners { addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String())) } @@ -213,11 +213,11 @@ func (c *cluster) fillClusterForMembers() error { return nil } -func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster { - testutil.SkipTestIfShortMode(t, "Cannot start etcd cluster in --short tests") +func NewClusterFromConfig(t testutil.TB, cfg *ClusterConfig) *Cluster { + testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests") - c := &cluster{cfg: cfg} - ms := make([]*member, cfg.Size) + c := &Cluster{Cfg: cfg} + ms := make([]*Member, cfg.Size) for i := 0; i < cfg.Size; i++ { ms[i] = c.mustNewMember(t, int64(i)) } @@ -229,24 +229,24 @@ func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster { return c } -// NewCluster returns an unlaunched cluster of the given size which has been +// NewCluster returns an unlaunched Cluster of the given size which has been // set to use static bootstrap. -func NewCluster(t testutil.TB, size int) *cluster { +func NewCluster(t testutil.TB, size int) *Cluster { t.Helper() - return newCluster(t, &ClusterConfig{Size: size}) + return NewClusterFromConfig(t, &ClusterConfig{Size: size}) } -// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration -func NewClusterByConfig(t testutil.TB, cfg *ClusterConfig) *cluster { - return newCluster(t, cfg) +// NewClusterByConfig returns an unlaunched Cluster defined by a Cluster configuration +func NewClusterByConfig(t testutil.TB, cfg *ClusterConfig) *Cluster { + return NewClusterFromConfig(t, cfg) } -func (c *cluster) Launch(t testutil.TB) { +func (c *Cluster) Launch(t testutil.TB) { errc := make(chan error) for _, m := range c.Members { // Members are launched in separate goroutines because if they boot // using discovery url, they have to wait for others to register to continue. 
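The goroutine fan-out in the hunk that follows is driven from tests roughly like this (a sketch; NewCluster, Launch, and Terminate are all part of this file):

clus := integration.NewCluster(t, 3)
clus.Launch(t)
defer clus.Terminate(t)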
- go func(m *member) { + go func(m *Member) { errc <- m.Launch() }(m) } @@ -256,28 +256,28 @@ func (c *cluster) Launch(t testutil.TB) { t.Fatalf("error setting up member: %v", err) } } - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) + // wait Cluster to be stable to receive future client requests + c.WaitMembersMatch(t, c.HTTPMembers()) c.waitVersion() for _, m := range c.Members { t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL()) } } -func (c *cluster) URL(i int) string { +func (c *Cluster) URL(i int) string { return c.Members[i].ClientURLs[0].String() } -// URLs returns a list of all active client URLs in the cluster -func (c *cluster) URLs() []string { +// URLs returns a list of all active client URLs in the Cluster +func (c *Cluster) URLs() []string { return getMembersURLs(c.Members) } -func getMembersURLs(members []*member) []string { +func getMembersURLs(members []*Member) []string { urls := make([]string, 0) for _, m := range members { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } @@ -289,11 +289,11 @@ func getMembersURLs(members []*member) []string { } // HTTPMembers returns a list of all active members as client.Members -func (c *cluster) HTTPMembers() []client.Member { +func (c *Cluster) HTTPMembers() []client.Member { ms := []client.Member{} for _, m := range c.Members { - pScheme := schemeFromTLSInfo(m.PeerTLSInfo) - cScheme := schemeFromTLSInfo(m.ClientTLSInfo) + pScheme := SchemeFromTLSInfo(m.PeerTLSInfo) + cScheme := SchemeFromTLSInfo(m.ClientTLSInfo) cm := client.Member{Name: m.Name} for _, ln := range m.PeerListeners { cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String()) @@ -306,33 +306,33 @@ func (c *cluster) HTTPMembers() []client.Member { return ms } -func (c *cluster) mustNewMember(t testutil.TB, memberNumber int64) *member { - m := mustNewMember(t, - memberConfig{ - name: c.generateMemberName(), - memberNumber: memberNumber, - authToken: c.cfg.AuthToken, - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, - maxTxnOps: c.cfg.MaxTxnOps, - maxRequestBytes: c.cfg.MaxRequestBytes, - snapshotCount: c.cfg.SnapshotCount, - snapshotCatchUpEntries: c.cfg.SnapshotCatchUpEntries, - grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime, - grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, - grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, - clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize, - clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize, - useIP: c.cfg.UseIP, - useBridge: c.cfg.UseBridge, - useTCP: c.cfg.UseTCP, - enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint, - leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval, - WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval, +func (c *Cluster) mustNewMember(t testutil.TB, memberNumber int64) *Member { + m := MustNewMember(t, + MemberConfig{ + Name: c.generateMemberName(), + MemberNumber: memberNumber, + AuthToken: c.Cfg.AuthToken, + PeerTLS: c.Cfg.PeerTLS, + ClientTLS: c.Cfg.ClientTLS, + QuotaBackendBytes: c.Cfg.QuotaBackendBytes, + MaxTxnOps: c.Cfg.MaxTxnOps, + MaxRequestBytes: c.Cfg.MaxRequestBytes, + SnapshotCount: c.Cfg.SnapshotCount, + SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries, + GrpcKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime, + GrpcKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval, + GrpcKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout, + ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, + ClientMaxCallRecvMsgSize: 
c.Cfg.ClientMaxCallRecvMsgSize, + UseIP: c.Cfg.UseIP, + UseBridge: c.Cfg.UseBridge, + UseTCP: c.Cfg.UseTCP, + EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint, + LeaseCheckpointInterval: c.Cfg.LeaseCheckpointInterval, + WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval, }) - m.DiscoveryURL = c.cfg.DiscoveryURL - if c.cfg.UseGRPC { + m.DiscoveryURL = c.Cfg.DiscoveryURL + if c.Cfg.UseGRPC { if err := m.listenGRPC(); err != nil { t.Fatal(err) } @@ -341,17 +341,17 @@ func (c *cluster) mustNewMember(t testutil.TB, memberNumber int64) *member { } // addMember return PeerURLs of the added member. -func (c *cluster) addMember(t testutil.TB) types.URLs { +func (c *Cluster) addMember(t testutil.TB) types.URLs { m := c.mustNewMember(t, 0) - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) + scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) - // send add request to the cluster + // send add request to the Cluster var err error for i := 0; i < len(c.Members); i++ { clientURL := c.URL(i) peerURL := scheme + "://" + m.PeerListeners[0].Addr().String() - if err = c.addMemberByURL(t, clientURL, peerURL); err == nil { + if err = c.AddMemberByURL(t, clientURL, peerURL); err == nil { break } } @@ -369,74 +369,74 @@ func (c *cluster) addMember(t testutil.TB) types.URLs { t.Fatal(err) } c.Members = append(c.Members, m) - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) + // wait Cluster to be stable to receive future client requests + c.WaitMembersMatch(t, c.HTTPMembers()) return m.PeerURLs } -func (c *cluster) addMemberByURL(t testutil.TB, clientURL, peerURL string) error { - cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS) +func (c *Cluster) AddMemberByURL(t testutil.TB, clientURL, peerURL string) error { + cc := MustNewHTTPClient(t, []string{clientURL}, c.Cfg.ClientTLS) ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := ma.Add(ctx, peerURL) cancel() if err != nil { return err } - // wait for the add node entry applied in the cluster + // wait for the add node entry applied in the Cluster members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) - c.waitMembersMatch(t, members) + c.WaitMembersMatch(t, members) return nil } // AddMember return PeerURLs of the added member. 
-func (c *cluster) AddMember(t testutil.TB) types.URLs { +func (c *Cluster) AddMember(t testutil.TB) types.URLs { return c.addMember(t) } -func (c *cluster) RemoveMember(t testutil.TB, id uint64) { - if err := c.removeMember(t, id); err != nil { +func (c *Cluster) MustRemoveMember(t testutil.TB, id uint64) { + if err := c.RemoveMember(t, id); err != nil { t.Fatal(err) } } -func (c *cluster) removeMember(t testutil.TB, id uint64) error { - // send remove request to the cluster - cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS) +func (c *Cluster) RemoveMember(t testutil.TB, id uint64) error { + // send remove request to the Cluster + cc := MustNewHTTPClient(t, c.URLs(), c.Cfg.ClientTLS) ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) err := ma.Remove(ctx, types.ID(id).String()) cancel() if err != nil { return err } - newMembers := make([]*member, 0) + newMembers := make([]*Member, 0) for _, m := range c.Members { - if uint64(m.s.ID()) != id { + if uint64(m.Server.ID()) != id { newMembers = append(newMembers, m) } else { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): m.Terminate(t) // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout // TODO: remove connection write timeout by selecting on http response closeNotifier // blocking on https://github.com/golang/go/issues/9524 - case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout): - t.Fatalf("failed to remove member %s in time", m.s.ID()) + case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout): + t.Fatalf("failed to remove member %s in time", m.Server.ID()) } } } c.Members = newMembers - c.waitMembersMatch(t, c.HTTPMembers()) + c.WaitMembersMatch(t, c.HTTPMembers()) return nil } -func (c *cluster) Terminate(t testutil.TB) { +func (c *Cluster) Terminate(t testutil.TB) { var wg sync.WaitGroup wg.Add(len(c.Members)) for _, m := range c.Members { - go func(mm *member) { + go func(mm *Member) { defer wg.Done() mm.Terminate(t) }(m) @@ -444,39 +444,39 @@ func (c *cluster) Terminate(t testutil.TB) { wg.Wait() } -func (c *cluster) waitMembersMatch(t testutil.TB, membs []client.Member) { +func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []client.Member) { for _, u := range c.URLs() { - cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS) + cc := MustNewHTTPClient(t, []string{u}, c.Cfg.ClientTLS) ma := client.NewMembersAPI(cc) for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) ms, err := ma.List(ctx) cancel() if err == nil && isMembersEqual(ms, membs) { break } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } } // WaitLeader returns index of the member in c.Members that is leader (or -1). -func (c *cluster) WaitLeader(t testutil.TB) int { return c.waitLeader(t, c.Members) } +func (c *Cluster) WaitLeader(t testutil.TB) int { return c.WaitMembersForLeader(t, c.Members) } -// waitLeader waits until given members agree on the same leader, +// WaitMembersForLeader waits until given members agree on the same leader, // and returns its 'index' in the 'membs' list (or -1). 
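A sketch of how a test consumes the returned index (hypothetical; WaitLeader above delegates here with c.Members):

lead := clus.WaitLeader(t) // index into clus.Members, or -1
if lead < 0 {
	t.Fatal("leader was not established")
}
t.Logf("leader is %s (%v)", clus.Members[lead].Name, clus.Members[lead].ID())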
-func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { +func (c *Cluster) WaitMembersForLeader(t testutil.TB, membs []*Member) int { possibleLead := make(map[uint64]bool) var lead uint64 for _, m := range membs { - possibleLead[uint64(m.s.ID())] = true + possibleLead[uint64(m.Server.ID())] = true } cc := MustNewHTTPClient(t, getMembersURLs(membs), nil) kapi := client.NewKeysAPI(cc) // ensure leader is up via linearizable get for { - ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 10*TickDuration+time.Second) _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) cancel() if err == nil || strings.Contains(err.Error(), "Key not found") { @@ -488,21 +488,21 @@ func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { lead = 0 for _, m := range membs { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } - if lead != 0 && lead != m.s.Lead() { + if lead != 0 && lead != m.Server.Lead() { lead = 0 - time.Sleep(10 * tickDuration) + time.Sleep(10 * TickDuration) break } - lead = m.s.Lead() + lead = m.Server.Lead() } } for i, m := range membs { - if uint64(m.s.ID()) == lead { + if uint64(m.Server.ID()) == lead { return i } } @@ -510,35 +510,35 @@ func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { return -1 } -func (c *cluster) WaitNoLeader() { c.waitNoLeader(c.Members) } +func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) } -// waitNoLeader waits until given members lose leader. -func (c *cluster) waitNoLeader(membs []*member) { +// WaitMembersNoLeader waits until given members lose leader. +func (c *Cluster) WaitMembersNoLeader(membs []*Member) { noLeader := false for !noLeader { noLeader = true for _, m := range membs { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } - if m.s.Lead() != 0 { + if m.Server.Lead() != 0 { noLeader = false - time.Sleep(10 * tickDuration) + time.Sleep(10 * TickDuration) break } } } } -func (c *cluster) waitVersion() { +func (c *Cluster) waitVersion() { for _, m := range c.Members { for { - if m.s.ClusterVersion() != nil { + if m.Server.ClusterVersion() != nil { break } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } } @@ -555,9 +555,9 @@ func isMembersEqual(membs []client.Member, wmembs []client.Member) bool { } func newLocalListener(t testutil.TB) net.Listener { - c := atomic.AddInt64(&localListenCount, 1) + c := atomic.AddInt64(&LocalListenCount, 1) // Go 1.8+ allows only numbers in port - addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid()) + addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid()) return NewListenerWithAddr(t, addr) } @@ -569,82 +569,82 @@ func NewListenerWithAddr(t testutil.TB, addr string) net.Listener { return l } -type member struct { +type Member struct { config.ServerConfig UniqNumber int64 MemberNumber int64 PeerListeners, ClientListeners []net.Listener - grpcListener net.Listener + GrpcListener net.Listener // PeerTLSInfo enables peer TLS when set PeerTLSInfo *transport.TLSInfo // ClientTLSInfo enables client TLS when set ClientTLSInfo *transport.TLSInfo DialOptions []grpc.DialOption - raftHandler *testutil.PauseableHandler - s *etcdserver.EtcdServer - serverClosers []func() + RaftHandler *testutil.PauseableHandler + Server *etcdserver.EtcdServer + ServerClosers []func() - grpcServerOpts []grpc.ServerOption - grpcServer *grpc.Server - grpcServerPeer *grpc.Server - grpcURL string 
- grpcBridge *bridge + GrpcServerOpts []grpc.ServerOption + GrpcServer *grpc.Server + GrpcServerPeer *grpc.Server + GrpcURL string + GrpcBridge *bridge - // serverClient is a clientv3 that directly calls the etcdserver. - serverClient *clientv3.Client + // ServerClient is a clientv3 that directly calls the etcdserver. + ServerClient *clientv3.Client - keepDataDirTerminate bool - clientMaxCallSendMsgSize int - clientMaxCallRecvMsgSize int - useIP bool - useBridge bool - useTCP bool + KeepDataDirTerminate bool + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int + UseIP bool + UseBridge bool + UseTCP bool - isLearner bool - closed bool + IsLearner bool + Closed bool - grpcServerRecorder *grpc_testing.GrpcRecorder + GrpcServerRecorder *grpc_testing.GrpcRecorder } -func (m *member) GRPCURL() string { return m.grpcURL } +func (m *Member) GRPCURL() string { return m.GrpcURL } -type memberConfig struct { - name string - uniqNumber int64 - memberNumber int64 - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - authToken string - quotaBackendBytes int64 - maxTxnOps uint - maxRequestBytes uint - snapshotCount uint64 - snapshotCatchUpEntries uint64 - grpcKeepAliveMinTime time.Duration - grpcKeepAliveInterval time.Duration - grpcKeepAliveTimeout time.Duration - clientMaxCallSendMsgSize int - clientMaxCallRecvMsgSize int - useIP bool - useBridge bool - useTCP bool - enableLeaseCheckpoint bool - leaseCheckpointInterval time.Duration +type MemberConfig struct { + Name string + UniqNumber int64 + MemberNumber int64 + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + AuthToken string + QuotaBackendBytes int64 + MaxTxnOps uint + MaxRequestBytes uint + SnapshotCount uint64 + SnapshotCatchUpEntries uint64 + GrpcKeepAliveMinTime time.Duration + GrpcKeepAliveInterval time.Duration + GrpcKeepAliveTimeout time.Duration + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int + UseIP bool + UseBridge bool + UseTCP bool + EnableLeaseCheckpoint bool + LeaseCheckpointInterval time.Duration WatchProgressNotifyInterval time.Duration } -// mustNewMember return an inited member with the given name. If peerTLS is +// MustNewMember return an inited member with the given name. If peerTLS is // set, it will use https scheme to communicate between peers. 
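Direct calls to the constructor below are rare (most tests reach it through ClusterConfig), but as a sketch of the exported config surface (field values hypothetical):

m := integration.MustNewMember(t, integration.MemberConfig{
	Name:          "m0",
	AuthToken:     "simple", // the default; spelled out for illustration
	SnapshotCount: 100,      // overrides etcdserver.DefaultSnapshotCount
	UseBridge:     true,     // required before m.Bridge() may be called
})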
-func mustNewMember(t testutil.TB, mcfg memberConfig) *member { +func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member { var err error - m := &member{ - MemberNumber: mcfg.memberNumber, - UniqNumber: atomic.AddInt64(&localListenCount, 1), + m := &Member{ + MemberNumber: mcfg.MemberNumber, + UniqNumber: atomic.AddInt64(&LocalListenCount, 1), } - peerScheme := schemeFromTLSInfo(mcfg.peerTLS) - clientScheme := schemeFromTLSInfo(mcfg.clientTLS) + peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS) + clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS) pln := newLocalListener(t) m.PeerListeners = []net.Listener{pln} @@ -652,7 +652,7 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { if err != nil { t.Fatal(err) } - m.PeerTLSInfo = mcfg.peerTLS + m.PeerTLSInfo = mcfg.PeerTLS cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -660,75 +660,75 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { if err != nil { t.Fatal(err) } - m.ClientTLSInfo = mcfg.clientTLS + m.ClientTLSInfo = mcfg.ClientTLS - m.Name = mcfg.name + m.Name = mcfg.Name m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd") if err != nil { t.Fatal(err) } - clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String()) + clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) if err != nil { t.Fatal(err) } - m.InitialClusterToken = clusterName + m.InitialClusterToken = ClusterName m.NewCluster = true m.BootstrapTimeout = 10 * time.Millisecond if m.PeerTLSInfo != nil { m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo } - m.ElectionTicks = electionTicks + m.ElectionTicks = ElectionTicks m.InitialElectionTickAdvance = true - m.TickMs = uint(tickDuration / time.Millisecond) - m.QuotaBackendBytes = mcfg.quotaBackendBytes - m.MaxTxnOps = mcfg.maxTxnOps + m.TickMs = uint(TickDuration / time.Millisecond) + m.QuotaBackendBytes = mcfg.QuotaBackendBytes + m.MaxTxnOps = mcfg.MaxTxnOps if m.MaxTxnOps == 0 { m.MaxTxnOps = embed.DefaultMaxTxnOps } - m.MaxRequestBytes = mcfg.maxRequestBytes + m.MaxRequestBytes = mcfg.MaxRequestBytes if m.MaxRequestBytes == 0 { m.MaxRequestBytes = embed.DefaultMaxRequestBytes } m.SnapshotCount = etcdserver.DefaultSnapshotCount - if mcfg.snapshotCount != 0 { - m.SnapshotCount = mcfg.snapshotCount + if mcfg.SnapshotCount != 0 { + m.SnapshotCount = mcfg.SnapshotCount } m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries - if mcfg.snapshotCatchUpEntries != 0 { - m.SnapshotCatchUpEntries = mcfg.snapshotCatchUpEntries + if mcfg.SnapshotCatchUpEntries != 0 { + m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries } // for the purpose of integration testing, simple token is enough m.AuthToken = "simple" - if mcfg.authToken != "" { - m.AuthToken = mcfg.authToken + if mcfg.AuthToken != "" { + m.AuthToken = mcfg.AuthToken } m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing - m.grpcServerOpts = []grpc.ServerOption{} - if mcfg.grpcKeepAliveMinTime > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: mcfg.grpcKeepAliveMinTime, + m.GrpcServerOpts = []grpc.ServerOption{} + if mcfg.GrpcKeepAliveMinTime > time.Duration(0) { + m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: mcfg.GrpcKeepAliveMinTime, PermitWithoutStream: false, })) } - if mcfg.grpcKeepAliveInterval > time.Duration(0) 
&& - mcfg.grpcKeepAliveTimeout > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: mcfg.grpcKeepAliveInterval, - Timeout: mcfg.grpcKeepAliveTimeout, + if mcfg.GrpcKeepAliveInterval > time.Duration(0) && + mcfg.GrpcKeepAliveTimeout > time.Duration(0) { + m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: mcfg.GrpcKeepAliveInterval, + Timeout: mcfg.GrpcKeepAliveTimeout, })) } - m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize - m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize - m.useIP = mcfg.useIP - m.useBridge = mcfg.useBridge - m.useTCP = mcfg.useTCP - m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint - m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval + m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize + m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize + m.UseIP = mcfg.UseIP + m.UseBridge = mcfg.UseBridge + m.UseTCP = mcfg.UseTCP + m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint + m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval @@ -737,8 +737,8 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration m.V2Deprecation = config.V2_DEPR_DEFAULT - m.grpcServerRecorder = &grpc_testing.GrpcRecorder{} - m.Logger = memberLogger(t, mcfg.name) + m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{} + m.Logger = memberLogger(t, mcfg.Name) t.Cleanup(func() { // if we didn't cleanup the logger, the consecutive test // might reuse this (t). @@ -758,7 +758,7 @@ func memberLogger(t testutil.TB, name string) *zap.Logger { } // listenGRPC starts a grpc server over a unix domain socket on the member -func (m *member) listenGRPC() error { +func (m *Member) listenGRPC() error { // prefix with localhost so cert has right domain network, host, port := m.grpcAddr() grpcAddr := host + ":" + port @@ -767,34 +767,34 @@ func (m *member) listenGRPC() error { if err != nil { return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err) } - m.grpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr) - if m.useBridge { + m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr) + if m.UseBridge { _, err = m.addBridge() if err != nil { grpcListener.Close() return err } } - m.grpcListener = grpcListener + m.GrpcListener = grpcListener return nil } -func (m *member) clientScheme() string { +func (m *Member) clientScheme() string { switch { - case m.useTCP && m.ClientTLSInfo != nil: + case m.UseTCP && m.ClientTLSInfo != nil: return "https" - case m.useTCP && m.ClientTLSInfo == nil: + case m.UseTCP && m.ClientTLSInfo == nil: return "http" - case !m.useTCP && m.ClientTLSInfo != nil: + case !m.UseTCP && m.ClientTLSInfo != nil: return "unixs" - case !m.useTCP && m.ClientTLSInfo == nil: + case !m.UseTCP && m.ClientTLSInfo == nil: return "unix" } m.Logger.Panic("Failed to determine client schema") return "" } -func (m *member) addBridge() (*bridge, error) { +func (m *Member) addBridge() (*bridge, error) { network, host, port := m.grpcAddr() grpcAddr := host + ":" + port bridgeAddr := grpcAddr + "0" @@ -803,41 +803,41 @@ func (m *member) addBridge() (*bridge, error) { if err != nil { return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err) } - m.grpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) + m.GrpcBridge, err = 
newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) if err != nil { bridgeListener.Close() return nil, err } - m.grpcURL = m.clientScheme() + "://" + bridgeAddr - return m.grpcBridge, nil + m.GrpcURL = m.clientScheme() + "://" + bridgeAddr + return m.GrpcBridge, nil } -func (m *member) Bridge() *bridge { - if !m.useBridge { - m.Logger.Panic("Bridge not available. Please configure using bridge before creating cluster.") +func (m *Member) Bridge() *bridge { + if !m.UseBridge { + m.Logger.Panic("Bridge not available. Please configure using bridge before creating Cluster.") } - return m.grpcBridge + return m.GrpcBridge } -func (m *member) grpcAddr() (network, host, port string) { +func (m *Member) grpcAddr() (network, host, port string) { // prefix with localhost so cert has right domain host = "localhost" - if m.useIP { // for IP-only TLS certs + if m.UseIP { // for IP-only TLS certs host = "127.0.0.1" } network = "unix" - if m.useTCP { + if m.UseTCP { network = "tcp" } port = m.Name - if m.useTCP { + if m.UseTCP { port = fmt.Sprintf("%d", GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } return network, host, port } func GrpcPortNumber(uniqNumber, memberNumber int64) int64 { - return baseGRPCPort + uniqNumber*10 + memberNumber + return BaseGRPCPort + uniqNumber*10 + memberNumber } type dialer struct { @@ -849,24 +849,24 @@ func (d dialer) Dial() (net.Conn, error) { return net.Dial(d.network, d.addr) } -func (m *member) ElectionTimeout() time.Duration { - return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond +func (m *Member) ElectionTimeout() time.Duration { + return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond } -func (m *member) ID() types.ID { return m.s.ID() } +func (m *Member) ID() types.ID { return m.Server.ID() } // NewClientV3 creates a new grpc client connection to the member -func NewClientV3(m *member) (*clientv3.Client, error) { - if m.grpcURL == "" { +func NewClientV3(m *Member) (*clientv3.Client, error) { + if m.GrpcURL == "" { return nil, fmt.Errorf("member not configured for grpc") } cfg := clientv3.Config{ - Endpoints: []string{m.grpcURL}, + Endpoints: []string{m.GrpcURL}, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, - MaxCallSendMsgSize: m.clientMaxCallSendMsgSize, - MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize, + MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize, + MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize, Logger: m.Logger.Named("client"), } @@ -885,8 +885,8 @@ func NewClientV3(m *member) (*clientv3.Client, error) { // Clone returns a member with the same server configuration. The returned // member will not set PeerListeners and ClientListeners. -func (m *member) Clone(t testutil.TB) *member { - mm := &member{} +func (m *Member) Clone(t testutil.TB) *Member { + mm := &Member{} mm.ServerConfig = m.ServerConfig var err error @@ -918,20 +918,20 @@ func (m *member) Clone(t testutil.TB) *member { // Launch starts a member based on ServerConfig, PeerListeners // and ClientListeners. 
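GrpcPortNumber above gives every (UniqNumber, MemberNumber) pair its own TCP port when members listen on TCP instead of unix sockets (the Launch hunk continues below). A self-contained sketch of the arithmetic; the BaseGRPCPort value here is assumed purely for illustration, the real exported constant lives elsewhere in this package:

```go
package main

import "fmt"

// BaseGRPCPort is assumed to be 30000 here for illustration only; the
// framework defines the real (now exported) constant elsewhere.
const BaseGRPCPort = 30000

// GrpcPortNumber mirrors the function above: every UniqNumber owns a
// stride of 10 ports and MemberNumber picks a slot inside that stride.
func GrpcPortNumber(uniqNumber, memberNumber int64) int64 {
	return BaseGRPCPort + uniqNumber*10 + memberNumber
}

func main() {
	// Member index 2 with unique listener number 5:
	// 30000 + 5*10 + 2 = 30052. Distinct UniqNumbers never collide
	// as long as a member index stays below the stride of 10.
	fmt.Println(GrpcPortNumber(5, 2)) // 30052
}
```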
-func (m *member) Launch() error { +func (m *Member) Launch() error { m.Logger.Info( "launching a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) var err error - if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil { + if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) - m.s.Start() + m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond) + m.Server.Start() var peerTLScfg *tls.Config if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() { @@ -940,7 +940,7 @@ func (m *member) Launch() error { } } - if m.grpcListener != nil { + if m.GrpcListener != nil { var ( tlscfg *tls.Config ) @@ -950,23 +950,23 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerRecorder.UnaryInterceptor(), m.grpcServerOpts...) - m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg, m.grpcServerRecorder.UnaryInterceptor()) - m.serverClient = v3client.New(m.s) - lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) - epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) - go m.grpcServer.Serve(m.grpcListener) + m.GrpcServer = v3rpc.Server(m.Server, tlscfg, m.GrpcServerRecorder.UnaryInterceptor(), m.GrpcServerOpts...) + m.GrpcServerPeer = v3rpc.Server(m.Server, peerTLScfg, m.GrpcServerRecorder.UnaryInterceptor()) + m.ServerClient = v3client.New(m.Server) + lockpb.RegisterLockServer(m.GrpcServer, v3lock.NewLockServer(m.ServerClient)) + epb.RegisterElectionServer(m.GrpcServer, v3election.NewElectionServer(m.ServerClient)) + go m.GrpcServer.Serve(m.GrpcListener) } - m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.s)} + m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)} - h := (http.Handler)(m.raftHandler) - if m.grpcListener != nil { + h := (http.Handler)(m.RaftHandler) + if m.GrpcListener != nil { h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - m.grpcServerPeer.ServeHTTP(w, r) + m.GrpcServerPeer.ServeHTTP(w, r) } else { - m.raftHandler.ServeHTTP(w, r) + m.RaftHandler.ServeHTTP(w, r) } }) } @@ -976,9 +976,9 @@ func (m *member) Launch() error { // don't hang on matcher after closing listener cm.SetReadTimeout(time.Second) - if m.grpcServer != nil { + if m.GrpcServer != nil { grpcl := cm.Match(cmux.HTTP2()) - go m.grpcServerPeer.Serve(grpcl) + go m.GrpcServerPeer.Serve(grpcl) } // serve http1/http2 rafthttp/grpc @@ -1010,7 +1010,7 @@ func (m *member) Launch() error { hs.Close() <-donec } - m.serverClosers = append(m.serverClosers, closer) + m.ServerClosers = append(m.ServerClosers, closer) } for _, ln := range m.ClientListeners { hs := &httptest.Server{ @@ -1018,7 +1018,7 @@ func (m *member) Launch() error { Config: &http.Server{ Handler: v2http.NewClientHandler( m.Logger, - m.s, + m.Server, m.ServerConfig.ReqTimeout(), ), ErrorLog: log.New(ioutil.Discard, "net/http", 0), @@ -1038,7 +1038,7 @@ func (m *member) Launch() error { // Previously, // 1. Server has non-empty (*tls.Config).Certificates on client hello // 2. 
Server calls (*tls.Config).GetCertificate iff: // - Server's (*tls.Config).Certificates is not empty, or // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // // When (*tls.Config).Certificates is always populated on initial handshake, @@ -1056,7 +1056,7 @@ func (m *member) Launch() error { // // This introduces another problem with "httptest.Server": // when server initial certificates are empty, certificates // are overwritten by Go's internal test certs, which have // different SAN fields (e.g. example.com). To work around, // re-overwrite (*tls.Config).Certificates before starting // test server. @@ -1073,7 +1073,7 @@ func (m *member) Launch() error { hs.CloseClientConnections() hs.Close() } - m.serverClosers = append(m.serverClosers, closer) + m.ServerClosers = append(m.ServerClosers, closer) } m.Logger.Info( @@ -1081,30 +1081,30 @@ func (m *member) Launch() error { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) return nil } -func (m *member) RecordedRequests() []grpc_testing.RequestInfo { - return m.grpcServerRecorder.RecordedRequests() +func (m *Member) RecordedRequests() []grpc_testing.RequestInfo { + return m.GrpcServerRecorder.RecordedRequests() } -func (m *member) WaitOK(t testutil.TB) { +func (m *Member) WaitOK(t testutil.TB) { m.WaitStarted(t) - for m.s.Leader() == 0 { - time.Sleep(tickDuration) + for m.Server.Leader() == 0 { + time.Sleep(TickDuration) } } -func (m *member) WaitStarted(t testutil.TB) { +func (m *Member) WaitStarted(t testutil.TB) { cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo) kapi := client.NewKeysAPI(cc) for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := kapi.Get(ctx, "/", nil) if err != nil { - time.Sleep(tickDuration) + time.Sleep(TickDuration) continue } cancel() @@ -1113,51 +1113,51 @@ func (m *member) WaitStarted(t testutil.TB) { } func WaitClientV3(t testutil.TB, kv clientv3.KV) { - timeout := time.Now().Add(requestTimeout) + timeout := time.Now().Add(RequestTimeout) var err error for time.Now().Before(timeout) { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err = kv.Get(ctx, "/") cancel() if err == nil { return } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } if err != nil { t.Fatalf("timed out waiting for client: %v", err) } } -func (m *member) URL() string { return m.ClientURLs[0].String() } +func (m *Member) URL() string { return m.ClientURLs[0].String() } -func (m *member) Pause() { - m.raftHandler.Pause() - m.s.PauseSending() +func (m *Member) Pause() { + m.RaftHandler.Pause() + m.Server.PauseSending() } -func (m *member) Resume() { - m.raftHandler.Resume() - m.s.ResumeSending() +func (m *Member) Resume() { + m.RaftHandler.Resume() + m.Server.ResumeSending() } -// Close stops the member's etcdserver and closes its connections -func (m *member) Close() { - if m.grpcBridge != nil { - m.grpcBridge.Close() - m.grpcBridge = nil +// Close stops the member's etcdserver and closes its connections +func (m *Member) Close() { + if m.GrpcBridge != nil { + m.GrpcBridge.Close() +
m.GrpcBridge = nil } - if m.serverClient != nil { - m.serverClient.Close() - m.serverClient = nil + if m.ServerClient != nil { + m.ServerClient.Close() + m.ServerClient = nil } - if m.grpcServer != nil { + if m.GrpcServer != nil { ch := make(chan struct{}) go func() { defer close(ch) // close listeners to stop accepting new connections, // will block on any existing transports - m.grpcServer.GracefulStop() + m.GrpcServer.GracefulStop() }() // wait until all pending RPCs are finished select { @@ -1165,21 +1165,21 @@ func (m *member) Close() { case <-time.After(2 * time.Second): // took too long, manually close open transports // e.g. watch streams - m.grpcServer.Stop() + m.GrpcServer.Stop() <-ch } - m.grpcServer = nil - m.grpcServerPeer.GracefulStop() - m.grpcServerPeer.Stop() - m.grpcServerPeer = nil + m.GrpcServer = nil + m.GrpcServerPeer.GracefulStop() + m.GrpcServerPeer.Stop() + m.GrpcServerPeer = nil } - if m.s != nil { - m.s.HardStop() + if m.Server != nil { + m.Server.HardStop() } - for _, f := range m.serverClosers { + for _, f := range m.ServerClosers { f() } - if !m.closed { + if !m.Closed { // Avoid verification of the same file multiple times // (that might not exist any longer) verify.MustVerifyIfEnabled(verify.Config{ @@ -1188,51 +1188,51 @@ func (m *member) Close() { ExactIndex: false, }) } - m.closed = true + m.Closed = true } // Stop stops the member, but the data dir of the member is preserved. -func (m *member) Stop(_ testutil.TB) { +func (m *Member) Stop(_ testutil.TB) { m.Logger.Info( "stopping a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) m.Close() - m.serverClosers = nil + m.ServerClosers = nil m.Logger.Info( "stopped a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) } -// checkLeaderTransition waits for leader transition, returning the new leader ID. -func checkLeaderTransition(m *member, oldLead uint64) uint64 { - interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond - for m.s.Lead() == 0 || (m.s.Lead() == oldLead) { +// CheckLeaderTransition waits for leader transition, returning the new leader ID. +func CheckLeaderTransition(m *Member, oldLead uint64) uint64 { + interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond + for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) { time.Sleep(interval) } - return m.s.Lead() + return m.Server.Lead() } // StopNotify unblocks when a member stop completes -func (m *member) StopNotify() <-chan struct{} { - return m.s.StopNotify() +func (m *Member) StopNotify() <-chan struct{} { + return m.Server.StopNotify() } // Restart starts the member using the preserved data dir. 
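(Restart's definition follows.) The Close sequence in the hunk above is the standard two-phase gRPC shutdown: GracefulStop drains in-flight RPCs, with a hard Stop after a deadline, because open watch streams can block GracefulStop forever. Isolated as a hedged sketch; the helper name and timeout are illustrative:

```go
package shutdown

import (
	"time"

	"google.golang.org/grpc"
)

// StopWithTimeout is a hypothetical helper isolating the pattern from
// Close above: GracefulStop drains pending RPCs, but long-lived streams
// (e.g. watches) can hold it open indefinitely, so fall back to Stop.
func StopWithTimeout(srv *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		srv.GracefulStop() // returns once all in-flight RPCs complete
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		srv.Stop() // force-close remaining transports (e.g. watch streams)
		<-done     // GracefulStop unblocks once Stop tears the server down
	}
}
```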
-func (m *member) Restart(t testutil.TB) error { +func (m *Member) Restart(t testutil.TB) error { m.Logger.Info( "restarting a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) newPeerListeners := make([]net.Listener, 0) for _, ln := range m.PeerListeners { @@ -1245,7 +1245,7 @@ func (m *member) Restart(t testutil.TB) error { } m.ClientListeners = newClientListeners - if m.grpcListener != nil { + if m.GrpcListener != nil { if err := m.listenGRPC(); err != nil { t.Fatal(err) } @@ -1257,23 +1257,23 @@ func (m *member) Restart(t testutil.TB) error { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), zap.Error(err), ) return err } // Terminate stops the member and removes the data dir. -func (m *member) Terminate(t testutil.TB) { +func (m *Member) Terminate(t testutil.TB) { m.Logger.Info( "terminating a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) m.Close() - if !m.keepDataDirTerminate { + if !m.KeepDataDirTerminate { if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { t.Fatal(err) } @@ -1283,12 +1283,12 @@ func (m *member) Terminate(t testutil.TB) { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) } // Metric gets the metric value for a member -func (m *member) Metric(metricName string, expectLabels ...string) (string, error) { +func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) { cfgtls := transport.TLSInfo{} tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second) if err != nil { @@ -1325,25 +1325,25 @@ func (m *member) Metric(metricName string, expectLabels ...string) (string, erro } // InjectPartition drops connections from m to others, vice versa. -func (m *member) InjectPartition(t testutil.TB, others ...*member) { +func (m *Member) InjectPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.s.CutPeer(other.s.ID()) - other.s.CutPeer(m.s.ID()) - t.Logf("network partition injected between: %v <-> %v", m.s.ID(), other.s.ID()) + m.Server.CutPeer(other.Server.ID()) + other.Server.CutPeer(m.Server.ID()) + t.Logf("network partition injected between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } // RecoverPartition recovers connections from m to others, vice versa. 
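RecoverPartition's definition continues just below; together with InjectPartition and the newly exported CheckLeaderTransition it composes into a typical fault-injection test. A hypothetical sketch, not part of this change, using only names introduced in this diff:

```go
package integration_test

import (
	"testing"

	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestLeaderMovesAfterPartition is a sketch: isolate the leader, wait for
// the surviving pair to elect a new one, then heal the partition.
func TestLeaderMovesAfterPartition(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lead := clus.WaitLeader(t)
	peers := []*integration2.Member{clus.Members[(lead+1)%3], clus.Members[(lead+2)%3]}

	// Drop traffic in both directions between the leader and its peers.
	clus.Members[lead].InjectPartition(t, peers...)

	// Block until a connected member observes a new leader.
	oldLead := uint64(clus.Members[lead].Server.ID())
	newLead := integration2.CheckLeaderTransition(peers[0], oldLead)
	t.Logf("leader moved: %x -> %x", oldLead, newLead)

	// Heal the partition; the old leader rejoins as a follower.
	clus.Members[lead].RecoverPartition(t, peers...)
}
```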
-func (m *member) RecoverPartition(t testutil.TB, others ...*member) { +func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.s.MendPeer(other.s.ID()) - other.s.MendPeer(m.s.ID()) - t.Logf("network partition between: %v <-> %v", m.s.ID(), other.s.ID()) + m.Server.MendPeer(other.Server.ID()) + other.Server.MendPeer(m.Server.ID()) + t.Logf("network partition between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } -func (m *member) ReadyNotify() <-chan struct{} { - return m.s.ReadyNotify() +func (m *Member) ReadyNotify() <-chan struct{} { + return m.Server.ReadyNotify() } func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client { @@ -1377,15 +1377,15 @@ func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } type ClusterV3 struct { - *cluster + *Cluster mu sync.Mutex - clients []*clientv3.Client + Clients []*clientv3.Client clusterClient *clientv3.Client } -// NewClusterV3 returns a launched cluster with a grpc client connection -// for each cluster member. +// NewClusterV3 returns a launched Cluster with a grpc client connection +// for each Cluster member. func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 { t.Helper() @@ -1394,7 +1394,7 @@ func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 { cfg.UseGRPC = true clus := &ClusterV3{ - cluster: NewClusterByConfig(t, cfg), + Cluster: NewClusterByConfig(t, cfg), } clus.Launch(t) @@ -1404,7 +1404,7 @@ func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 { if err != nil { t.Fatalf("cannot create client: %v", err) } - clus.clients = append(clus.clients, client) + clus.Clients = append(clus.Clients, client) } } @@ -1413,13 +1413,13 @@ func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 { func (c *ClusterV3) TakeClient(idx int) { c.mu.Lock() - c.clients[idx] = nil + c.Clients[idx] = nil c.mu.Unlock() } func (c *ClusterV3) Terminate(t testutil.TB) { c.mu.Lock() - for _, client := range c.clients { + for _, client := range c.Clients { if client == nil { continue } @@ -1433,22 +1433,22 @@ func (c *ClusterV3) Terminate(t testutil.TB) { } } c.mu.Unlock() - c.cluster.Terminate(t) + c.Cluster.Terminate(t) } func (c *ClusterV3) RandClient() *clientv3.Client { - return c.clients[rand.Intn(len(c.clients))] + return c.Clients[rand.Intn(len(c.Clients))] } func (c *ClusterV3) Client(i int) *clientv3.Client { - return c.clients[i] + return c.Clients[i] } func (c *ClusterV3) ClusterClient() (client *clientv3.Client, err error) { if c.clusterClient == nil { endpoints := []string{} for _, m := range c.Members { - endpoints = append(endpoints, m.grpcURL) + endpoints = append(endpoints, m.GrpcURL) } cfg := clientv3.Config{ Endpoints: endpoints, @@ -1504,26 +1504,26 @@ func CloseClients(t testutil.TB, clients []*clientv3.Client) { } } -type grpcAPI struct { - // Cluster is the cluster API for the client's connection. +type GrpcAPI struct { + // Cluster is the Cluster API for the client's connection. Cluster pb.ClusterClient // KV is the keyvalue API for the client's connection. KV pb.KVClient // Lease is the lease API for the client's connection. Lease pb.LeaseClient // Watch is the watch API for the client's connection.
Watch pb.WatchClient // Maintenance is the maintenance API for the client's connection. Maintenance pb.MaintenanceClient // Auth is the authentication API for the client's connection. Auth pb.AuthClient // Lock is the lock API for the client's connection. Lock lockpb.LockClient // Election is the election API for the client's connection. Election epb.ElectionClient } -// GetLearnerMembers returns the list of learner members in cluster using MemberList API. +// GetLearnerMembers returns the list of learner members in Cluster using MemberList API. func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) { cli := c.Client(0) resp, err := cli.MemberList(context.Background()) @@ -1539,13 +1539,13 @@ func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) { return learners, nil } -// AddAndLaunchLearnerMember creates a leaner member, adds it to cluster +// AddAndLaunchLearnerMember creates a learner member, adds it to Cluster // via v3 MemberAdd API, and then launches the new member. func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) { m := c.mustNewMember(t, 0) - m.isLearner = true + m.IsLearner = true - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) + scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()} cli := c.Client(0) @@ -1570,7 +1570,7 @@ func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) { c.waitMembersMatch(t) } -// getMembers returns a list of members in cluster, in format of etcdserverpb.Member +// getMembers returns a list of members in Cluster, in format of etcdserverpb.Member func (c *ClusterV3) getMembers() []*pb.Member { var mems []*pb.Member for _, m := range c.Members { @@ -1578,7 +1578,7 @@ func (c *ClusterV3) getMembers() []*pb.Member { Name: m.Name, PeerURLs: m.PeerURLs.StringSlice(), ClientURLs: m.ClientURLs.StringSlice(), - IsLearner: m.isLearner, + IsLearner: m.IsLearner, } mems = append(mems, mem) } @@ -1586,27 +1586,27 @@ } // waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the -// local 'c.Members', which is the local recording of members in the testing cluster. With +// local 'c.Members', which is the local recording of members in the testing Cluster. With // the exception that the local recording c.Members does not have info on Member.ID, which -// is generated when the member is been added to cluster. +// is generated when the member is being added to the Cluster. // // Note: // A successful match means the Member.clientURLs are matched. This means member has already -// finished publishing its server attributes to cluster. Publishing attributes is a cluster-wide +// finished publishing its server attributes to the Cluster. Publishing attributes is a Cluster-wide // write request (in v2 server). Therefore, at this point, any raft log entries prior to this // would have already been applied. // -// If a new member was added to an existing cluster, at this point, it has finished publishing -// its own server attributes to the cluster. And therefore by the same argument, it has already +// If a new member was added to an existing Cluster, at this point, it has finished publishing +// its own server attributes to the Cluster.
And therefore by the same argument, it has already // applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point, -// the new member has the correct view of the cluster configuration. +// the new member has the correct view of the Cluster configuration. // // Special note on learner member: -// Learner member is only added to a cluster via v3rpc MemberAdd API (as of v3.4). When starting -// the learner member, its initial view of the cluster created by peerURLs map does not have info +// Learner member is only added to a Cluster via v3rpc MemberAdd API (as of v3.4). When starting +// the learner member, its initial view of the Cluster created by peerURLs map does not have info // on whether or not the new member itself is learner. But at this point, a successful match does // indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry -// which was used to add the learner itself to the cluster, and therefore it has the correct info +// which was used to add the learner itself to the Cluster, and therefore it has the correct info // on learner. func (c *ClusterV3) waitMembersMatch(t testutil.TB) { wMembers := c.getMembers() @@ -1629,7 +1629,7 @@ func (c *ClusterV3) waitMembersMatch(t testutil.TB) { return } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } @@ -1642,9 +1642,9 @@ func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool { func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // MustNewMember creates a new member instance based on the response of V3 Member Add API. -func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *member { +func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *Member { m := c.mustNewMember(t, 0) - m.isLearner = resp.Member.IsLearner + m.IsLearner = resp.Member.IsLearner m.NewCluster = false m.InitialPeerURLsMap = types.URLsMap{} diff --git a/tests/integration/cluster_direct.go b/tests/framework/integration/cluster_direct.go similarity index 95% rename from tests/integration/cluster_direct.go rename to tests/framework/integration/cluster_direct.go index 0334146d0..dad487525 100644 --- a/tests/integration/cluster_direct.go +++ b/tests/framework/integration/cluster_direct.go @@ -26,8 +26,8 @@ import ( const ThroughProxy = false -func toGRPC(c *clientv3.Client) grpcAPI { - return grpcAPI{ +func ToGRPC(c *clientv3.Client) GrpcAPI { + return GrpcAPI{ pb.NewClusterClient(c.ActiveConnection()), pb.NewKVClient(c.ActiveConnection()), pb.NewLeaseClient(c.ActiveConnection()), diff --git a/tests/integration/cluster_proxy.go b/tests/framework/integration/cluster_proxy.go similarity index 97% rename from tests/integration/cluster_proxy.go rename to tests/framework/integration/cluster_proxy.go index 25e042359..a5266d09e 100644 --- a/tests/integration/cluster_proxy.go +++ b/tests/framework/integration/cluster_proxy.go @@ -39,13 +39,13 @@ const proxyNamespace = "proxy-namespace" type grpcClientProxy struct { ctx context.Context ctxCancel func() - grpc grpcAPI + grpc GrpcAPI wdonec <-chan struct{} kvdonec <-chan struct{} lpdonec <-chan struct{} } -func toGRPC(c *clientv3.Client) grpcAPI { +func ToGRPC(c *clientv3.Client) GrpcAPI { pmu.Lock() defer pmu.Unlock() @@ -74,7 +74,7 @@ func toGRPC(c *clientv3.Client) grpcAPI { lockp := grpcproxy.NewLockProxy(c) electp := grpcproxy.NewElectionProxy(c) - grpc := grpcAPI{ + grpc := GrpcAPI{ adapter.ClusterServerToClusterClient(clp), adapter.KvServerToKvClient(kvp), 
adapter.LeaseServerToLeaseClient(lp), @@ -112,7 +112,7 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { if err != nil { return nil, err } - rpc := toGRPC(c) + rpc := ToGRPC(c) c.KV = clientv3.NewKVFromKVClient(rpc.KV, c) pmu.Lock() lc := c.Lease diff --git a/tests/integration/testing.go b/tests/framework/integration/testing.go similarity index 100% rename from tests/integration/testing.go rename to tests/framework/integration/testing.go diff --git a/tests/integration/client/client_test.go b/tests/integration/client/client_test.go index 630344cb8..aa0271e95 100644 --- a/tests/integration/client/client_test.go +++ b/tests/integration/client/client_test.go @@ -25,14 +25,14 @@ import ( "testing" "go.etcd.io/etcd/client/v2" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection. func TestV2NoRetryEOF(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // generate an EOF response; specify address so appears first in sorted ep list - lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) + lEOF := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) defer lEOF.Close() tries := uint32(0) go func() { @@ -45,8 +45,8 @@ func TestV2NoRetryEOF(t *testing.T) { conn.Close() } }() - eofURL := integration.URLScheme + "://" + lEOF.Addr().String() - cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil) + eofURL := integration2.URLScheme + "://" + lEOF.Addr().String() + cli := integration2.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil) kapi := client.NewKeysAPI(cli) for i, f := range noRetryList(kapi) { startTries := atomic.LoadUint32(&tries) @@ -62,17 +62,17 @@ func TestV2NoRetryEOF(t *testing.T) { // TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code. func TestV2NoRetryNoLeader(t *testing.T) { - integration.BeforeTest(t) - lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) + integration2.BeforeTest(t) + lHTTP := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) eh := &errHandler{errCode: http.StatusServiceUnavailable} srv := httptest.NewUnstartedServer(eh) defer lHTTP.Close() defer srv.Close() srv.Listener = lHTTP go srv.Start() - lHTTPURL := integration.URLScheme + "://" + lHTTP.Addr().String() + lHTTPURL := integration2.URLScheme + "://" + lHTTP.Addr().String() - cli := integration.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil) + cli := integration2.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil) kapi := client.NewKeysAPI(cli) // test error code for i, f := range noRetryList(kapi) { @@ -88,12 +88,12 @@ func TestV2NoRetryNoLeader(t *testing.T) { // TestV2RetryRefuse tests destructive api calls will retry if a connection is refused. 
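From this point to the end of the diff, nearly every hunk is the same mechanical substitution: the framework package now lives at tests/framework/integration and is imported under the integration2 alias, so each call site needs only a prefix change. The per-file pattern, sketched with a hypothetical test (TestV2RetryRefuse itself follows):

```go
package client_test

import (
	"testing"

	// The framework moved to tests/framework/integration; aliasing it as
	// integration2 keeps each existing call site a one-token change.
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestSketch is hypothetical; it shows the shape every migrated test
// file in the remaining hunks shares.
func TestSketch(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)
}
```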
func TestV2RetryRefuse(t *testing.T) { - integration.BeforeTest(t) - cl := integration.NewCluster(t, 1) + integration2.BeforeTest(t) + cl := integration2.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) // test connection refused; expect no error failover - cli := integration.MustNewHTTPClient(t, []string{integration.URLScheme + "://refuseconn:123", cl.URL(0)}, nil) + cli := integration2.MustNewHTTPClient(t, []string{integration2.URLScheme + "://refuseconn:123", cl.URL(0)}, nil) kapi := client.NewKeysAPI(cli) if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil { t.Fatal(err) diff --git a/tests/integration/clientv3/cluster_test.go b/tests/integration/clientv3/cluster_test.go index eff15cf7d..b9a63c55c 100644 --- a/tests/integration/clientv3/cluster_test.go +++ b/tests/integration/clientv3/cluster_test.go @@ -23,13 +23,13 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMemberList(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -45,9 +45,9 @@ func TestMemberList(t *testing.T) { } func TestMemberAdd(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -64,9 +64,9 @@ func TestMemberAdd(t *testing.T) { } func TestMemberAddWithExistingURLs(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -88,9 +88,9 @@ func TestMemberAddWithExistingURLs(t *testing.T) { } func TestMemberRemove(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.Client(1) @@ -126,9 +126,9 @@ func TestMemberRemove(t *testing.T) { } func TestMemberUpdate(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -154,9 +154,9 @@ func TestMemberUpdate(t *testing.T) { } func TestMemberAddUpdateWrongURLs(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) capi := clus.RandClient() @@ -187,9 +187,9 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) { } func TestMemberAddForLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -216,9 +216,9 @@ func TestMemberAddForLearner(t 
*testing.T) { } func TestMemberPromote(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // member promote request can be sent to any server in cluster, @@ -293,9 +293,9 @@ func TestMemberPromote(t *testing.T) { // TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails. func TestMemberPromoteMemberNotLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // member promote request can be sent to any server in cluster, @@ -329,9 +329,9 @@ func TestMemberPromoteMemberNotLearner(t *testing.T) { // TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails. func TestMemberPromoteMemberNotExist(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // member promote request can be sent to any server in cluster, @@ -378,10 +378,10 @@ func TestMemberPromoteMemberNotExist(t *testing.T) { // TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is 1 func TestMaxLearnerInCluster(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // 1. start with a cluster with 3 voting member and 0 learner member - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // 2. 
adding a learner member should succeed diff --git a/tests/integration/clientv3/concurrency/election_test.go b/tests/integration/clientv3/concurrency/election_test.go index 650bdc015..d101f15d2 100644 --- a/tests/integration/clientv3/concurrency/election_test.go +++ b/tests/integration/clientv3/concurrency/election_test.go @@ -23,13 +23,13 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestResumeElection(t *testing.T) { const prefix = "/resume-election/" - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) if err != nil { log.Fatal(err) } diff --git a/tests/integration/clientv3/concurrency/mutex_test.go b/tests/integration/clientv3/concurrency/mutex_test.go index 1d264bf4e..8220788cf 100644 --- a/tests/integration/clientv3/concurrency/mutex_test.go +++ b/tests/integration/clientv3/concurrency/mutex_test.go @@ -20,11 +20,11 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMutexLockSessionExpired(t *testing.T) { - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/connectivity/black_hole_test.go b/tests/integration/clientv3/connectivity/black_hole_test.go index 4445c69f6..1c501c08a 100644 --- a/tests/integration/clientv3/connectivity/black_hole_test.go +++ b/tests/integration/clientv3/connectivity/black_hole_test.go @@ -24,7 +24,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration/clientv3" "google.golang.org/grpc" ) @@ -33,9 +33,9 @@ import ( // blackholed endpoint, client balancer switches to healthy one. // TODO: test server-to-client keepalive ping func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 2, GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings UseBridge: true, @@ -58,9 +58,9 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { // TODO: only send healthy endpoint to gRPC so gRPC wont waste time to // dial for unhealthy endpoint. // then we can reduce 3s to 1s. - timeout := pingInterval + integration.RequestWaitTimeout + timeout := pingInterval + integration2.RequestWaitTimeout - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } @@ -166,9 +166,9 @@ func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) { // testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint // fails due to context timeout, but succeeds on next try, with endpoint switch. 
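The blackhole tests depend on UseBridge: the client dials a proxy listener in front of the member's gRPC socket, letting the test drop traffic without killing the process. A hedged sketch of the shape such a test takes; Blackhole and Unblackhole are the bridge controls these tests rely on, while the key, value, and timeout are illustrative (the real helper, testBalancerUnderBlackholeNoKeepAlive, follows):

```go
package connectivity_test

import (
	"context"
	"testing"
	"time"

	"go.etcd.io/etcd/client/v3"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestBlackholeSketch is illustrative only: give the client both members,
// blackhole member 0's bridge, and expect the balancer to fail over to
// member 1.
func TestBlackholeSketch(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
		Size:      2,
		UseBridge: true, // client traffic flows through a controllable proxy
	})
	defer clus.Terminate(t)

	cli, err := integration2.NewClient(t, clientv3.Config{
		Endpoints: []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()},
	})
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	clus.Members[0].Bridge().Blackhole() // silently drop member 0's traffic

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
		t.Fatalf("expected failover to member 1, got %v", err)
	}
	clus.Members[0].Bridge().Unblackhole()
}
```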
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 2, SkipCreatingClient: true, UseBridge: true, @@ -182,7 +182,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien DialTimeout: 1 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/connectivity/dial_test.go b/tests/integration/clientv3/connectivity/dial_test.go index 52dcca69e..719de550a 100644 --- a/tests/integration/clientv3/connectivity/dial_test.go +++ b/tests/integration/clientv3/connectivity/dial_test.go @@ -24,31 +24,31 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" "google.golang.org/grpc" ) var ( testTLSInfo = transport.TLSInfo{ - KeyFile: integration.MustAbsPath("../../../fixtures/server.key.insecure"), - CertFile: integration.MustAbsPath("../../../fixtures/server.crt"), - TrustedCAFile: integration.MustAbsPath("../../../fixtures/ca.crt"), + KeyFile: integration2.MustAbsPath("../../../fixtures/server.key.insecure"), + CertFile: integration2.MustAbsPath("../../../fixtures/server.crt"), + TrustedCAFile: integration2.MustAbsPath("../../../fixtures/ca.crt"), ClientCertAuth: true, } testTLSInfoExpired = transport.TLSInfo{ - KeyFile: integration.MustAbsPath("../../fixtures-expired/server.key.insecure"), - CertFile: integration.MustAbsPath("../../fixtures-expired/server.crt"), - TrustedCAFile: integration.MustAbsPath("../../fixtures-expired/ca.crt"), + KeyFile: integration2.MustAbsPath("../../fixtures-expired/server.key.insecure"), + CertFile: integration2.MustAbsPath("../../fixtures-expired/server.crt"), + TrustedCAFile: integration2.MustAbsPath("../../fixtures-expired/ca.crt"), ClientCertAuth: true, } ) // TestDialTLSExpired tests client with expired certs fails to dial. func TestDialTLSExpired(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) defer clus.Terminate(t) tls, err := testTLSInfoExpired.ClientConfig() @@ -56,7 +56,7 @@ func TestDialTLSExpired(t *testing.T) { t.Fatal(err) } // expect remote errors "tls: bad certificate" - _, err = integration.NewClient(t, clientv3.Config{ + _, err = integration2.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: 3 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, @@ -70,11 +70,11 @@ func TestDialTLSExpired(t *testing.T) { // TestDialTLSNoConfig ensures the client fails to dial / times out // when TLS endpoints (https, unixs) are given but no tls config. 
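TestDialTLSExpired above shows the recipe for dialing a TLS-enabled test cluster: transport.TLSInfo.ClientConfig() produces the *tls.Config that clientv3.Config carries. A minimal hedged sketch of that chain, with a hypothetical helper name but the same calls these tests use (TestDialTLSNoConfig follows):

```go
package connectivity_test

import (
	"testing"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
	"go.etcd.io/etcd/client/v3"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// dialTLS sketches the TLSInfo -> *tls.Config -> clientv3.Config chain;
// the helper name is hypothetical, the calls mirror the tests above.
func dialTLS(t *testing.T, clus *integration2.ClusterV3, info transport.TLSInfo) *clientv3.Client {
	tlsCfg, err := info.ClientConfig() // *tls.Config built from cert/key/CA files
	if err != nil {
		t.Fatal(err)
	}
	cli, err := integration2.NewClient(t, clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCURL()},
		DialTimeout: 3 * time.Second,
		TLS:         tlsCfg,
	})
	if err != nil {
		t.Fatal(err)
	}
	return cli
}
```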
func TestDialTLSNoConfig(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) defer clus.Terminate(t) // expect "signed by unknown authority" - c, err := integration.NewClient(t, clientv3.Config{ + c, err := integration2.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, @@ -101,8 +101,8 @@ func TestDialSetEndpointsAfterFail(t *testing.T) { // testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones. func testDialSetEndpoints(t *testing.T, setBefore bool) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, SkipCreatingClient: true}) defer clus.Terminate(t) // get endpoint list @@ -117,7 +117,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { DialTimeout: 1 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -134,7 +134,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3]) } time.Sleep(time.Second * 2) - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout) if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil { t.Fatal(err) } @@ -144,8 +144,8 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { // TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint // with a new one that doesn't include original endpoint. func TestSwitchSetEndpoints(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // get non partitioned members endpoints @@ -164,9 +164,9 @@ func TestSwitchSetEndpoints(t *testing.T) { } func TestRejectOldCluster(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // 2 endpoints to test multi-endpoint Status - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -175,7 +175,7 @@ func TestRejectOldCluster(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, RejectOldCluster: true, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -185,8 +185,8 @@ func TestRejectOldCluster(t *testing.T) { // TestDialForeignEndpoint checks an endpoint that is not registered // with the balancer can be dialed. 
func TestDialForeignEndpoint(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0]) @@ -208,8 +208,8 @@ func TestDialForeignEndpoint(t *testing.T) { // TestSetEndpointAndPut checks that a Put following a SetEndpoints // to a working endpoint will always succeed. func TestSetEndpointAndPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL()) diff --git a/tests/integration/clientv3/connectivity/network_partition_test.go b/tests/integration/clientv3/connectivity/network_partition_test.go index c2650ebcd..32abb6e12 100644 --- a/tests/integration/clientv3/connectivity/network_partition_test.go +++ b/tests/integration/clientv3/connectivity/network_partition_test.go @@ -26,7 +26,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration/clientv3" "google.golang.org/grpc" ) @@ -103,9 +103,9 @@ func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) { } func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -119,7 +119,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c DialTimeout: 3 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } @@ -159,9 +159,9 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c // switches endpoint when leader fails and linearizable get requests returns // "etcdserver: request timed out". func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -172,7 +172,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout() - cli, err := integration.NewClient(t, clientv3.Config{ + cli, err := integration2.NewClient(t, clientv3.Config{ Endpoints: []string{eps[(lead+1)%2]}, DialTimeout: 2 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, @@ -214,9 +214,9 @@ func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) { // testBalancerUnderNetworkPartitionWatch ensures watch stream // to a partitioned node be closed when context requires leader. 
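The partition-watch helper defined next leans on clientv3.WithRequireLeader: a watch opened under that context is cancelled server-side once the serving member loses its leader, surfacing rpctypes.ErrNoLeader instead of hanging. In sketch form (function and key names illustrative):

```go
package connectivity_test

import (
	"context"
	"testing"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	"go.etcd.io/etcd/client/v3"
)

// requireLeaderWatch is illustrative: when the member serving the watch
// loses its leader, the stream is closed server-side and the final
// response reports ErrNoLeader rather than blocking forever.
func requireLeaderWatch(t *testing.T, cli *clientv3.Client) {
	ctx := clientv3.WithRequireLeader(context.Background())
	wch := cli.Watch(ctx, "foo", clientv3.WithCreatedNotify())
	for resp := range wch {
		if err := resp.Err(); err != nil {
			if err != rpctypes.ErrNoLeader {
				t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
			}
			return // watch ended as expected on leader loss
		}
	}
}
```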
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -230,7 +230,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { } // pin eps[target] - watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) + watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) if err != nil { t.Fatal(err) } @@ -248,7 +248,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify()) select { case <-wch: - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("took too long to create watch") } @@ -268,15 +268,15 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { if err = ev.Err(); err != rpctypes.ErrNoLeader { t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err) } - case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost + case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost t.Fatal("took too long to detect leader lost") } } func TestDropReadUnderNetworkPartition(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -289,7 +289,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) { DialTimeout: 10 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/connectivity/server_shutdown_test.go b/tests/integration/clientv3/connectivity/server_shutdown_test.go index 5b888e6fe..2d14a3999 100644 --- a/tests/integration/clientv3/connectivity/server_shutdown_test.go +++ b/tests/integration/clientv3/connectivity/server_shutdown_test.go @@ -23,16 +23,16 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration/clientv3" ) // TestBalancerUnderServerShutdownWatch expects that watch client // switch its endpoints when the member of the pinned endpoint fails. 
func TestBalancerUnderServerShutdownWatch(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, UseBridge: true, @@ -44,7 +44,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { lead := clus.WaitLeader(t) // pin eps[lead] - watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) + watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) if err != nil { t.Fatal(err) } @@ -61,7 +61,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify()) select { case <-wch: - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("took too long to create watch") } @@ -90,7 +90,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { clus.Members[lead].Terminate(t) // writes to eps[lead+1] - putCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) + putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) if err != nil { t.Fatal(err) } @@ -143,9 +143,9 @@ func TestBalancerUnderServerShutdownTxn(t *testing.T) { // the pinned endpoint is shut down, the balancer switches its endpoints // and all subsequent put/delete/txn requests succeed with new endpoints. func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -154,7 +154,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} // pin eps[0] - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) if err != nil { t.Fatal(err) } @@ -201,9 +201,9 @@ func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) { // the pinned endpoint is shut down, the balancer switches its endpoints // and all subsequent range requests succeed with new endpoints. func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -212,7 +212,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} // pin eps[0] - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) if err != nil { t.Errorf("failed to create client: %v", err) } @@ -274,9 +274,9 @@ type pinTestOpt struct { // testBalancerUnderServerStopInflightRangeOnRestart expects // inflight range request reconnects on server restart. 
func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cfg := &integration.ClusterConfig{ + cfg := &integration2.ClusterConfig{ Size: 2, SkipCreatingClient: true, UseBridge: true, @@ -285,7 +285,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl cfg.Size = 3 } - clus := integration.NewClusterV3(t, cfg) + clus := integration2.NewClusterV3(t, cfg) defer clus.Terminate(t) eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()} if linearizable { @@ -300,7 +300,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl } // pin eps[target] - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) if err != nil { t.Errorf("failed to create client: %v", err) } @@ -361,7 +361,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl clus.Members[target].Restart(t) select { - case <-time.After(clientTimeout + integration.RequestWaitTimeout): + case <-time.After(clientTimeout + integration2.RequestWaitTimeout): t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt) case <-donec: } diff --git a/tests/integration/clientv3/examples/main_test.go b/tests/integration/clientv3/examples/main_test.go index 3a61a962f..d347fa730 100644 --- a/tests/integration/clientv3/examples/main_test.go +++ b/tests/integration/clientv3/examples/main_test.go @@ -20,6 +20,7 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/testutil" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration" ) @@ -29,7 +30,7 @@ const ( ) var lazyCluster = integration.NewLazyClusterWithConfig( - integration.ClusterConfig{ + integration2.ClusterConfig{ Size: 3, WatchProgressNotifyInterval: 200 * time.Millisecond}) diff --git a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go index 52bde238d..5692db140 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go @@ -20,19 +20,19 @@ import ( "go.etcd.io/etcd/client/v3" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestBarrierSingleNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) }) } func TestBarrierMultiNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() }) } diff --git a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go index 463bb6051..e886b90b4 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go +++ 
b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go @@ -20,13 +20,13 @@ import ( "go.etcd.io/etcd/client/v3/concurrency" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestDoubleBarrier(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) waiters := 10 @@ -98,9 +98,9 @@ func TestDoubleBarrier(t *testing.T) { } func TestDoubleBarrierFailover(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) waiters := 10 diff --git a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go index 7104c3ce7..cc873adf2 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go @@ -24,29 +24,29 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMutexLockSingleNode(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func TestMutexLockMultiNode(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) { @@ -93,27 +93,27 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie } func TestMutexTryLockSingleNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func TestMutexTryLockMultiNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := 
integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) { - integration.BeforeTest(t) + integration2.BeforeTest(t) lockedC := make(chan *concurrency.Mutex) notlockedC := make(chan *concurrency.Mutex) @@ -163,9 +163,9 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C // TestMutexSessionRelock ensures that acquiring the same lock with the same // session will not result in deadlock. func TestMutexSessionRelock(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) session, err := concurrency.NewSession(clus.RandClient()) if err != nil { @@ -187,9 +187,9 @@ func TestMutexSessionRelock(t *testing.T) { // waiters older than the new owner are gone by testing the case where // the waiter prior to the acquirer expires before the current holder. func TestMutexWaitsOnCurrentHolder(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cctx := context.Background() @@ -295,9 +295,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) { } func BenchmarkMutex4Waiters(b *testing.B) { - integration.BeforeTest(b) + integration2.BeforeTest(b) // XXX switch tests to use TB interface - clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(nil) for i := 0; i < b.N; i++ { testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() }) @@ -305,15 +305,15 @@ func BenchmarkMutex4Waiters(b *testing.B) { } func TestRWMutexSingleNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) }) } func TestRWMutexMultiNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() }) } diff --git a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go index 45d1855b9..112d55e1f 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go @@ -21,7 +21,7 @@ import ( "testing" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) const ( @@ -31,9 +31,9 @@ const ( // TestQueueOneReaderOneWriter confirms the queue is FIFO 
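// (Illustrative aside, not part of this patch: the recipes queue API these
// tests drive, assuming a connected *clientv3.Client named cli; the prefix
// "demo-q" is made up for the sketch.)
//
//	q := recipe.NewQueue(cli, "demo-q")
//	if err := q.Enqueue("a"); err != nil { /* handle */ }
//	if err := q.Enqueue("b"); err != nil { /* handle */ }
//	v, err := q.Dequeue() // FIFO: returns "a" before "b"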
func TestQueueOneReaderOneWriter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) done := make(chan struct{}) @@ -78,10 +78,10 @@ func TestQueueManyReaderManyWriter(t *testing.T) { // BenchmarkQueue benchmarks Queues using many/many readers/writers func BenchmarkQueue(b *testing.B) { - integration.BeforeTest(b) + integration2.BeforeTest(b) // XXX switch tests to use TB interface - clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(nil) for i := 0; i < b.N; i++ { testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients) @@ -90,9 +90,9 @@ func BenchmarkQueue(b *testing.B) { // TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities. func TestPrQueueOneReaderOneWriter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // write out five items with random priority @@ -124,9 +124,9 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) { } func TestPrQueueManyReaderManyWriter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) rqs := newPriorityQueues(clus, manyQueueClients) wqs := newPriorityQueues(clus, manyQueueClients) @@ -135,10 +135,10 @@ func TestPrQueueManyReaderManyWriter(t *testing.T) { // BenchmarkPrQueueOneReaderOneWriter benchmarks priority queues using one reader and one writer func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) { - integration.BeforeTest(b) + integration2.BeforeTest(b) // XXX switch tests to use TB interface - clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(nil) rqs := newPriorityQueues(clus, 1) wqs := newPriorityQueues(clus, 1) @@ -148,13 +148,13 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) { } func testQueueNReaderMWriter(t *testing.T, n int, m int) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testReadersWriters(t, newQueues(clus, n), newQueues(clus, m)) } -func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) { +func newQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) { for i := 0; i < n; i++ { etcdc := clus.RandClient() qs = append(qs, recipe.NewQueue(etcdc, "q")) @@ -162,7 +162,7 @@ func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) { return qs } -func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) { +func newPriorityQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) { for i := 0; i < n; i++ { etcdc := clus.RandClient() q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")} diff --git a/tests/integration/clientv3/kv_test.go b/tests/integration/clientv3/kv_test.go index 8dd98466d..43cd50fc9 100644 --- a/tests/integration/clientv3/kv_test.go +++ 
b/tests/integration/clientv3/kv_test.go @@ -29,20 +29,20 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func TestKVPutError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) var ( maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go quota = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486. ) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024}) defer clus.Terminate(t) kv := clus.RandClient() @@ -72,9 +72,9 @@ func TestKVPutError(t *testing.T) { } func TestKVPut(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -117,9 +117,9 @@ func TestKVPut(t *testing.T) { // TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value. func TestKVPutWithIgnoreValue(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -150,9 +150,9 @@ func TestKVPutWithIgnoreValue(t *testing.T) { // TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key. 
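// (Sketch of the options under test, assuming a client cli and context ctx:
// WithIgnoreValue reuses the stored value, while WithIgnoreLease reuses the
// lease already attached to the key.)
//
//	lease, _ := cli.Grant(ctx, 10)
//	_, _ = cli.Put(ctx, "k", "v1", clientv3.WithLease(lease.ID))
//	_, _ = cli.Put(ctx, "k", "v2", clientv3.WithIgnoreLease()) // value changes, lease stays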
func TestKVPutWithIgnoreLease(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -189,9 +189,9 @@ func TestKVPutWithIgnoreLease(t *testing.T) { } func TestKVPutWithRequireLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) clus.Members[1].Stop(t) @@ -235,9 +235,9 @@ func TestKVPutWithRequireLeader(t *testing.T) { } func TestKVRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -464,9 +464,9 @@ func TestKVRange(t *testing.T) { } func TestKVGetErrConnClosed(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -486,16 +486,16 @@ func TestKVGetErrConnClosed(t *testing.T) { }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("kv.Get took too long") case <-donec: } } func TestKVNewAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -513,16 +513,16 @@ func TestKVNewAfterClose(t *testing.T) { close(donec) }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("kv.Get took too long") case <-donec: } } func TestKVDeleteRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -592,9 +592,9 @@ func TestKVDeleteRange(t *testing.T) { } func TestKVDelete(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -624,9 +624,9 @@ func TestKVDelete(t *testing.T) { } func TestKVCompactError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -654,9 +654,9 @@ func TestKVCompactError(t *testing.T) { } func TestKVCompact(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -709,10 +709,10 @@ func 
TestKVCompact(t *testing.T) { // TestKVGetRetry ensures get will retry on disconnect. func TestKVGetRetry(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) clusterSize := 3 - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true}) defer clus.Terminate(t) // because killing leader and following election @@ -763,9 +763,9 @@ func TestKVGetRetry(t *testing.T) { // TestKVPutFailGetRetry ensures a get will retry following a failed put. func TestKVPutFailGetRetry(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kv := clus.Client(0) @@ -803,9 +803,9 @@ func TestKVPutFailGetRetry(t *testing.T) { // TestKVGetCancel tests that a context cancel on a Get terminates as expected. func TestKVGetCancel(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) oldconn := clus.Client(0).ActiveConnection() @@ -826,9 +826,9 @@ func TestKVGetCancel(t *testing.T) { // TestKVGetStoppedServerAndClose ensures closing after a failed Get works. func TestKVGetStoppedServerAndClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -844,9 +844,9 @@ func TestKVGetStoppedServerAndClose(t *testing.T) { // TestKVPutStoppedServerAndClose ensures closing after a failed Put works. func TestKVPutStoppedServerAndClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -875,8 +875,8 @@ func TestKVPutStoppedServerAndClose(t *testing.T) { // TestKVPutAtMostOnce ensures that a Put will only occur at most once // in the presence of network errors. func TestKVPutAtMostOnce(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil { @@ -911,7 +911,7 @@ func TestKVPutAtMostOnce(t *testing.T) { // TestKVLargeRequests tests various client/server side request limits. 
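// (For reference, a sketch of the client-side size limits these tests sweep;
// the byte values below are illustrative, and the server-side cap comes from
// MaxRequestBytes in the cluster config above.)
//
//	cli, err := clientv3.New(clientv3.Config{
//		Endpoints:          []string{"localhost:2379"},
//		MaxCallSendMsgSize: 4 * 1024 * 1024, // cap on outbound requests
//		MaxCallRecvMsgSize: 8 * 1024 * 1024, // cap on inbound responses
//	})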
func TestKVLargeRequests(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) tests := []struct { // make sure that "MaxCallSendMsgSize" < server-side default send/recv limit maxRequestBytesServer uint @@ -970,8 +970,8 @@ func TestKVLargeRequests(t *testing.T) { }, } for i, test := range tests { - clus := integration.NewClusterV3(t, - &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, + &integration2.ClusterConfig{ Size: 1, MaxRequestBytes: test.maxRequestBytesServer, ClientMaxCallSendMsgSize: test.maxCallSendBytesClient, @@ -1003,9 +1003,9 @@ func TestKVLargeRequests(t *testing.T) { // TestKVForLearner ensures learner member only accepts serializable read request. func TestKVForLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // we have to add and launch learner member after initial cluster was created, because @@ -1034,7 +1034,7 @@ func TestKVForLearner(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } // this client only has endpoint of the learner member - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("failed to create clientv3: %v", err) } @@ -1082,9 +1082,9 @@ func TestKVForLearner(t *testing.T) { // TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member func TestBalancerSupportLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // we have to add and launch learner member after initial cluster was created, because @@ -1106,7 +1106,7 @@ func TestBalancerSupportLearner(t *testing.T) { DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("failed to create clientv3: %v", err) } diff --git a/tests/integration/clientv3/lease/lease_test.go b/tests/integration/clientv3/lease/lease_test.go index 6a6cf7dd3..d06cffe05 100644 --- a/tests/integration/clientv3/lease/lease_test.go +++ b/tests/integration/clientv3/lease/lease_test.go @@ -26,13 +26,13 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestLeaseNotFoundError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -44,9 +44,9 @@ func TestLeaseNotFoundError(t *testing.T) { } func TestLeaseGrant(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -70,9 +70,9 @@ func TestLeaseGrant(t *testing.T) { } func TestLeaseRevoke(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus 
:= integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -96,9 +96,9 @@ func TestLeaseRevoke(t *testing.T) { } func TestLeaseKeepAliveOnce(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -120,9 +120,9 @@ func TestLeaseKeepAliveOnce(t *testing.T) { } func TestLeaseKeepAlive(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.Client(0) @@ -160,9 +160,9 @@ func TestLeaseKeepAlive(t *testing.T) { } func TestLeaseKeepAliveOneSecond(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -188,9 +188,9 @@ func TestLeaseKeepAliveOneSecond(t *testing.T) { func TestLeaseKeepAliveHandleFailure(t *testing.T) { t.Skip("test it when we have a cluster client") - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) // TODO: change this line to get a cluster client @@ -243,9 +243,9 @@ type leaseCh struct { // TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases. func TestLeaseKeepAliveNotFound(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -276,9 +276,9 @@ func TestLeaseKeepAliveNotFound(t *testing.T) { } func TestLeaseGrantErrConnClosed(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -298,7 +298,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("le.Grant took too long") case <-donec: } @@ -308,9 +308,9 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { // queue is full thus dropping keepalive response sends, // keepalive request is sent with the same rate of TTL / 3. 
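// (Roughly the flow at issue, sketched with an assumed client cli and
// context ctx: renewals fire about every TTL/3, so a saturated response
// channel only drops notifications, not the renewals themselves.)
//
//	lease, _ := cli.Grant(ctx, 3) // TTL in seconds
//	ch, _ := cli.KeepAlive(ctx, lease.ID)
//	for resp := range ch { _ = resp.TTL } // drain to observe each renewal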
func TestLeaseKeepAliveFullResponseQueue(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lapi := clus.Client(0) @@ -348,9 +348,9 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) { } func TestLeaseGrantNewAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -368,16 +368,16 @@ func TestLeaseGrantNewAfterClose(t *testing.T) { close(donec) }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("le.Grant took too long") case <-donec: } } func TestLeaseRevokeNewAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -402,7 +402,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { } }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("le.Revoke took too long") case errMsg := <-errMsgCh: if errMsg != "" { @@ -414,9 +414,9 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { // TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed // following a disconnection, lease revoke, then reconnect. func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -460,9 +460,9 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { // TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if // the initial keep alive request never gets a response. func TestLeaseKeepAliveInitTimeout(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -493,9 +493,9 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) { // TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if // a keep alive request after the first never gets a response. 
func TestLeaseKeepAliveTTLTimeout(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -528,9 +528,9 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) { } func TestLeaseTimeToLive(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) c := clus.RandClient() @@ -586,9 +586,9 @@ func TestLeaseTimeToLive(t *testing.T) { } func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -621,9 +621,9 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) { } func TestLeaseLeases(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -654,9 +654,9 @@ func TestLeaseLeases(t *testing.T) { // TestLeaseRenewLostQuorum ensures keepalives work after losing quorum // for a while. func TestLeaseRenewLostQuorum(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -702,9 +702,9 @@ func TestLeaseRenewLostQuorum(t *testing.T) { } func TestLeaseKeepAliveLoopExit(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx := context.Background() @@ -727,8 +727,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) { // before, during, and after quorum loss to confirm Grant/KeepAlive tolerates // transient cluster failure. func TestV3LeaseFailureOverlap(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) numReqs := 5 @@ -780,9 +780,9 @@ func TestV3LeaseFailureOverlap(t *testing.T) { // TestLeaseWithRequireLeader checks keep-alive channel close when no leader. 
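// (In sketch form, assuming a client cli: a keep-alive stream opened under a
// require-leader context is closed by the server once it loses its leader.)
//
//	ctx := clientv3.WithRequireLeader(context.Background())
//	lease, _ := cli.Grant(ctx, 5)
//	ch, _ := cli.KeepAlive(ctx, lease.ID) // ch closes when there is no leader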
func TestLeaseWithRequireLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) c := clus.Client(0) diff --git a/tests/integration/clientv3/lease/leasing_test.go b/tests/integration/clientv3/lease/leasing_test.go index 3e935d8e3..60bd02719 100644 --- a/tests/integration/clientv3/lease/leasing_test.go +++ b/tests/integration/clientv3/lease/leasing_test.go @@ -28,13 +28,13 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" "go.etcd.io/etcd/client/v3/leasing" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestLeasingPutGet(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lKV1, closeLKV1, err := leasing.NewKV(clus.Client(0), "foo/") @@ -91,8 +91,8 @@ func TestLeasingPutGet(t *testing.T) { // TestLeasingInterval checks the leasing KV fetches key intervals. func TestLeasingInterval(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -130,8 +130,8 @@ func TestLeasingInterval(t *testing.T) { // TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key. func TestLeasingPutInvalidateNew(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -160,8 +160,8 @@ func TestLeasingPutInvalidateNew(t *testing.T) { // TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key. func TestLeasingPutInvalidateExisting(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { @@ -194,8 +194,8 @@ func TestLeasingPutInvalidateExisting(t *testing.T) { // TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased. func TestLeasingGetNoLeaseTTL(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -223,8 +223,8 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) { // TestLeasingGetSerializable checks the leasing KV can make serialized requests // when the etcd cluster is partitioned. 
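// (For orientation, the leasing KV wrapper constructed throughout these
// tests, assuming a client cli and context ctx; "pfx/" mirrors the prefix
// used below.)
//
//	lkv, closeLKV, err := leasing.NewKV(cli, "pfx/")
//	if err != nil { /* handle */ }
//	defer closeLKV()
//	resp, _ := lkv.Get(ctx, "k", clientv3.WithSerializable()) // serializable reads can be served locally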
func TestLeasingGetSerializable(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -263,8 +263,8 @@ func TestLeasingGetSerializable(t *testing.T) { // TestLeasingPrevKey checks the cache respects WithPrevKV on puts. func TestLeasingPrevKey(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -289,8 +289,8 @@ func TestLeasingPrevKey(t *testing.T) { // TestLeasingRevGet checks the cache respects Get by Revision. func TestLeasingRevGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -325,8 +325,8 @@ func TestLeasingRevGet(t *testing.T) { // TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server. func TestLeasingGetWithOpts(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -370,8 +370,8 @@ func TestLeasingGetWithOpts(t *testing.T) { // TestLeasingConcurrentPut ensures that a get after concurrent puts returns // the recently put data. 
func TestLeasingConcurrentPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -417,8 +417,8 @@ func TestLeasingConcurrentPut(t *testing.T) { } func TestLeasingDisconnectedGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -446,8 +446,8 @@ func TestLeasingDisconnectedGet(t *testing.T) { } func TestLeasingDeleteOwner(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -480,8 +480,8 @@ func TestLeasingDeleteOwner(t *testing.T) { } func TestLeasingDeleteNonOwner(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -515,8 +515,8 @@ func TestLeasingDeleteNonOwner(t *testing.T) { } func TestLeasingOverwriteResponse(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -549,8 +549,8 @@ func TestLeasingOverwriteResponse(t *testing.T) { } func TestLeasingOwnerPutResponse(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -587,8 +587,8 @@ func TestLeasingOwnerPutResponse(t *testing.T) { } func TestLeasingTxnOwnerGetRange(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -616,8 +616,8 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) { } func TestLeasingTxnOwnerGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) client := clus.Client(0) @@ -702,8 +702,8 @@ func TestLeasingTxnOwnerGet(t *testing.T) { } func TestLeasingTxnOwnerDeleteRange(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := 
integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -741,8 +741,8 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) { } func TestLeasingTxnOwnerDelete(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -772,8 +772,8 @@ func TestLeasingTxnOwnerDelete(t *testing.T) { } func TestLeasingTxnOwnerIf(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -866,8 +866,8 @@ func TestLeasingTxnOwnerIf(t *testing.T) { } func TestLeasingTxnCancel(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -900,8 +900,8 @@ func TestLeasingTxnCancel(t *testing.T) { } func TestLeasingTxnNonOwnerPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -978,8 +978,8 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) { // TestLeasingTxnRandIfThenOrElse randomly leases keys to two separate clients, then // issues a random If/{Then,Else} transaction on those keys to one client. 
func TestLeasingTxnRandIfThenOrElse(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "pfx/") @@ -1084,8 +1084,8 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) { } func TestLeasingOwnerPutError(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -1105,8 +1105,8 @@ func TestLeasingOwnerPutError(t *testing.T) { } func TestLeasingOwnerDeleteError(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -1126,8 +1126,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) { } func TestLeasingNonOwnerPutError(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -1151,8 +1151,8 @@ func TestLeasingOwnerDeleteFrom(t *testing.T) { } func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "0/") @@ -1200,8 +1200,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { } func TestLeasingDeleteRangeBounds(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/") @@ -1258,8 +1258,8 @@ func TestLeaseDeleteRangeContendDel(t *testing.T) { } func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/") @@ -1316,8 +1316,8 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { } func TestLeasingPutGetDeleteConcurrent(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkvs := make([]clientv3.KV, 16) @@ -1375,8 +1375,8 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) { // TestLeasingReconnectOwnerRevoke checks that 
revocation works if // disconnected when trying to submit revoke txn. func TestLeasingReconnectOwnerRevoke(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/") @@ -1436,8 +1436,8 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) { // TestLeasingReconnectOwnerRevokeCompact checks that revocation works if // disconnected and the watch is compacted. func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/") @@ -1489,8 +1489,8 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { // TestLeasingReconnectOwnerConsistency checks a write error on an owner will // not cause inconsistency between the server and the client. func TestLeasingReconnectOwnerConsistency(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1563,8 +1563,8 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) { } func TestLeasingTxnAtomicCache(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1649,8 +1649,8 @@ func TestLeasingTxnAtomicCache(t *testing.T) { // TestLeasingReconnectTxn checks that Txn is resilient to disconnects. func TestLeasingReconnectTxn(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1685,8 +1685,8 @@ func TestLeasingReconnectTxn(t *testing.T) { // TestLeasingReconnectNonOwnerGet checks a get error on a non-owner will // not cause inconsistency between the server and the client. 
func TestLeasingReconnectNonOwnerGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1736,8 +1736,8 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) { } func TestLeasingTxnRangeCmp(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1771,8 +1771,8 @@ func TestLeasingTxnRangeCmp(t *testing.T) { } func TestLeasingDo(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1813,8 +1813,8 @@ func TestLeasingDo(t *testing.T) { } func TestLeasingTxnOwnerPutBranch(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1907,8 +1907,8 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the } func TestLeasingSessionExpire(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1)) @@ -1931,7 +1931,7 @@ func TestLeasingSessionExpire(t *testing.T) { } waitForExpireAck(t, lkv) clus.Members[0].Restart(t) - integration.WaitClientV3(t, lkv2) + integration2.WaitClientV3(t, lkv2) if _, err = lkv2.Put(context.TODO(), "abc", "def"); err != nil { t.Fatal(err) } @@ -1983,8 +1983,8 @@ func TestLeasingSessionExpireCancel(t *testing.T) { } for i := range tests { t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1)) diff --git a/tests/integration/clientv3/maintenance_test.go b/tests/integration/clientv3/maintenance_test.go index e48a4a4fa..945fde082 100644 --- a/tests/integration/clientv3/maintenance_test.go +++ b/tests/integration/clientv3/maintenance_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" "google.golang.org/grpc" @@ -34,13 +35,12 @@ import ( "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/tests/v3/integration" ) func TestMaintenanceHashKV(t *testing.T) { - 
integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) for i := 0; i < 3; i++ { @@ -71,9 +71,9 @@ func TestMaintenanceHashKV(t *testing.T) { } func TestMaintenanceMoveLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) oldLeadIdx := clus.WaitLeader(t) @@ -102,9 +102,9 @@ func TestMaintenanceMoveLeader(t *testing.T) { // TestMaintenanceSnapshotCancel ensures that context cancel // before snapshot reading returns corresponding context errors. func TestMaintenanceSnapshotCancel(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // reading snapshot with canceled context should error out @@ -145,9 +145,9 @@ func TestMaintenanceSnapshotTimeout(t *testing.T) { // testMaintenanceSnapshotTimeout, given a snapshot function, ensures that it // returns the corresponding context errors when the context times out before snapshot reading func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // reading snapshot with deadline exceeded should error out @@ -190,9 +190,9 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) { // testMaintenanceSnapshotErrorInflight, given a snapshot function, ensures that the ReadCloser it returns // fails to read with the corresponding context errors when the context is canceled or times out mid-read. func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) // take about 1-second to read snapshot @@ -247,10 +247,10 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co // TestMaintenanceSnapshotWithVersionVersion ensures that SnapshotWithVersion returns correct version value. 
func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // Set SnapshotCount to 1 to force raft snapshot to ensure that storage version is set - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SnapshotCount: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SnapshotCount: 1}) defer clus.Terminate(t) // Put some keys to ensure that wal snapshot is triggered @@ -270,9 +270,9 @@ func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { } func TestMaintenanceStatus(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) clus.WaitLeader(t) @@ -282,7 +282,7 @@ func TestMaintenanceStatus(t *testing.T) { eps[i] = clus.Members[i].GRPCURL() } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/metrics_test.go b/tests/integration/clientv3/metrics_test.go index 4e2202cee..07c36a81c 100644 --- a/tests/integration/clientv3/metrics_test.go +++ b/tests/integration/clientv3/metrics_test.go @@ -25,17 +25,16 @@ import ( "testing" "time" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" - grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/client/v3" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) func TestV3ClientMetrics(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) var ( addr = "localhost:27989" @@ -71,7 +70,7 @@ func TestV3ClientMetrics(t *testing.T) { url := "unix://" + addr + "/metrics" - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -81,7 +80,7 @@ func TestV3ClientMetrics(t *testing.T) { grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), }, } - cli, cerr := integration.NewClient(t, cfg) + cli, cerr := integration2.NewClient(t, cfg) if cerr != nil { t.Fatal(cerr) } diff --git a/tests/integration/clientv3/mirror_test.go b/tests/integration/clientv3/mirror_test.go index c9246e0f2..36dc71dcc 100644 --- a/tests/integration/clientv3/mirror_test.go +++ b/tests/integration/clientv3/mirror_test.go @@ -24,13 +24,13 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3/mirror" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMirrorSync(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.Client(0) @@ -72,9 +72,9 @@ func TestMirrorSync(t *testing.T) { } func TestMirrorSyncBase(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, 
&integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) cli := cluster.Client(0) diff --git a/tests/integration/clientv3/namespace_test.go b/tests/integration/clientv3/namespace_test.go index b7e07f4f3..4b76ca34e 100644 --- a/tests/integration/clientv3/namespace_test.go +++ b/tests/integration/clientv3/namespace_test.go @@ -22,13 +22,13 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestNamespacePutGet(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.Client(0) @@ -55,9 +55,9 @@ func TestNamespacePutGet(t *testing.T) { } func TestNamespaceWatch(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.Client(0) diff --git a/tests/integration/clientv3/naming/endpoints_test.go b/tests/integration/clientv3/naming/endpoints_test.go index 707616851..d5cbbf8cf 100644 --- a/tests/integration/clientv3/naming/endpoints_test.go +++ b/tests/integration/clientv3/naming/endpoints_test.go @@ -21,14 +21,13 @@ import ( etcd "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/naming/endpoints" - - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestEndpointManager(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) em, err := endpoints.NewManager(clus.RandClient(), "foo") @@ -88,9 +87,9 @@ func TestEndpointManager(t *testing.T) { // correctly with multiple hosts and correctly receive multiple // updates in a single revision. 
func TestEndpointManagerAtomicity(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.RandClient() @@ -130,9 +129,9 @@ func TestEndpointManagerAtomicity(t *testing.T) { } func TestEndpointManagerCRUD(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) em, err := endpoints.NewManager(clus.RandClient(), "foo") diff --git a/tests/integration/clientv3/naming/resolver_test.go b/tests/integration/clientv3/naming/resolver_test.go index 980580c16..445ebca86 100644 --- a/tests/integration/clientv3/naming/resolver_test.go +++ b/tests/integration/clientv3/naming/resolver_test.go @@ -23,8 +23,7 @@ import ( "go.etcd.io/etcd/client/v3/naming/endpoints" "go.etcd.io/etcd/client/v3/naming/resolver" "go.etcd.io/etcd/pkg/v3/grpc_testing" - "go.etcd.io/etcd/tests/v3/integration" - + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -32,7 +31,7 @@ import ( // This test mimics scenario described in grpc_naming.md doc. func TestEtcdGrpcResolver(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) s1PayloadBody := []byte{'1'} s1 := grpc_testing.NewDummyStubServer(s1PayloadBody) @@ -48,7 +47,7 @@ func TestEtcdGrpcResolver(t *testing.T) { } defer s2.Stop() - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) em, err := endpoints.NewManager(clus.Client(0), "foo") diff --git a/tests/integration/clientv3/ordering_kv_test.go b/tests/integration/clientv3/ordering_kv_test.go index b6b3ce71f..f213d497c 100644 --- a/tests/integration/clientv3/ordering_kv_test.go +++ b/tests/integration/clientv3/ordering_kv_test.go @@ -23,14 +23,14 @@ import ( "github.com/stretchr/testify/assert" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestDetectKvOrderViolation(t *testing.T) { var errOrderViolation = errors.New("DetectedOrderViolation") - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -40,7 +40,7 @@ func TestDetectKvOrderViolation(t *testing.T) { clus.Members[2].GRPCURL(), }, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -96,8 +96,8 @@ func TestDetectKvOrderViolation(t *testing.T) { func TestDetectTxnOrderViolation(t *testing.T) { var errOrderViolation = errors.New("DetectedOrderViolation") - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -107,7 +107,7 @@ func TestDetectTxnOrderViolation(t *testing.T) { 
clus.Members[2].GRPCURL(), }, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/ordering_util_test.go b/tests/integration/clientv3/ordering_util_test.go index a4b65ec38..701be05b8 100644 --- a/tests/integration/clientv3/ordering_util_test.go +++ b/tests/integration/clientv3/ordering_util_test.go @@ -21,12 +21,12 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestEndpointSwitchResolvesViolation(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) eps := []string{ clus.Members[0].GRPCURL(), @@ -34,7 +34,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { clus.Members[2].GRPCURL(), } cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}} - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -79,8 +79,8 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { } func TestUnresolvableOrderViolation(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true}) defer clus.Terminate(t) cfg := clientv3.Config{ Endpoints: []string{ @@ -91,7 +91,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { clus.Members[4].GRPCURL(), }, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/role_test.go b/tests/integration/clientv3/role_test.go index 62ba49d3a..a10e6f648 100644 --- a/tests/integration/clientv3/role_test.go +++ b/tests/integration/clientv3/role_test.go @@ -19,13 +19,13 @@ import ( "testing" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestRoleError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clus.RandClient() diff --git a/tests/integration/clientv3/snapshot/v3_snapshot_test.go b/tests/integration/clientv3/snapshot/v3_snapshot_test.go index c61188981..f6188b323 100644 --- a/tests/integration/clientv3/snapshot/v3_snapshot_test.go +++ b/tests/integration/clientv3/snapshot/v3_snapshot_test.go @@ -29,7 +29,7 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/snapshot" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" ) @@ -78,7 +78,7 @@ func newEmbedConfig(t *testing.T) *embed.Config { clusterN := 1 urls := newEmbedURLs(clusterN * 2) cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration.NewEmbedConfig(t, "default") + cfg := integration2.NewEmbedConfig(t, "default") cfg.ClusterState = "new" cfg.LCUrls, cfg.ACUrls = cURLs, cURLs cfg.LPUrls, cfg.APUrls 
= pURLs, pURLs @@ -105,7 +105,7 @@ func createSnapshotFile(t *testing.T, cfg *embed.Config, kvs []kv) (version stri } ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/txn_test.go b/tests/integration/clientv3/txn_test.go index 679b9868f..b23573605 100644 --- a/tests/integration/clientv3/txn_test.go +++ b/tests/integration/clientv3/txn_test.go @@ -23,13 +23,13 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestTxnError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -51,9 +51,9 @@ func TestTxnError(t *testing.T) { } func TestTxnWriteFail(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kv := clus.Client(0) @@ -101,9 +101,9 @@ func TestTxnWriteFail(t *testing.T) { func TestTxnReadRetry(t *testing.T) { t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request") - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kv := clus.Client(0) @@ -140,9 +140,9 @@ func TestTxnReadRetry(t *testing.T) { } func TestTxnSuccess(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.Client(0) @@ -163,9 +163,9 @@ func TestTxnSuccess(t *testing.T) { } func TestTxnCompareRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.Client(0) @@ -190,9 +190,9 @@ func TestTxnCompareRange(t *testing.T) { } func TestTxnNested(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.Client(0) diff --git a/tests/integration/clientv3/user_test.go b/tests/integration/clientv3/user_test.go index fe8b4cde2..f0fe73a73 100644 --- a/tests/integration/clientv3/user_test.go +++ b/tests/integration/clientv3/user_test.go @@ -21,14 +21,14 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) func TestUserError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, 
&integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clus.RandClient() @@ -55,9 +55,9 @@ func TestUserError(t *testing.T) { } func TestUserErrorAuth(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clus.RandClient() @@ -75,16 +75,16 @@ func TestUserErrorAuth(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cfg.Username, cfg.Password = "wrong-id", "123" - if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { + if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err) } cfg.Username, cfg.Password = "root", "wrong-pass" - if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { + if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err) } cfg.Username, cfg.Password = "root", "123" - authed, err := integration.NewClient(t, cfg) + authed, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -112,9 +112,9 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) { // Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode. func TestGetTokenWithoutAuth(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) authapi := clus.RandClient() @@ -135,7 +135,7 @@ func TestGetTokenWithoutAuth(t *testing.T) { Password: "123", } - client, err = integration.NewClient(t, cfg) + client, err = integration2.NewClient(t, cfg) if err == nil { defer client.Close() } diff --git a/tests/integration/clientv3/watch_fragment_test.go b/tests/integration/clientv3/watch_fragment_test.go index 7f564fe10..ab2367932 100644 --- a/tests/integration/clientv3/watch_fragment_test.go +++ b/tests/integration/clientv3/watch_fragment_test.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestWatchFragmentDisable ensures that large watch @@ -64,16 +64,16 @@ func TestWatchFragmentEnableWithGRPCLimit(t *testing.T) { // testWatchFragment triggers watch response that spans over multiple // revisions exceeding server request limits when combined. 
func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cfg := &integration.ClusterConfig{ + cfg := &integration2.ClusterConfig{ Size: 1, MaxRequestBytes: 1.5 * 1024 * 1024, } if exceedRecvLimit { cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024 } - clus := integration.NewClusterV3(t, cfg) + clus := integration2.NewClusterV3(t, cfg) defer clus.Terminate(t) cli := clus.Client(0) diff --git a/tests/integration/clientv3/watch_test.go b/tests/integration/clientv3/watch_test.go index 7a992ecf9..f7ca77039 100644 --- a/tests/integration/clientv3/watch_test.go +++ b/tests/integration/clientv3/watch_test.go @@ -29,14 +29,14 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc/metadata" ) type watcherTest func(*testing.T, *watchctx) type watchctx struct { - clus *integration.ClusterV3 + clus *integration2.ClusterV3 w clientv3.Watcher kv clientv3.KV wclientMember int @@ -45,9 +45,9 @@ type watchctx struct { } func runWatchTest(t *testing.T, f watcherTest) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) wclientMember := rand.Intn(3) @@ -299,7 +299,7 @@ func TestWatchCancelRunning(t *testing.T) { } func testWatchCancelRunning(t *testing.T, wctx *watchctx) { - integration.BeforeTest(t) + integration2.BeforeTest(t) ctx, cancel := context.WithCancel(context.Background()) if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil { @@ -347,8 +347,8 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) { } func TestWatchResumeInitRev(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -402,9 +402,9 @@ func TestWatchResumeInitRev(t *testing.T) { // either a compaction error or all keys by staying in sync before the compaction // is finally applied. func TestWatchResumeCompacted(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) // create a waiting watcher at rev 1 @@ -489,9 +489,9 @@ func TestWatchResumeCompacted(t *testing.T) { // TestWatchCompactRevision ensures the CompactRevision error is given on a // compaction event ahead of a watcher. 
func TestWatchCompactRevision(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // set some keys @@ -531,7 +531,7 @@ func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNot func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) } func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // accelerate report interval so test terminates quickly oldpi := v3rpc.GetProgressReportInterval() @@ -540,7 +540,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { pi := 3 * time.Second defer func() { v3rpc.SetProgressReportInterval(oldpi) }() - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) wc := clus.RandClient() @@ -585,11 +585,11 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { } func TestConfigurableWatchProgressNotifyInterval(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) progressInterval := 200 * time.Millisecond - clus := integration.NewClusterV3(t, - &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, + &integration2.ClusterConfig{ Size: 3, WatchProgressNotifyInterval: progressInterval, }) @@ -611,7 +611,7 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) { } func TestWatchRequestProgress(t *testing.T) { - if integration.ThroughProxy { + if integration2.ThroughProxy { t.Skipf("grpc-proxy does not support WatchProgress yet") } testCases := []struct { @@ -625,11 +625,11 @@ func TestWatchRequestProgress(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) watchTimeout := 3 * time.Second - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) wc := clus.RandClient() @@ -686,9 +686,9 @@ func TestWatchRequestProgress(t *testing.T) { } func TestWatchEventType(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -760,9 +760,9 @@ func TestWatchEventType(t *testing.T) { } func TestWatchErrConnClosed(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -783,16 +783,16 @@ func TestWatchErrConnClosed(t *testing.T) { clus.TakeClient(0) select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("wc.Watch took too long") case <-donec: } } func TestWatchAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -810,7 +810,7 @@ func 
TestWatchAfterClose(t *testing.T) { close(donec) }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("wc.Watch took too long") case <-donec: } @@ -818,9 +818,9 @@ func TestWatchAfterClose(t *testing.T) { // TestWatchWithRequireLeader checks the watch channel closes when no leader. func TestWatchWithRequireLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // Put a key for the non-require leader watch to read as an event. @@ -856,7 +856,7 @@ func TestWatchWithRequireLeader(t *testing.T) { if resp.Err() != rpctypes.ErrNoLeader { t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp) } - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("watch without leader took too long to close") } @@ -865,7 +865,7 @@ func TestWatchWithRequireLeader(t *testing.T) { if ok { t.Fatalf("expected closed channel, got response %v", resp) } - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("waited too long for channel to close") } @@ -892,9 +892,9 @@ func TestWatchWithRequireLeader(t *testing.T) { // TestWatchWithFilter checks that watch filtering works. func TestWatchWithFilter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -931,9 +931,9 @@ func TestWatchWithFilter(t *testing.T) { // TestWatchWithCreatedNotification checks that WithCreatedNotify returns a // Created watch response. func TestWatchWithCreatedNotification(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -953,9 +953,9 @@ func TestWatchWithCreatedNotification(t *testing.T) { // a watcher with created notify does not post duplicate // created events from disconnect. func TestWatchWithCreatedNotificationDropConn(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -982,9 +982,9 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) { // TestWatchCancelOnServer ensures client watcher cancels propagate back to the server. func TestWatchCancelOnServer(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -1050,20 +1050,20 @@ func TestWatchCancelOnServer(t *testing.T) { // 4. watcher client finishes tearing down stream on "ctx" // 5. 
w2 comes back canceled func TestWatchOverlapContextCancel(t *testing.T) { - f := func(clus *integration.ClusterV3) {} + f := func(clus *integration2.ClusterV3) {} testWatchOverlapContextCancel(t, f) } func TestWatchOverlapDropConnContextCancel(t *testing.T) { - f := func(clus *integration.ClusterV3) { + f := func(clus *integration2.ClusterV3) { clus.Members[0].Bridge().DropConnections() } testWatchOverlapContextCancel(t, f) } -func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) +func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.ClusterV3)) { + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) n := 100 @@ -1123,8 +1123,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) // TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately // closing the client does not return a client closing error. func TestWatchCancelAndCloseClient(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) ctx, cancel := context.WithCancel(context.Background()) @@ -1153,8 +1153,8 @@ func TestWatchCancelAndCloseClient(t *testing.T) { // to put them in resuming mode, cancels them so some resumes by cancel fail, // then closes the watcher interface to ensure correct clean up. func TestWatchStressResumeClose(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -1175,8 +1175,8 @@ func TestWatchStressResumeClose(t *testing.T) { // TestWatchCancelDisconnected ensures canceling a watcher works when // its grpc stream is disconnected / reconnecting. 
func TestWatchCancelDisconnected(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go index 2fb5a18d9..443d97d42 100644 --- a/tests/integration/cluster_test.go +++ b/tests/integration/cluster_test.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/client/v2" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/tests/v3/framework/integration" ) func init() { @@ -34,7 +35,7 @@ func init() { log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile) if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" { if i, err := strconv.ParseInt(t, 10, 64); err == nil { - electionTicks = int(i) + integration.ElectionTicks = int(i) } } } @@ -43,16 +44,16 @@ func TestClusterOf1(t *testing.T) { testCluster(t, 1) } func TestClusterOf3(t *testing.T) { testCluster(t, 3) } func testCluster(t *testing.T, size int) { - BeforeTest(t) - c := NewCluster(t, size) + integration.BeforeTest(t) + c := integration.NewCluster(t, size) c.Launch(t) defer c.Terminate(t) clusterMustProgress(t, c.Members) } func TestTLSClusterOf3(t *testing.T) { - BeforeTest(t) - c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo}) + integration.BeforeTest(t) + c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo}) c.Launch(t) defer c.Terminate(t) clusterMustProgress(t, c.Members) @@ -61,8 +62,8 @@ func TestTLSClusterOf3(t *testing.T) { // Test that a cluster can progress when using separate client and server certs when peering. This supports certificate // authorities that don't issue dual-usage certificates. 
func TestTLSClusterOf3WithSpecificUsage(t *testing.T) { - BeforeTest(t) - c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfoWithSpecificUsage}) + integration.BeforeTest(t) + c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage}) c.Launch(t) defer c.Terminate(t) clusterMustProgress(t, c.Members) @@ -72,22 +73,22 @@ func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1 func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) } func testClusterUsingDiscovery(t *testing.T, size int) { - BeforeTest(t) - dc := NewCluster(t, 1) + integration.BeforeTest(t) + dc := integration.NewCluster(t, 1) dc.Launch(t) defer dc.Terminate(t) // init discovery token space - dcc := MustNewHTTPClient(t, dc.URLs(), nil) + dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil) dkapi := client.NewKeysAPI(dcc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil { t.Fatal(err) } cancel() - c := NewClusterByConfig( + c := integration.NewClusterByConfig( t, - &ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"}, + &integration.ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"}, ) c.Launch(t) defer c.Terminate(t) @@ -95,23 +96,23 @@ func testClusterUsingDiscovery(t *testing.T, size int) { } func TestTLSClusterOf3UsingDiscovery(t *testing.T) { - BeforeTest(t) - dc := NewCluster(t, 1) + integration.BeforeTest(t) + dc := integration.NewCluster(t, 1) dc.Launch(t) defer dc.Terminate(t) // init discovery token space - dcc := MustNewHTTPClient(t, dc.URLs(), nil) + dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil) dkapi := client.NewKeysAPI(dcc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil { t.Fatal(err) } cancel() - c := NewClusterByConfig(t, - &ClusterConfig{ + c := integration.NewClusterByConfig(t, + &integration.ClusterConfig{ Size: 3, - PeerTLS: &testTLSInfo, + PeerTLS: &integration.TestTLSInfo, DiscoveryURL: dc.URL(0) + "/v2/keys"}, ) c.Launch(t) @@ -123,8 +124,8 @@ func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) } func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) } func testDoubleClusterSize(t *testing.T, size int) { - BeforeTest(t) - c := NewCluster(t, size) + integration.BeforeTest(t) + c := integration.NewCluster(t, size) c.Launch(t) defer c.Terminate(t) @@ -135,8 +136,8 @@ func testDoubleClusterSize(t *testing.T, size int) { } func TestDoubleTLSClusterSizeOf3(t *testing.T) { - BeforeTest(t) - c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo}) + integration.BeforeTest(t) + c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo}) c.Launch(t) defer c.Terminate(t) @@ -150,16 +151,16 @@ func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) } func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) } func testDecreaseClusterSize(t *testing.T, size int) { - BeforeTest(t) - c := NewCluster(t, size) + integration.BeforeTest(t) + c := integration.NewCluster(t, size) c.Launch(t) defer c.Terminate(t) 
// TODO: remove the last but one member for i := 0; i < size-1; i++ { - id := c.Members[len(c.Members)-1].s.ID() + id := c.Members[len(c.Members)-1].Server.ID() // may hit second leader election on slow machines - if err := c.removeMember(t, uint64(id)); err != nil { + if err := c.RemoveMember(t, uint64(id)); err != nil { if strings.Contains(err.Error(), "no leader") { t.Logf("got leader error (%v)", err) i-- @@ -167,24 +168,24 @@ func testDecreaseClusterSize(t *testing.T, size int) { } t.Fatal(err) } - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) } clusterMustProgress(t, c.Members) } func TestForceNewCluster(t *testing.T) { - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) c.Launch(t) - cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) + cc := integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) resp, err := kapi.Create(ctx, "/foo", "bar") if err != nil { t.Fatalf("unexpected create error: %v", err) } cancel() // ensure create has been applied in this machine - ctx, cancel = context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil { t.Fatalf("unexpected watch error: %v", err) } @@ -199,13 +200,13 @@ func TestForceNewCluster(t *testing.T) { t.Fatalf("unexpected ForceRestart error: %v", err) } defer c.Members[0].Terminate(t) - c.waitLeader(t, c.Members[:1]) + c.WaitMembersForLeader(t, c.Members[:1]) // use new http client to init new connection - cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) + cc = integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) kapi = client.NewKeysAPI(cc) // ensure force restart keep the old data, and new cluster can make progress - ctx, cancel = context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil { t.Fatalf("unexpected watch error: %v", err) } @@ -214,38 +215,38 @@ } func TestAddMemberAfterClusterFullRotation(t *testing.T) { - BeforeTest(t) - c := NewCluster(t, 3) + integration.BeforeTest(t) + c := integration.NewCluster(t, 3) c.Launch(t) defer c.Terminate(t) // remove all the previous three members and add in three new members. for i := 0; i < 3; i++ { - c.RemoveMember(t, uint64(c.Members[0].s.ID())) - c.waitLeader(t, c.Members) + c.MustRemoveMember(t, uint64(c.Members[0].Server.ID())) + c.WaitMembersForLeader(t, c.Members) c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) } c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } // Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) { - BeforeTest(t) - c := NewCluster(t, 5) + integration.BeforeTest(t) + c := integration.NewCluster(t, 5) c.Launch(t) defer c.Terminate(t) - c.RemoveMember(t, uint64(c.Members[4].s.ID())) - c.waitLeader(t, c.Members) + c.MustRemoveMember(t, uint64(c.Members[4].Server.ID())) + c.WaitMembersForLeader(t, c.Members) c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } @@ -256,8 +257,8 @@ func TestIssue2746(t *testing.T) { testIssue2746(t, 5) } func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) } func testIssue2746(t *testing.T, members int) { - BeforeTest(t) - c := NewCluster(t, members) + integration.BeforeTest(t) + c := integration.NewCluster(t, members) for _, m := range c.Members { m.SnapshotCount = 10 @@ -271,32 +272,32 @@ func testIssue2746(t *testing.T, members int) { clusterMustProgress(t, c.Members) } - c.RemoveMember(t, uint64(c.Members[members-1].s.ID())) - c.waitLeader(t, c.Members) + c.MustRemoveMember(t, uint64(c.Members[members-1].Server.ID())) + c.WaitMembersForLeader(t, c.Members) c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } // Ensure etcd will not panic when removing a just started member. func TestIssue2904(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) // start 1-member cluster to ensure member 0 is the leader of the cluster. - c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true}) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) c.Launch(t) defer c.Terminate(t) c.AddMember(t) c.Members[1].Stop(t) // send remove member-1 request to the cluster. - cc := MustNewHTTPClient(t, c.URLs(), nil) + cc := integration.MustNewHTTPClient(t, c.URLs(), nil) ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) // the proposal is not committed because member 1 is stopped, but the // proposal is appended to leader's raft log. - ma.Remove(ctx, c.Members[1].s.ID().String()) + ma.Remove(ctx, c.Members[1].Server.ID().String()) cancel() // restart member, and expect it to send UpdateAttributes request. @@ -305,21 +306,21 @@ func TestIssue2904(t *testing.T) { c.Members[1].Restart(t) // when the member comes back, it ack the proposal to remove itself, // and apply it. - <-c.Members[1].s.StopNotify() + <-c.Members[1].Server.StopNotify() // terminate removed member c.Members[1].Terminate(t) c.Members = c.Members[:1] // wait member to be removed. - c.waitMembersMatch(t, c.HTTPMembers()) + c.WaitMembersMatch(t, c.HTTPMembers()) } // TestIssue3699 tests minority failure during cluster configuration; it was // deadlocking.
func TestIssue3699(t *testing.T) { // start a cluster of 3 nodes a, b, c - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) c.Launch(t) defer c.Terminate(t) @@ -330,16 +331,16 @@ func TestIssue3699(t *testing.T) { c.AddMember(t) // electing node d as leader makes node a unable to participate - leaderID := c.waitLeader(t, c.Members) + leaderID := c.WaitMembersForLeader(t, c.Members) for leaderID != 3 { c.Members[leaderID].Stop(t) - <-c.Members[leaderID].s.StopNotify() + <-c.Members[leaderID].Server.StopNotify() // do not restart the killed member immediately. // the member will advance its election timeout after restart, // so it will have a better chance to become the leader again. - time.Sleep(time.Duration(electionTicks * int(tickDuration))) + time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration))) c.Members[leaderID].Restart(t) - leaderID = c.waitLeader(t, c.Members) + leaderID = c.WaitMembersForLeader(t, c.Members) } // bring back node a @@ -351,17 +352,17 @@ func TestIssue3699(t *testing.T) { // waiting for ReadyNotify can take several seconds case <-time.After(10 * time.Second): t.Fatalf("waited too long for ready notification") - case <-c.Members[0].s.StopNotify(): + case <-c.Members[0].Server.StopNotify(): t.Fatalf("should not be stopped") - case <-c.Members[0].s.ReadyNotify(): + case <-c.Members[0].Server.ReadyNotify(): } - // must waitLeader so goroutines don't leak on terminate - c.waitLeader(t, c.Members) + // must WaitMembersForLeader so goroutines don't leak on terminate + c.WaitMembersForLeader(t, c.Members) // try to participate in cluster - cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS) + cc := integration.MustNewHTTPClient(t, []string{c.URL(0)}, c.Cfg.ClientTLS) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil { t.Fatalf("unexpected error on Set (%v)", err) } @@ -370,21 +371,21 @@ // TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) { - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) for _, m := range c.Members { m.ServerConfig.StrictReconfigCheck = true } c.Launch(t) defer c.Terminate(t) // make cluster unhealthy and wait for downed peer c.Members[0].Stop(t) c.WaitLeader(t) // all attempts to add member should fail for i := 1; i < len(c.Members); i++ { - err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345") + err := c.AddMemberByURL(t, c.URL(i), "unix://foo:12345") if err == nil { t.Fatalf("should have failed adding peer") } @@ -399,23 +400,23 @@ func TestRejectUnhealthyAdd(t *testing.T) { c.WaitLeader(t) time.Sleep(2 * etcdserver.HealthInterval) // add member should succeed now that it's healthy var err error for i := 1; i < len(c.Members); i++ { - if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil { + if err = c.AddMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil { break } } if err != nil { t.Fatalf("should have added peer to healthy cluster (%v)", err) } } // TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members // if quorum will be lost. func TestRejectUnhealthyRemove(t *testing.T) { - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 5, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 5, UseBridge: true}) for _, m := range c.Members { m.ServerConfig.StrictReconfigCheck = true } @@ -428,7 +429,7 @@ func TestRejectUnhealthyRemove(t *testing.T) { c.WaitLeader(t) // reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum - err := c.removeMember(t, uint64(c.Members[2].s.ID())) + err := c.RemoveMember(t, uint64(c.Members[2].Server.ID())) if err == nil { t.Fatalf("should reject quorum breaking remove") } @@ -438,10 +439,10 @@ } // member stopped after launch; wait for missing heartbeats - time.Sleep(time.Duration(electionTicks * int(tickDuration))) + time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration))) // permit remove dead member since (3,2) - (0,1) => (3,1) has quorum - if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil { + if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil { t.Fatalf("should accept removing down member") } @@ -452,7 +453,7 @@ time.Sleep((3 * etcdserver.HealthInterval) / 2) // accept remove member since (4,1)-(1,0) => (3,1) has quorum - if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil { + if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil { t.Fatalf("expected to remove member, got error %v", err) } } @@ -461,10 +462,10 @@ // if 'initial-cluster-state' is set 'new' and old data directory still exists // (see https://github.com/etcd-io/etcd/issues/7512 for more). func TestRestartRemoved(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) // 1. start single-member cluster - c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
+ c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) for _, m := range c.Members { m.ServerConfig.StrictReconfigCheck = true } @@ -476,10 +477,10 @@ func TestRestartRemoved(t *testing.T) { c.WaitLeader(t) oldm := c.Members[0] - oldm.keepDataDirTerminate = true + oldm.KeepDataDirTerminate = true // 3. remove first member, shut down without deleting data - if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil { + if err := c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil { t.Fatalf("expected to remove member, got error %v", err) } c.WaitLeader(t) @@ -495,7 +496,7 @@ func TestRestartRemoved(t *testing.T) { os.RemoveAll(oldm.ServerConfig.DataDir) }() select { - case <-oldm.s.StopNotify(): + case <-oldm.Server.StopNotify(): case <-time.After(time.Minute): t.Fatalf("removed member didn't exit within %v", time.Minute) } @@ -504,8 +505,8 @@ // clusterMustProgress ensures that cluster can make progress. It creates // a random key first, and check the new key could be got from all client urls // of the cluster. -func clusterMustProgress(t *testing.T, membs []*member) { - cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil) +func clusterMustProgress(t *testing.T, membs []*integration.Member) { + cc := integration.MustNewHTTPClient(t, []string{membs[0].URL()}, nil) kapi := client.NewKeysAPI(cc) key := fmt.Sprintf("foo%d", rand.Int()) var ( @@ -514,7 +515,7 @@ func clusterMustProgress(t *testing.T, membs []*member) { ) // retry in case of leader loss induced by slow CI for i := 0; i < 3; i++ { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) resp, err = kapi.Create(ctx, "/"+key, "bar") cancel() if err == nil { @@ -528,9 +529,9 @@ for i, m := range membs { u := m.URL() - mcc := MustNewHTTPClient(t, []string{u}, nil) + mcc := integration.MustNewHTTPClient(t, []string{u}, nil) mkapi := client.NewKeysAPI(mcc) - mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout) + mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil { t.Fatalf("#%d: watch on %s error: %v", i, u, err) } @@ -539,8 +540,8 @@ } func TestSpeedyTerminate(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) // Stop/Restart so requests will time out on lost leaders for i := 0; i < 3; i++ { clus.Members[i].Stop(t) @@ -553,7 +554,7 @@ }() select { case <-time.After(10 * time.Second): t.Fatalf("cluster took too long to terminate") case <-donec: } } diff --git a/tests/integration/embed/embed_test.go b/tests/integration/embed/embed_test.go index c04bf97c9..3733684d2 100644 --- a/tests/integration/embed/embed_test.go +++ b/tests/integration/embed/embed_test.go @@ -34,14 +34,14 @@ import ( "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2
"go.etcd.io/etcd/tests/v3/framework/integration" ) var ( testTLSInfo = transport.TLSInfo{ - KeyFile: integration.MustAbsPath("../../fixtures/server.key.insecure"), - CertFile: integration.MustAbsPath("../../fixtures/server.crt"), - TrustedCAFile: integration.MustAbsPath("../../fixtures/ca.crt"), + KeyFile: integration2.MustAbsPath("../../fixtures/server.key.insecure"), + CertFile: integration2.MustAbsPath("../../fixtures/server.crt"), + TrustedCAFile: integration2.MustAbsPath("../../fixtures/ca.crt"), ClientCertAuth: true, } ) @@ -160,7 +160,7 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) { t.Fatal(err) } } - cli, err := integration.NewClient(t, clientCfg) + cli, err := integration2.NewClient(t, clientCfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go index eb71191a3..dfa9fadcc 100644 --- a/tests/integration/grpc_test.go +++ b/tests/integration/grpc_test.go @@ -23,6 +23,7 @@ import ( "time" clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) @@ -94,14 +95,14 @@ func TestAuthority(t *testing.T) { for _, tc := range tcs { for _, clusterSize := range []int{1, 3} { t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) { - BeforeTest(t) - cfg := ClusterConfig{ + integration.BeforeTest(t) + cfg := integration.ClusterConfig{ Size: clusterSize, UseTCP: tc.useTCP, UseIP: tc.useTCP, } cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg) - clus := NewClusterV3(t, &cfg) + clus := integration.NewClusterV3(t, &cfg) defer clus.Terminate(t) kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig) @@ -118,11 +119,11 @@ func TestAuthority(t *testing.T) { } } -func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls.Config) { +func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integration.ClusterConfig, *tls.Config) { t.Helper() if useTLS { - cfg.ClientTLS = &testTLSInfo - tlsConfig, err := testTLSInfo.ClientConfig() + cfg.ClientTLS = &integration.TestTLSInfo + tlsConfig, err := integration.TestTLSInfo.ClientConfig() if err != nil { t.Fatal(err) } @@ -131,7 +132,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls return cfg, nil } -func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfig *tls.Config) *clientv3.Client { +func setupClient(t *testing.T, endpointPattern string, clus *integration.ClusterV3, tlsConfig *tls.Config) *clientv3.Client { t.Helper() endpoints := templateEndpoints(t, endpointPattern, clus) kv, err := clientv3.New(clientv3.Config{ @@ -146,13 +147,13 @@ func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfi return kv } -func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string { +func templateEndpoints(t *testing.T, pattern string, clus *integration.ClusterV3) []string { t.Helper() endpoints := []string{} for _, m := range clus.Members { ent := pattern if strings.Contains(ent, "%d") { - ent = fmt.Sprintf(ent, GrpcPortNumber(m.UniqNumber, m.MemberNumber)) + ent = fmt.Sprintf(ent, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } if strings.Contains(ent, "%s") { ent = fmt.Sprintf(ent, m.Name) @@ -165,11 +166,11 @@ func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string { return endpoints } -func templateAuthority(t *testing.T, pattern string, m *member) string { +func templateAuthority(t *testing.T, pattern string, m *integration.Member) string { 
t.Helper() authority := pattern if strings.Contains(authority, "%d") { - authority = fmt.Sprintf(authority, GrpcPortNumber(m.UniqNumber, m.MemberNumber)) + authority = fmt.Sprintf(authority, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } if strings.Contains(authority, "%s") { authority = fmt.Sprintf(authority, m.Name) @@ -180,7 +181,7 @@ func templateAuthority(t *testing.T, pattern string, m *member) string { return authority } -func assertAuthority(t *testing.T, expectedAuthority string, clus *ClusterV3) { +func assertAuthority(t *testing.T, expectedAuthority string, clus *integration.ClusterV3) { t.Helper() requestsFound := 0 for _, m := range clus.Members { diff --git a/tests/integration/lazy_cluster.go b/tests/integration/lazy_cluster.go index 4cc7ae765..e8ac1225d 100644 --- a/tests/integration/lazy_cluster.go +++ b/tests/integration/lazy_cluster.go @@ -22,6 +22,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // Infrastructure to provision a single shared cluster for tests - only @@ -42,7 +43,7 @@ type LazyCluster interface { EndpointsV3() []string // Cluster - calls to this method might initialize the cluster. - Cluster() *ClusterV3 + Cluster() *integration.ClusterV3 // Transport - call to this method might initialize the cluster. Transport() *http.Transport @@ -53,8 +54,8 @@ type LazyCluster interface { } type lazyCluster struct { - cfg ClusterConfig - cluster *ClusterV3 + cfg integration.ClusterConfig + cluster *integration.ClusterV3 transport *http.Transport once sync.Once tb testutil.TB @@ -64,12 +65,12 @@ type lazyCluster struct { // NewLazyCluster returns a new test cluster handler that gets created on the // first call to GetEndpoints() or GetTransport() func NewLazyCluster() LazyCluster { - return NewLazyClusterWithConfig(ClusterConfig{Size: 1}) + return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1}) } // NewLazyClusterWithConfig returns a new test cluster handler that gets created // on the first call to GetEndpoints() or GetTransport() -func NewLazyClusterWithConfig(cfg ClusterConfig) LazyCluster { +func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster { tb, closer := testutil.NewTestingTBProthesis("lazy_cluster") return &lazyCluster{cfg: cfg, tb: tb, closer: closer} } @@ -81,7 +82,7 @@ func (lc *lazyCluster) mustLazyInit() { if err != nil { log.Fatal(err) } - lc.cluster = NewClusterV3(lc.tb, &lc.cfg) + lc.cluster = integration.NewClusterV3(lc.tb, &lc.cfg) }) } @@ -105,7 +106,7 @@ func (lc *lazyCluster) EndpointsV3() []string { return lc.Cluster().Client(0).Endpoints() } -func (lc *lazyCluster) Cluster() *ClusterV3 { +func (lc *lazyCluster) Cluster() *integration.ClusterV3 { lc.mustLazyInit() return lc.cluster } diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go index 99788b757..f8f146000 100644 --- a/tests/integration/member_test.go +++ b/tests/integration/member_test.go @@ -23,52 +23,53 @@ import ( "github.com/stretchr/testify/assert" "go.etcd.io/etcd/client/v2" + "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestPauseMember(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - c := NewCluster(t, 5) + c := integration.NewCluster(t, 5) c.Launch(t) defer c.Terminate(t) for i := 0; i < 5; i++ { c.Members[i].Pause() - membs := append([]*member{}, c.Members[:i]...) + membs := append([]*integration.Member{}, c.Members[:i]...) membs = append(membs, c.Members[i+1:]...) 
- c.waitLeader(t, membs) + c.WaitMembersForLeader(t, membs) clusterMustProgress(t, membs) c.Members[i].Resume() } - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } func TestRestartMember(t *testing.T) { - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) c.Launch(t) defer c.Terminate(t) for i := 0; i < 3; i++ { c.Members[i].Stop(t) - membs := append([]*member{}, c.Members[:i]...) + membs := append([]*integration.Member{}, c.Members[:i]...) membs = append(membs, c.Members[i+1:]...) - c.waitLeader(t, membs) + c.WaitMembersForLeader(t, membs) clusterMustProgress(t, membs) err := c.Members[i].Restart(t) if err != nil { t.Fatal(err) } } - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } func TestLaunchDuplicateMemberShouldFail(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) size := 3 - c := NewCluster(t, size) + c := integration.NewCluster(t, size) m := c.Members[0].Clone(t) var err error m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd") @@ -87,8 +88,8 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) { } func TestSnapshotAndRestartMember(t *testing.T) { - BeforeTest(t) - m := mustNewMember(t, memberConfig{name: "snapAndRestartTest", useBridge: true}) + integration.BeforeTest(t) + m := integration.MustNewMember(t, integration.MemberConfig{Name: "snapAndRestartTest", UseBridge: true}) m.SnapshotCount = 100 m.Launch() defer m.Terminate(t) @@ -97,9 +98,9 @@ func TestSnapshotAndRestartMember(t *testing.T) { resps := make([]*client.Response, 120) var err error for i := 0; i < 120; i++ { - cc := MustNewHTTPClient(t, []string{m.URL()}, nil) + cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) key := fmt.Sprintf("foo%d", i) resps[i], err = kapi.Create(ctx, "/"+key, "bar") if err != nil { @@ -112,9 +113,9 @@ func TestSnapshotAndRestartMember(t *testing.T) { m.WaitOK(t) for i := 0; i < 120; i++ { - cc := MustNewHTTPClient(t, []string{m.URL()}, nil) + cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) key := fmt.Sprintf("foo%d", i) resp, err := kapi.Get(ctx, "/"+key, nil) if err != nil { diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go index 86636ce06..00efe8b1b 100644 --- a/tests/integration/metrics_test.go +++ b/tests/integration/metrics_test.go @@ -25,12 +25,13 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestMetricDbSizeBoot checks that the db size metric is set on boot. 
func TestMetricDbSizeBoot(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") @@ -49,12 +50,12 @@ func TestMetricDbSizeDefrag(t *testing.T) { // testMetricDbSizeDefrag checks that the db size metric is set after defrag. func testMetricDbSizeDefrag(t *testing.T, name string) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - mc := toGRPC(clus.Client(0)).Maintenance + kvc := integration.ToGRPC(clus.Client(0)).KV + mc := integration.ToGRPC(clus.Client(0)).Maintenance // expand the db size numPuts := 25 // large enough to write more than 1 page @@ -163,8 +164,8 @@ func testMetricDbSizeDefrag(t *testing.T, name string) { } func TestMetricQuotaBackendBytes(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes") @@ -181,8 +182,8 @@ func TestMetricQuotaBackendBytes(t *testing.T) { } func TestMetricsHealth(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second) diff --git a/tests/integration/network_partition_test.go b/tests/integration/network_partition_test.go index 6abc36700..9ea4e4534 100644 --- a/tests/integration/network_partition_test.go +++ b/tests/integration/network_partition_test.go @@ -18,12 +18,14 @@ import ( "fmt" "testing" "time" + + "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 5}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5}) defer clus.Terminate(t) leadIndex := clus.WaitLeader(t) @@ -32,20 +34,20 @@ func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) { minority := []int{leadIndex, (leadIndex + 1) % 5} majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5} - minorityMembers := getMembersByIndexSlice(clus.cluster, minority) - majorityMembers := getMembersByIndexSlice(clus.cluster, majority) + minorityMembers := getMembersByIndexSlice(clus.Cluster, minority) + majorityMembers := getMembersByIndexSlice(clus.Cluster, majority) // network partition (bi-directional) injectPartition(t, minorityMembers, majorityMembers) // minority leader must be lost - clus.waitNoLeader(minorityMembers) + clus.WaitMembersNoLeader(minorityMembers) // wait extra election timeout time.Sleep(2 * majorityMembers[0].ElectionTimeout()) // new leader must be from majority - clus.waitLeader(t, majorityMembers) + clus.WaitMembersForLeader(t, majorityMembers) // recover network partition (bi-directional) recoverPartition(t, minorityMembers, majorityMembers) @@ -69,9 +71,9 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) { } func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { - BeforeTest(t) + 
integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 5}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5}) defer clus.Terminate(t) leadIndex := clus.WaitLeader(t) @@ -80,21 +82,21 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { majority := []int{leadIndex, (leadIndex + 1) % 5, (leadIndex + 2) % 5} minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5} - majorityMembers := getMembersByIndexSlice(clus.cluster, majority) - minorityMembers := getMembersByIndexSlice(clus.cluster, minority) + majorityMembers := getMembersByIndexSlice(clus.Cluster, majority) + minorityMembers := getMembersByIndexSlice(clus.Cluster, minority) // network partition (bi-directional) injectPartition(t, majorityMembers, minorityMembers) // minority leader must be lost - clus.waitNoLeader(minorityMembers) + clus.WaitMembersNoLeader(minorityMembers) // wait extra election timeout time.Sleep(2 * majorityMembers[0].ElectionTimeout()) // leader must be hold in majority - leadIndex2 := clus.waitLeader(t, majorityMembers) - leadID, leadID2 := clus.Members[leadIndex].s.ID(), majorityMembers[leadIndex2].s.ID() + leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers) + leadID, leadID2 := clus.Members[leadIndex].Server.ID(), majorityMembers[leadIndex2].Server.ID() if leadID != leadID2 { return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2) } @@ -108,9 +110,9 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { } func TestNetworkPartition4Members(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 4}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 4}) defer clus.Terminate(t) leadIndex := clus.WaitLeader(t) @@ -119,8 +121,8 @@ func TestNetworkPartition4Members(t *testing.T) { groupA := []int{leadIndex, (leadIndex + 1) % 4} groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4} - leaderPartition := getMembersByIndexSlice(clus.cluster, groupA) - followerPartition := getMembersByIndexSlice(clus.cluster, groupB) + leaderPartition := getMembersByIndexSlice(clus.Cluster, groupA) + followerPartition := getMembersByIndexSlice(clus.Cluster, groupB) // network partition (bi-directional) injectPartition(t, leaderPartition, followerPartition) @@ -137,21 +139,21 @@ func TestNetworkPartition4Members(t *testing.T) { clusterMustProgress(t, clus.Members) } -func getMembersByIndexSlice(clus *cluster, idxs []int) []*member { - ms := make([]*member, len(idxs)) +func getMembersByIndexSlice(clus *integration.Cluster, idxs []int) []*integration.Member { + ms := make([]*integration.Member, len(idxs)) for i, idx := range idxs { ms[i] = clus.Members[idx] } return ms } -func injectPartition(t *testing.T, src, others []*member) { +func injectPartition(t *testing.T, src, others []*integration.Member) { for _, m := range src { m.InjectPartition(t, others...) } } -func recoverPartition(t *testing.T, src, others []*member) { +func recoverPartition(t *testing.T, src, others []*integration.Member) { for _, m := range src { m.RecoverPartition(t, others...) 
} diff --git a/tests/integration/proxy/grpcproxy/cluster_test.go b/tests/integration/proxy/grpcproxy/cluster_test.go index 162956444..22c6970c8 100644 --- a/tests/integration/proxy/grpcproxy/cluster_test.go +++ b/tests/integration/proxy/grpcproxy/cluster_test.go @@ -23,7 +23,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" "go.uber.org/zap" @@ -31,9 +31,9 @@ import ( ) func TestClusterProxyMemberList(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t) @@ -43,7 +43,7 @@ func TestClusterProxyMemberList(t *testing.T) { Endpoints: []string{cts.caddr}, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("err %v, want nil", err) } @@ -95,7 +95,7 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *cl Endpoints: endpoints, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/proxy/grpcproxy/kv_test.go b/tests/integration/proxy/grpcproxy/kv_test.go index 4f9ee8d25..c319c54dd 100644 --- a/tests/integration/proxy/grpcproxy/kv_test.go +++ b/tests/integration/proxy/grpcproxy/kv_test.go @@ -23,15 +23,14 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/tests/v3/integration" - + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) func TestKVProxyRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t) @@ -42,7 +41,7 @@ func TestKVProxyRange(t *testing.T) { Endpoints: []string{kvts.l.Addr().String()}, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("err = %v, want nil", err) } @@ -71,7 +70,7 @@ func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer { Endpoints: endpoints, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/proxy/grpcproxy/register_test.go b/tests/integration/proxy/grpcproxy/register_test.go index d57d01a87..d93000a5e 100644 --- a/tests/integration/proxy/grpcproxy/register_test.go +++ b/tests/integration/proxy/grpcproxy/register_test.go @@ -21,14 +21,14 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/naming/endpoints" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" ) func TestRegister(t *testing.T) { - integration.BeforeTest(t) + 
integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) paddr := clus.Members[0].GRPCURL() diff --git a/tests/integration/snapshot/member_test.go b/tests/integration/snapshot/member_test.go index 076d928bb..fb6091533 100644 --- a/tests/integration/snapshot/member_test.go +++ b/tests/integration/snapshot/member_test.go @@ -24,14 +24,14 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestSnapshotV3RestoreMultiMemberAdd ensures that multiple members // can boot into the same cluster after being restored from a same // snapshot file, and also be able to add another member to the cluster. func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} dbPath := createSnapshotFile(t, kvs) @@ -48,7 +48,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { // wait for health interval + leader election time.Sleep(etcdserver.HealthInterval + 2*time.Second) - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}}) if err != nil { t.Fatal(err) } @@ -63,7 +63,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { // wait for membership reconfiguration apply time.Sleep(testutil.ApplyTimeout) - cfg := integration.NewEmbedConfig(t, "3") + cfg := integration2.NewEmbedConfig(t, "3") cfg.InitialClusterToken = testClusterTkn cfg.ClusterState = "existing" cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs @@ -88,7 +88,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { t.Fatalf("failed to start the newly added etcd member") } - cli2, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}}) + cli2, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/snapshot/v3_snapshot_test.go b/tests/integration/snapshot/v3_snapshot_test.go index 1cad1e1e6..c49798650 100644 --- a/tests/integration/snapshot/v3_snapshot_test.go +++ b/tests/integration/snapshot/v3_snapshot_test.go @@ -29,7 +29,7 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/etcdutl/v3/snapshot" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest" ) @@ -37,7 +37,7 @@ import ( // TestSnapshotV3RestoreSingle tests single node cluster restoring // from a snapshot file. 
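// The restore flow under test, compressed to its steps using only helpers that
// appear in this patch (the member name "s1" and the ClusterState value come
// from the hunk below; the snapshot-restore step itself is elided here):
//
//	dbPath := createSnapshotFile(t, kvs)        // seed kvs, then snapshot to dbPath
//	cfg := integration2.NewEmbedConfig(t, "s1") // config for the restored member
//	cfg.ClusterState = "existing"               // join pre-seeded state, not a new cluster
//	srv, err := embed.StartEtcd(cfg)            // boot the member on the restored data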
func TestSnapshotV3RestoreSingle(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} dbPath := createSnapshotFile(t, kvs) @@ -45,7 +45,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { urls := newEmbedURLs(clusterN * 2) cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration.NewEmbedConfig(t, "s1") + cfg := integration2.NewEmbedConfig(t, "s1") cfg.InitialClusterToken = testClusterTkn cfg.ClusterState = "existing" cfg.LCUrls, cfg.ACUrls = cURLs, cURLs @@ -82,7 +82,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { } var cli *clientv3.Client - cli, err = integration.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}) + cli, err = integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}) if err != nil { t.Fatal(err) } @@ -103,7 +103,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { // can boot into the same cluster after being restored from a same // snapshot file. func TestSnapshotV3RestoreMulti(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} dbPath := createSnapshotFile(t, kvs) @@ -119,7 +119,7 @@ func TestSnapshotV3RestoreMulti(t *testing.T) { time.Sleep(time.Second) for i := 0; i < clusterN; i++ { - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}}) if err != nil { t.Fatal(err) } @@ -139,8 +139,8 @@ func TestSnapshotV3RestoreMulti(t *testing.T) { // TestCorruptedBackupFileCheck tests if we can correctly identify a corrupted backup file. func TestCorruptedBackupFileCheck(t *testing.T) { - dbPath := integration.MustAbsPath("testdata/corrupted_backup.db") - integration.BeforeTest(t) + dbPath := integration2.MustAbsPath("testdata/corrupted_backup.db") + integration2.BeforeTest(t) if _, err := os.Stat(dbPath); err != nil { t.Fatalf("test file [%s] does not exist: %v", dbPath, err) } @@ -175,7 +175,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string { urls := newEmbedURLs(clusterN * 2) cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration.NewEmbedConfig(t, "default") + cfg := integration2.NewEmbedConfig(t, "default") cfg.ClusterState = "new" cfg.LCUrls, cfg.ACUrls = cURLs, cURLs cfg.LPUrls, cfg.APUrls = pURLs, pURLs @@ -194,7 +194,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string { } ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } @@ -234,7 +234,7 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) ( cfgs := make([]*embed.Config, clusterN) for i := 0; i < clusterN; i++ { - cfg := integration.NewEmbedConfig(t, fmt.Sprintf("m%d", i)) + cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i)) cfg.InitialClusterToken = testClusterTkn cfg.ClusterState = "existing" cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]} diff --git a/tests/integration/testing_test.go b/tests/integration/testing_test.go index a225063b1..dfd75e893 100644 --- a/tests/integration/testing_test.go +++ b/tests/integration/testing_test.go @@ -15,12 +15,13 @@ package integration_test import ( - "go.etcd.io/etcd/tests/v3/integration" "testing" + + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func 
TestBeforeTestWithoutLeakDetection(t *testing.T) { - integration.BeforeTest(t, integration.WithoutGoLeakDetection(), integration.WithoutSkipInShort()) + integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort()) // Intentional leak that should get ignored go func() { diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go index deed4a46c..774b25385 100644 --- a/tests/integration/utl_wal_version_test.go +++ b/tests/integration/utl_wal_version_test.go @@ -21,6 +21,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/stretchr/testify/assert" + "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap" "go.etcd.io/etcd/client/pkg/v3/testutil" @@ -33,7 +34,7 @@ import ( func TestEtcdVersionFromWAL(t *testing.T) { testutil.SkipTestIfShortMode(t, "Wal creation tests are depending on embedded etcd server so are integration-level tests.") - cfg := NewEmbedConfig(t, "default") + cfg := integration.NewEmbedConfig(t, "default") srv, err := embed.StartEtcd(cfg) if err != nil { t.Fatal(err) @@ -45,7 +46,7 @@ func TestEtcdVersionFromWAL(t *testing.T) { } ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := NewClient(t, ccfg) + cli, err := integration.NewClient(t, ccfg) if err != nil { srv.Close() t.Fatal(err) diff --git a/tests/integration/v2_http_kv_test.go b/tests/integration/v2_http_kv_test.go index 36f7cea82..128625a4c 100644 --- a/tests/integration/v2_http_kv_test.go +++ b/tests/integration/v2_http_kv_test.go @@ -27,11 +27,12 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestV2Set(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -92,8 +93,8 @@ func TestV2Set(t *testing.T) { } func TestV2CreateUpdate(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -228,8 +229,8 @@ func TestV2CreateUpdate(t *testing.T) { } func TestV2CAS(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -376,8 +377,8 @@ func TestV2CAS(t *testing.T) { } func TestV2Delete(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -476,8 +477,8 @@ func TestV2Delete(t *testing.T) { } func TestV2CAD(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -576,8 +577,8 @@ func TestV2CAD(t *testing.T) { } func TestV2Unique(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -643,8 +644,8 @@ func TestV2Unique(t *testing.T) { } func TestV2Get(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -741,8 +742,8 @@ func TestV2Get(t *testing.T) { } func TestV2QuorumGet(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -839,8 +840,8 @@ func TestV2QuorumGet(t *testing.T) { } func TestV2Watch(t 
*testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -877,8 +878,8 @@ func TestV2Watch(t *testing.T) { } func TestV2WatchWithIndex(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -944,8 +945,8 @@ func TestV2WatchWithIndex(t *testing.T) { } func TestV2WatchKeyInDir(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -1005,8 +1006,8 @@ func TestV2WatchKeyInDir(t *testing.T) { } func TestV2Head(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) diff --git a/tests/integration/v2store/store_tag_not_v2v3_test.go b/tests/integration/v2store/store_tag_not_v2v3_test.go index b76552f04..012fe4c52 100644 --- a/tests/integration/v2store/store_tag_not_v2v3_test.go +++ b/tests/integration/v2store/store_tag_not_v2v3_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) type v2TestStore struct { @@ -41,7 +41,7 @@ func newTestStore(t *testing.T, ns ...string) StoreCloser { // Ensure that the store can recover from a previously saved state. func TestStoreRecover(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) s := newTestStore(t) defer s.Close() var eidx uint64 = 4 diff --git a/tests/integration/v2store/store_tag_v2v3_test.go b/tests/integration/v2store/store_tag_v2v3_test.go index f4e8e21fb..970d9643a 100644 --- a/tests/integration/v2store/store_tag_v2v3_test.go +++ b/tests/integration/v2store/store_tag_v2v3_test.go @@ -22,20 +22,20 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) type v2v3TestStore struct { v2store.Store - clus *integration.ClusterV3 + clus *integration2.ClusterV3 t *testing.T } func (s *v2v3TestStore) Close() { s.clus.Terminate(s.t) } func newTestStore(t *testing.T, ns ...string) StoreCloser { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) return &v2v3TestStore{ v2v3.NewStore(clus.Client(0), "/v2/"), clus, diff --git a/tests/integration/v2store/store_v2v3_test.go b/tests/integration/v2store/store_v2v3_test.go index 3a6eab14a..2266cb918 100644 --- a/tests/integration/v2store/store_v2v3_test.go +++ b/tests/integration/v2store/store_v2v3_test.go @@ -21,15 +21,15 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TODO: fix tests func runWithCluster(t testing.TB, runner func(testing.TB, []string)) { - integration.BeforeTest(t) - cfg := integration.ClusterConfig{Size: 1} - clus := integration.NewClusterV3(t, &cfg) + integration2.BeforeTest(t) + cfg := integration2.ClusterConfig{Size: 1} + clus := 
integration2.NewClusterV3(t, &cfg) defer clus.Terminate(t) endpoints := []string{clus.Client(0).Endpoints()[0]} runner(t, endpoints) @@ -38,7 +38,7 @@ func runWithCluster(t testing.TB, runner func(testing.TB, []string)) { func TestCreateKV(t *testing.T) { runWithCluster(t, testCreateKV) } func testCreateKV(t testing.TB, endpoints []string) { - integration.BeforeTest(t) + integration2.BeforeTest(t) testCases := []struct { key string value string @@ -54,7 +54,7 @@ func testCreateKV(t testing.TB, endpoints []string) { //{key: "hello", value: "3", unique: true, wantKeyMatch: false}, } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints}) if err != nil { t.Fatal(err) } @@ -102,7 +102,7 @@ func testSetKV(t testing.TB, endpoints []string) { {key: "/sdir/set", value: "4", wantIndexMatch: false}, } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints}) if err != nil { t.Fatal(err) } @@ -130,7 +130,7 @@ func testSetKV(t testing.TB, endpoints []string) { func TestCreateSetDir(t *testing.T) { runWithCluster(t, testCreateSetDir) } func testCreateSetDir(t testing.TB, endpoints []string) { - integration.BeforeTest(t) + integration2.BeforeTest(t) testCases := []struct { dir string }{ @@ -138,7 +138,7 @@ func testCreateSetDir(t testing.TB, endpoints []string) { {dir: "/ddir/1/2/3"}, } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go index dc2191253..929d9b926 100644 --- a/tests/integration/v3_alarm_test.go +++ b/tests/integration/v3_alarm_test.go @@ -27,24 +27,25 @@ import ( "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" ) // TestV3StorageQuotaApply tests the V3 server respects quotas during apply func TestV3StorageQuotaApply(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) quotasize := int64(16 * os.Getpagesize()) - clus := NewClusterV3(t, &ClusterConfig{Size: 2, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) - kvc0 := toGRPC(clus.Client(0)).KV - kvc1 := toGRPC(clus.Client(1)).KV + kvc0 := integration.ToGRPC(clus.Client(0)).KV + kvc1 := integration.ToGRPC(clus.Client(1)).KV // Set a quota on one node clus.Members[0].QuotaBackendBytes = quotasize clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) waitForRestart(t, kvc0) key := []byte("abc") @@ -73,7 +74,7 @@ func TestV3StorageQuotaApply(t *testing.T) { stopc := time.After(5 * time.Second) for { req := &pb.AlarmRequest{Action: pb.AlarmRequest_GET} - resp, aerr := clus.Members[0].s.Alarm(context.TODO(), req) + resp, aerr := clus.Members[0].Server.Alarm(context.TODO(), req) if aerr != nil { t.Fatal(aerr) } @@ -87,7 +88,7 @@ func TestV3StorageQuotaApply(t *testing.T) { } } - ctx, cancel := context.WithTimeout(context.TODO(), RequestWaitTimeout) + ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout) defer cancel() // small quota machine should reject put @@ -103,7 +104,7 @@ func 
TestV3StorageQuotaApply(t *testing.T) { // reset large quota node to ensure alarm persisted clus.Members[1].Stop(t) clus.Members[1].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) if _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil { t.Fatalf("alarmed instance should reject put after reset") @@ -112,12 +113,12 @@ func TestV3StorageQuotaApply(t *testing.T) { // TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through. func TestV3AlarmDeactivate(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV - mt := toGRPC(clus.RandClient()).Maintenance + kvc := integration.ToGRPC(clus.RandClient()).KV + mt := integration.ToGRPC(clus.RandClient()).Maintenance alarmReq := &pb.AlarmRequest{ MemberID: 123, @@ -146,8 +147,8 @@ func TestV3AlarmDeactivate(t *testing.T) { } func TestV3CorruptAlarm(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) var wg sync.WaitGroup diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go index 286f2dbe6..5d915a964 100644 --- a/tests/integration/v3_auth_test.go +++ b/tests/integration/v3_auth_test.go @@ -26,18 +26,19 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error. 
func TestV3AuthEmptyUserGet(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() - api := toGRPC(clus.Client(0)) + api := integration.ToGRPC(clus.Client(0)) authSetupRoot(t, api.Auth) _, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")}) @@ -49,13 +50,13 @@ func TestV3AuthEmptyUserGet(t *testing.T) { // TestV3AuthTokenWithDisable tests that auth won't crash if // given a valid token when authentication is disabled func TestV3AuthTokenWithDisable(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) + c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) if cerr != nil { t.Fatal(cerr) } @@ -81,11 +82,11 @@ func TestV3AuthTokenWithDisable(t *testing.T) { } func TestV3AuthRevision(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - api := toGRPC(clus.Client(0)) + api := integration.ToGRPC(clus.Client(0)) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) presp, perr := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) @@ -109,25 +110,25 @@ func TestV3AuthRevision(t *testing.T) { // TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases // with root user be revoked after TTL. func TestV3AuthWithLeaseRevokeWithRoot(t *testing.T) { - testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1}) + testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1}) } // TestV3AuthWithLeaseRevokeWithRootJWT creates a lease with a JWT-token enabled cluster. // And tests if server is able to revoke expiry lease item. 
func TestV3AuthWithLeaseRevokeWithRootJWT(t *testing.T) { - testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1, AuthToken: defaultTokenJWT}) + testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1, AuthToken: integration.DefaultTokenJWT}) } -func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg ClusterConfig) { - BeforeTest(t) +func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterConfig) { + integration.BeforeTest(t) - clus := NewClusterV3(t, &ccfg) + clus := integration.NewClusterV3(t, &ccfg) defer clus.Terminate(t) - api := toGRPC(clus.Client(0)) + api := integration.ToGRPC(clus.Client(0)) authSetupRoot(t, api.Auth) - rootc, cerr := NewClient(t, clientv3.Config{ + rootc, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123", @@ -177,8 +178,8 @@ type user struct { } func TestV3AuthWithLeaseRevoke(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) users := []user{ @@ -190,11 +191,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { end: "k2", }, } - authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users) + authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - rootc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) + rootc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) if cerr != nil { t.Fatal(cerr) } @@ -211,7 +212,7 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { t.Fatal(err) } - userc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) + userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) if cerr != nil { t.Fatal(cerr) } @@ -223,8 +224,8 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { } func TestV3AuthWithLeaseAttach(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) users := []user{ @@ -243,17 +244,17 @@ func TestV3AuthWithLeaseAttach(t *testing.T) { end: "k4", }, } - authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users) + authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - user1c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) + user1c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) if cerr != nil { t.Fatal(cerr) } defer user1c.Close() - user2c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"}) + user2c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"}) if cerr != nil { t.Fatal(cerr) } @@ -335,8 +336,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) { } func TestV3AuthNonAuthorizedRPCs(t *testing.T) { - 
BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) nonAuthedKV := clus.Client(0).KV @@ -348,7 +349,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) { t.Fatalf("couldn't put key (%v)", err) } - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) respput, err := nonAuthedKV.Put(context.TODO(), key, val) if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) { @@ -358,13 +359,13 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) { func TestV3AuthOldRevConcurrent(t *testing.T) { t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed. - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - c, cerr := NewClient(t, clientv3.Config{ + c, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: clus.Client(0).Endpoints(), DialTimeout: 5 * time.Second, Username: "root", diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go index a3aed9ba6..b726f26b3 100644 --- a/tests/integration/v3_election_test.go +++ b/tests/integration/v3_election_test.go @@ -22,20 +22,21 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestElectionWait tests if followers can correctly wait for elections. func TestElectionWait(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) leaders := 3 followers := 3 var clients []*clientv3.Client - newClient := MakeMultiNodeClients(t, clus, &clients) + newClient := integration.MakeMultiNodeClients(t, clus, &clients) defer func() { - CloseClients(t, clients) + integration.CloseClients(t, clients) }() electedc := make(chan string) @@ -108,8 +109,8 @@ func TestElectionWait(t *testing.T) { // TestElectionFailover tests that an election will func TestElectionFailover(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) cctx, cancel := context.WithCancel(context.TODO()) @@ -119,7 +120,7 @@ func TestElectionFailover(t *testing.T) { for i := 0; i < 3; i++ { var err error - ss[i], err = concurrency.NewSession(clus.clients[i]) + ss[i], err = concurrency.NewSession(clus.Clients[i]) if err != nil { t.Error(err) } @@ -176,8 +177,8 @@ func TestElectionFailover(t *testing.T) { // TestElectionSessionRelock ensures that campaigning twice on the same election // with the same lock will Proclaim instead of deadlocking. func TestElectionSessionRecampaign(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -209,8 +210,8 @@ func TestElectionSessionRecampaign(t *testing.T) { // of bug #6278. 
https://github.com/etcd-io/etcd/issues/6278 // func TestElectionOnPrefixOfExistingKey(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -236,8 +237,8 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) { // in a new session with the same lease id) does not result in loss of // leadership. func TestElectionOnSessionRestart(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -283,8 +284,8 @@ func TestElectionOnSessionRestart(t *testing.T) { // TestElectionObserveCompacted checks that observe can tolerate // a leader key with a modrev less than the compaction revision. func TestElectionObserveCompacted(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go index 7432fb46a..b96bac45a 100644 --- a/tests/integration/v3_grpc_inflight_test.go +++ b/tests/integration/v3_grpc_inflight_test.go @@ -22,6 +22,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -30,12 +31,12 @@ import ( // TestV3MaintenanceDefragmentInflightRange ensures inflight range requests // does not panic the mvcc backend while defragment is running. func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() - kvc := toGRPC(cli).KV + kvc := integration.ToGRPC(cli).KV if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { t.Fatal(err) } @@ -48,7 +49,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")}) }() - mvc := toGRPC(cli).Maintenance + mvc := integration.ToGRPC(cli).Maintenance mvc.Defragment(context.Background(), &pb.DefragmentRequest{}) cancel() @@ -60,12 +61,12 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { // They are either finished or canceled, but never crash the backend. // See https://github.com/etcd-io/etcd/issues/7322 for more detail. 
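// The shape shared by these inflight tests, sketched from identifiers in the
// hunks above (the one-second timeout is illustrative):
//
//	kvc := integration.ToGRPC(cli).KV
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	go kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")}) // in-flight read
//	mvc := integration.ToGRPC(cli).Maintenance
//	mvc.Defragment(context.Background(), &pb.DefragmentRequest{}) // racing defrag
//	cancel() // the canceled range may error, but must never crash the backend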
func TestV3KVInflightRangeRequests(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.RandClient() - kvc := toGRPC(cli).KV + kvc := integration.ToGRPC(cli).KV if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { t.Fatal(err) diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go index ca9e5c8ad..c7b2a681b 100644 --- a/tests/integration/v3_grpc_test.go +++ b/tests/integration/v3_grpc_test.go @@ -30,6 +30,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -37,14 +38,14 @@ import ( "google.golang.org/grpc/status" ) -// TestV3PutOverwrite puts a key with the v3 api to a random cluster member, +// TestV3PutOverwrite puts a key with the v3 api to a random Cluster member, // overwrites it, then checks that the change was applied. func TestV3PutOverwrite(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV key := []byte("foo") reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true} @@ -88,26 +89,26 @@ func TestV3PutOverwrite(t *testing.T) { // TestPutRestart checks if a put after an unrelated member restart succeeds func TestV3PutRestart(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kvIdx := rand.Intn(3) - kvc := toGRPC(clus.Client(kvIdx)).KV + kvc := integration.ToGRPC(clus.Client(kvIdx)).KV stopIdx := kvIdx for stopIdx == kvIdx { stopIdx = rand.Intn(3) } - clus.clients[stopIdx].Close() + clus.Clients[stopIdx].Close() clus.Members[stopIdx].Stop(t) clus.Members[stopIdx].Restart(t) - c, cerr := NewClientV3(clus.Members[stopIdx]) + c, cerr := integration.NewClientV3(clus.Members[stopIdx]) if cerr != nil { t.Fatalf("cannot create client: %v", cerr) } - clus.clients[stopIdx] = c + clus.Clients[stopIdx] = c ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) defer cancel() @@ -120,11 +121,11 @@ func TestV3PutRestart(t *testing.T) { // TestV3CompactCurrentRev ensures keys are present when compacting on current revision. func TestV3CompactCurrentRev(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} for i := 0; i < 3; i++ { if _, err := kvc.Put(context.Background(), preq); err != nil { @@ -154,12 +155,12 @@ func TestV3CompactCurrentRev(t *testing.T) { // TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev. 
func TestV3HashKV(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV - mvc := toGRPC(clus.RandClient()).Maintenance + kvc := integration.ToGRPC(clus.RandClient()).KV + mvc := integration.ToGRPC(clus.RandClient()).Maintenance for i := 0; i < 10; i++ { resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))}) @@ -202,12 +203,12 @@ func TestV3HashKV(t *testing.T) { } func TestV3TxnTooManyOps(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) maxTxnOps := uint(128) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV // unique keys i := new(int) @@ -278,8 +279,8 @@ func TestV3TxnTooManyOps(t *testing.T) { } func TestV3TxnDuplicateKeys(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}} @@ -323,7 +324,7 @@ func TestV3TxnDuplicateKeys(t *testing.T) { }, } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV tests := []struct { txnSuccess []*pb.RequestOp @@ -396,11 +397,11 @@ func TestV3TxnDuplicateKeys(t *testing.T) { // Testv3TxnRevision tests that the transaction header revision is set as expected. func TestV3TxnRevision(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")} presp, err := kvc.Put(context.TODO(), pr) if err != nil { @@ -447,11 +448,11 @@ func TestV3TxnRevision(t *testing.T) { // Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected // when compared to the Succeeded field in the txn response. func TestV3TxnCmpHeaderRev(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV for i := 0; i < 10; i++ { // Concurrently put a key with a txn comparing on it. 
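Every hunk in this file applies the same mechanical recipe, so it is worth spelling out once. A minimal before/after sketch of the migration, using only identifiers that appear in the surrounding hunks:

	// before: the test lived in the same package as the harness,
	// calling its unexported helpers directly
	BeforeTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	kvc := toGRPC(clus.RandClient()).KV

	// after: the harness moved to tests/v3/framework/integration
	// and exports its API
	integration.BeforeTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	kvc := integration.ToGRPC(clus.RandClient()).KV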
@@ -503,8 +504,8 @@ func TestV3TxnCmpHeaderRev(t *testing.T) { // TestV3TxnRangeCompare tests range comparisons in txns func TestV3TxnRangeCompare(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) // put keys, named by expected revision @@ -598,7 +599,7 @@ func TestV3TxnRangeCompare(t *testing.T) { }, } - kvc := toGRPC(clus.Client(0)).KV + kvc := integration.ToGRPC(clus.Client(0)).KV for i, tt := range tests { txn := &pb.TxnRequest{} txn.Compare = append(txn.Compare, &tt.cmp) @@ -614,11 +615,11 @@ func TestV3TxnRangeCompare(t *testing.T) { // TestV3TxnNested tests nested txns follow paths as expected. func TestV3TxnNestedPath(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV cmpTrue := &pb.Compare{ Result: pb.Compare_EQUAL, @@ -667,17 +668,17 @@ func TestV3TxnNestedPath(t *testing.T) { // TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair. func TestV3PutIgnoreValue(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV key, val := []byte("foo"), []byte("bar") putReq := pb.PutRequest{Key: key, Value: val} // create lease - lc := toGRPC(clus.RandClient()).Lease + lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err != nil { t.Fatal(err) @@ -800,15 +801,15 @@ func TestV3PutIgnoreValue(t *testing.T) { // TestV3PutIgnoreLease ensures that writes with ignore_lease uses previous lease for the key overwrites. func TestV3PutIgnoreLease(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV // create lease - lc := toGRPC(clus.RandClient()).Lease + lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err != nil { t.Fatal(err) @@ -940,11 +941,11 @@ func TestV3PutIgnoreLease(t *testing.T) { // TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails. func TestV3PutMissingLease(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV key := []byte("foo") preq := &pb.PutRequest{Key: key, Lease: 123456} tests := []func(){ @@ -1011,7 +1012,7 @@ func TestV3PutMissingLease(t *testing.T) { // TestV3DeleteRange tests various edge cases in the DeleteRange API. 
func TestV3DeleteRange(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { name string @@ -1069,8 +1070,8 @@ func TestV3DeleteRange(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) - kvc := toGRPC(clus.RandClient()).KV + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + kvc := integration.ToGRPC(clus.RandClient()).KV defer clus.Terminate(t) ks := tt.keySet @@ -1123,11 +1124,11 @@ func TestV3DeleteRange(t *testing.T) { // TestV3TxnInvalidRange tests that invalid ranges are rejected in txns. func TestV3TxnInvalidRange(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} for i := 0; i < 3; i++ { @@ -1166,12 +1167,12 @@ func TestV3TxnInvalidRange(t *testing.T) { } func TestV3TooLargeRequest(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV // 2MB request value largeV := make([]byte, 2*1024*1024) @@ -1185,13 +1186,13 @@ func TestV3TooLargeRequest(t *testing.T) { // TestV3Hash tests hash. func TestV3Hash(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) cli := clus.RandClient() - kvc := toGRPC(cli).KV - m := toGRPC(cli).Maintenance + kvc := integration.ToGRPC(cli).KV + m := integration.ToGRPC(cli).Maintenance preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} @@ -1210,12 +1211,12 @@ func TestV3Hash(t *testing.T) { // TestV3HashRestart ensures that hash stays the same after restart. 
func TestV3HashRestart(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.RandClient() - resp, err := toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) + resp, err := integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) if err != nil || resp.Hash == 0 { t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) } @@ -1223,12 +1224,12 @@ func TestV3HashRestart(t *testing.T) { clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) - kvc := toGRPC(clus.Client(0)).KV + clus.WaitMembersForLeader(t, clus.Members) + kvc := integration.ToGRPC(clus.Client(0)).KV waitForRestart(t, kvc) cli = clus.RandClient() - resp, err = toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) + resp, err = integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) if err != nil || resp.Hash == 0 { t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) } @@ -1241,10 +1242,10 @@ func TestV3HashRestart(t *testing.T) { // TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer func TestV3StorageQuotaAPI(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) quotasize := int64(16 * os.Getpagesize()) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) // Set a quota on one node clus.Members[0].QuotaBackendBytes = quotasize @@ -1252,7 +1253,7 @@ func TestV3StorageQuotaAPI(t *testing.T) { clus.Members[0].Restart(t) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV + kvc := integration.ToGRPC(clus.Client(0)).KV waitForRestart(t, kvc) key := []byte("abc") @@ -1288,7 +1289,7 @@ func TestV3StorageQuotaAPI(t *testing.T) { } func TestV3RangeRequest(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { name string @@ -1508,10 +1509,10 @@ func TestV3RangeRequest(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) for _, k := range tt.putKeys { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")} if _, err := kvc.Put(context.TODO(), req); err != nil { t.Fatalf("#%d: couldn't put key (%v)", i, err) @@ -1519,7 +1520,7 @@ func TestV3RangeRequest(t *testing.T) { } for j, req := range tt.reqs { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV resp, err := kvc.Range(context.TODO(), &req) if err != nil { t.Errorf("#%d.%d: Range error: %v", i, j, err) @@ -1550,24 +1551,24 @@ func TestV3RangeRequest(t *testing.T) { } } -func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 { +func newClusterV3NoClients(t *testing.T, cfg *integration.ClusterConfig) *integration.ClusterV3 { cfg.UseGRPC = true - clus := &ClusterV3{cluster: NewClusterByConfig(t, cfg)} + clus := &integration.ClusterV3{Cluster: integration.NewClusterByConfig(t, cfg)} clus.Launch(t) return clus } // TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client. 
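// The rejection flow the TLS tests below assert, sketched with this patch's
// identifiers (error handling abbreviated):
//
//	cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo}
//	clus := newClusterV3NoClients(t, &cfg)
//	clus.Members[0].ClientTLSInfo = nil // server keeps TLS; client dials plaintext
//	client, err := integration.NewClientV3(clus.Members[0])
//	// any KV call through integration.ToGRPC(client).KV should now fail,
//	// since the TLS-enabled server must reject the insecure connection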
func TestTLSGRPCRejectInsecureClient(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo} + cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) // nil out TLS field so client will use an insecure connection clus.Members[0].ClientTLSInfo = nil - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil && err != context.DeadlineExceeded { t.Fatalf("unexpected error (%v)", err) } else if client == nil { @@ -1582,7 +1583,7 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) { go func() { ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - _, perr := toGRPC(client).KV.Put(ctx, reqput) + _, perr := integration.ToGRPC(client).KV.Put(ctx, reqput) cancel() donec <- perr }() @@ -1594,16 +1595,16 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) { // TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server. func TestTLSGRPCRejectSecureClient(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3} + cfg := integration.ClusterConfig{Size: 3} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) - clus.Members[0].ClientTLSInfo = &testTLSInfo + clus.Members[0].ClientTLSInfo = &integration.TestTLSInfo clus.Members[0].DialOptions = []grpc.DialOption{grpc.WithBlock()} - clus.Members[0].grpcURL = strings.Replace(clus.Members[0].grpcURL, "http://", "https://", 1) - client, err := NewClientV3(clus.Members[0]) + clus.Members[0].GrpcURL = strings.Replace(clus.Members[0].GrpcURL, "http://", "https://", 1) + client, err := integration.NewClientV3(clus.Members[0]) if client != nil || err == nil { client.Close() t.Fatalf("expected no client") @@ -1614,20 +1615,20 @@ func TestTLSGRPCRejectSecureClient(t *testing.T) { // TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS func TestTLSGRPCAcceptSecureAll(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo} + cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("expected tls client (%v)", err) } defer client.Close() reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := toGRPC(client).KV.Put(context.TODO(), reqput); err != nil { + if _, err := integration.ToGRPC(client).KV.Put(context.TODO(), reqput); err != nil { t.Fatalf("unexpected error on put over tls (%v)", err) } } @@ -1656,11 +1657,11 @@ func TestTLSReloadAtomicReplace(t *testing.T) { defer os.RemoveAll(certsDirExp) cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) + tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir) if terr != nil { t.Fatal(terr) } - if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoExpired, certsDirExp); err != nil { t.Fatal(err) } return tlsInfo @@ -1702,19 +1703,19 @@ func TestTLSReloadCopy(t *testing.T) { defer os.RemoveAll(certsDir) cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) + 
tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir) if terr != nil { t.Fatal(terr) } return tlsInfo } replaceFunc := func() { - if _, err = copyTLSFiles(testTLSInfoExpired, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoExpired, certsDir); err != nil { t.Fatal(err) } } revertFunc := func() { - if _, err = copyTLSFiles(testTLSInfo, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfo, certsDir); err != nil { t.Fatal(err) } } @@ -1732,19 +1733,19 @@ func TestTLSReloadCopyIPOnly(t *testing.T) { defer os.RemoveAll(certsDir) cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(testTLSInfoIP, certsDir) + tlsInfo, terr := copyTLSFiles(integration.TestTLSInfoIP, certsDir) if terr != nil { t.Fatal(terr) } return tlsInfo } replaceFunc := func() { - if _, err = copyTLSFiles(testTLSInfoExpiredIP, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoExpiredIP, certsDir); err != nil { t.Fatal(err) } } revertFunc := func() { - if _, err = copyTLSFiles(testTLSInfoIP, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoIP, certsDir); err != nil { t.Fatal(err) } } @@ -1757,13 +1758,13 @@ func testTLSReload( replaceFunc func(), revertFunc func(), useIP bool) { - BeforeTest(t) + integration.BeforeTest(t) // 1. separate copies for TLS assets modification tlsInfo := cloneFunc() // 2. start cluster with valid certs - clus := NewClusterV3(t, &ClusterConfig{ + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ Size: 1, PeerTLS: &tlsInfo, ClientTLS: &tlsInfo, @@ -1785,7 +1786,7 @@ func testTLSReload( t.Log(err) continue } - cli, cerr := NewClient(t, clientv3.Config{ + cli, cerr := integration.NewClient(t, clientv3.Config{ DialOptions: []grpc.DialOption{grpc.WithBlock()}, Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: time.Second, @@ -1820,7 +1821,7 @@ func testTLSReload( if terr != nil { t.Fatal(terr) } - cl, cerr := NewClient(t, clientv3.Config{ + cl, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: 5 * time.Second, TLS: tls, @@ -1832,46 +1833,46 @@ func testTLSReload( } func TestGRPCRequireLeader(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3} + cfg := integration.ClusterConfig{Size: 3} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) clus.Members[1].Stop(t) clus.Members[2].Stop(t) - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("cannot create client: %v", err) } defer client.Close() // wait for election timeout, then member[0] will not have a leader. 
- time.Sleep(time.Duration(3*electionTicks) * tickDuration) + time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) ctx := metadata.NewOutgoingContext(context.Background(), md) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := toGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { + if _, err := integration.ToGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) } } func TestGRPCStreamRequireLeader(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3, UseBridge: true} + cfg := integration.ClusterConfig{Size: 3, UseBridge: true} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("failed to create client (%v)", err) } defer client.Close() - wAPI := toGRPC(client).Watch + wAPI := integration.ToGRPC(client).Watch md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) ctx := metadata.NewOutgoingContext(context.Background(), md) wStream, err := wAPI.Watch(ctx) @@ -1901,8 +1902,8 @@ func TestGRPCStreamRequireLeader(t *testing.T) { clus.Members[1].Restart(t) clus.Members[2].Restart(t) - clus.waitLeader(t, clus.Members) - time.Sleep(time.Duration(2*electionTicks) * tickDuration) + clus.WaitMembersForLeader(t, clus.Members) + time.Sleep(time.Duration(2*integration.ElectionTicks) * integration.TickDuration) // new stream should also be OK now after we restarted the other members wStream, err = wAPI.Watch(ctx) @@ -1922,7 +1923,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) { // TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended. 
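A condensed sketch of one table case from the test below, reusing the file's eqErrGRPC helper; the 1 KiB cap and the rpctypes.ErrGRPCRequestTooLarge constant are this sketch's assumptions about the expected failure:

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, MaxRequestBytes: 1024})
	defer clus.Terminate(t)

	kvc := integration.ToGRPC(clus.Client(0)).KV
	// A put whose payload exceeds the configured cap should be rejected server-side.
	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("k"), Value: make([]byte, 4096)})
	if !eqErrGRPC(err, rpctypes.ErrGRPCRequestTooLarge) {
		t.Fatalf("expected request-too-large error, got %v", err)
	}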
func TestV3LargeRequests(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { maxRequestBytes uint valueSize int @@ -1936,9 +1937,9 @@ func TestV3LargeRequests(t *testing.T) { } for i, test := range tests { t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) defer clus.Terminate(t) - kvcli := toGRPC(clus.Client(0)).KV + kvcli := integration.ToGRPC(clus.Client(0)).KV reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)} _, err := kvcli.Put(context.TODO(), reqput) if !eqErrGRPC(err, test.expectError) { diff --git a/tests/integration/v3_health_test.go b/tests/integration/v3_health_test.go index fd7257fb9..2bd03588d 100644 --- a/tests/integration/v3_health_test.go +++ b/tests/integration/v3_health_test.go @@ -18,13 +18,14 @@ import ( "context" "testing" + "go.etcd.io/etcd/tests/v3/framework/integration" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) func TestHealthCheck(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := healthpb.NewHealthClient(clus.RandClient().ActiveConnection()) diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go index aca4aeb49..851edd294 100644 --- a/tests/integration/v3_kv_test.go +++ b/tests/integration/v3_kv_test.go @@ -6,13 +6,14 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestKVWithEmptyValue ensures that a get/delete with an empty value, and with WithFromKey/WithPrefix function will return an empty error. 
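A rough sketch of the clientv3 call shapes that test covers; the behaviors stated in the comments are the assumptions under test, and the real test's assertions are richer:

	cli := clus.RandClient()
	// A plain get with an empty key is expected to fail with an empty-key error.
	if _, err := cli.Get(context.TODO(), ""); err == nil {
		t.Fatal("expected an error for the empty key")
	}
	// With WithFromKey, the empty key instead means "from the start of the keyspace".
	if _, err := cli.Get(context.TODO(), "", clientv3.WithFromKey()); err != nil {
		t.Fatal(err)
	}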
func TestKVWithEmptyValue(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) client := clus.RandClient() diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go index e530bbdfc..8fba02c86 100644 --- a/tests/integration/v3_leadership_test.go +++ b/tests/integration/v3_leadership_test.go @@ -23,6 +23,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/tests/v3/framework/integration" "golang.org/x/sync/errgroup" ) @@ -30,13 +31,13 @@ func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) } func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) } func testMoveLeader(t *testing.T, auto bool) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) oldLeadIdx := clus.WaitLeader(t) - oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID()) + oldLeadID := uint64(clus.Members[oldLeadIdx].Server.ID()) // ensure followers go through leader transition while leadership transfer idc := make(chan uint64) @@ -45,23 +46,23 @@ func testMoveLeader(t *testing.T, auto bool) { for i := range clus.Members { if oldLeadIdx != i { - go func(m *member) { + go func(m *integration.Member) { select { - case idc <- checkLeaderTransition(m, oldLeadID): + case idc <- integration.CheckLeaderTransition(m, oldLeadID): case <-stopc: } }(clus.Members[i]) } } - target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID()) + target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.ID()) if auto { - err := clus.Members[oldLeadIdx].s.TransferLeadership() + err := clus.Members[oldLeadIdx].Server.TransferLeadership() if err != nil { t.Fatal(err) } } else { - mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance + mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) if err != nil { t.Fatal(err) @@ -98,17 +99,17 @@ func testMoveLeader(t *testing.T, auto bool) { // TestMoveLeaderError ensures that request to non-leader fail. func TestMoveLeaderError(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) oldLeadIdx := clus.WaitLeader(t) followerIdx := (oldLeadIdx + 1) % 3 - target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID()) + target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.ID()) - mvc := toGRPC(clus.Client(followerIdx)).Maintenance + mvc := integration.ToGRPC(clus.Client(followerIdx)).Maintenance _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) { t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader) @@ -117,9 +118,9 @@ func TestMoveLeaderError(t *testing.T) { // TestMoveLeaderToLearnerError ensures that leader transfer to learner member will fail. 
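Stripped to its core, the refusal the next test asserts looks like this; clus and learnerID are assumed to be set up as in the test body, and the exact gRPC error code is left unchecked because this diff does not show it:

	mvc := integration.ToGRPC(clus.Client(0)).Maintenance
	// Leadership must never move to a learner, so this RPC has to fail.
	if _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: learnerID}); err == nil {
		t.Fatal("expected MoveLeader to a learner to fail")
	}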
func TestMoveLeaderToLearnerError(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // we have to add and launch learner member after initial cluster was created, because @@ -128,10 +129,10 @@ func TestMoveLeaderToLearnerError(t *testing.T) { learners, err := clus.GetLearnerMembers() if err != nil { t.Fatalf("failed to get the learner members in cluster: %v", err) } if len(learners) != 1 { t.Fatalf("added 1 learner to cluster, got %d", len(learners)) } learnerID := learners[0].ID @@ -150,19 +151,19 @@ func TestMoveLeaderToLearnerError(t *testing.T) { // TestTransferLeadershipWithLearner ensures TransferLeadership does not time out when a learner is // automatically picked by the leader as transferee. func TestTransferLeadershipWithLearner(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) clus.AddAndLaunchLearnerMember(t) learners, err := clus.GetLearnerMembers() if err != nil { t.Fatalf("failed to get the learner members in cluster: %v", err) } if len(learners) != 1 { t.Fatalf("added 1 learner to cluster, got %d", len(learners)) } leaderIdx := clus.WaitLeader(t) @@ -170,7 +171,7 @@ func TestTransferLeadershipWithLearner(t *testing.T) { go func() { // note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil. // Leadership transfer is skipped in cluster with 1 voting member. - errCh <- clus.Members[leaderIdx].s.TransferLeadership() + errCh <- clus.Members[leaderIdx].Server.TransferLeadership() }() select { case err := <-errCh: @@ -183,10 +184,10 @@ func TestTransferLeadershipWithLearner(t *testing.T) { } func TestFirstCommitNotification(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) ctx := context.Background() clusterSize := 3 - cluster := NewClusterV3(t, &ClusterConfig{Size: clusterSize}) + cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize}) defer cluster.Terminate(t) oldLeaderIdx := cluster.WaitLeader(t) @@ -197,7 +198,7 @@ func TestFirstCommitNotification(t *testing.T) { notifiers := make(map[int]<-chan struct{}, clusterSize) for i, clusterMember := range cluster.Members { - notifiers[i] = clusterMember.s.FirstCommitInTermNotify() + notifiers[i] = clusterMember.Server.FirstCommitInTermNotify() } _, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderId) @@ -215,7 +216,7 @@ func TestFirstCommitNotification(t *testing.T) { } // It's guaranteed now that leader contains the 'foo'->'bar' index entry.
- leaderAppliedIndex := cluster.Members[newLeaderIdx].s.AppliedIndex() + leaderAppliedIndex := cluster.Members[newLeaderIdx].Server.AppliedIndex() ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -238,13 +239,13 @@ func TestFirstCommitNotification(t *testing.T) { func checkFirstCommitNotification( ctx context.Context, t testing.TB, - member *member, + member *integration.Member, leaderAppliedIndex uint64, notifier <-chan struct{}, ) error { // wait until server applies all the changes of leader - for member.s.AppliedIndex() < leaderAppliedIndex { - t.Logf("member.s.AppliedIndex():%v <= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex) + for member.Server.AppliedIndex() < leaderAppliedIndex { + t.Logf("member.Server.AppliedIndex():%v <= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex) select { case <-ctx.Done(): return ctx.Err() @@ -262,7 +263,7 @@ func checkFirstCommitNotification( ) } default: - t.Logf("member.s.AppliedIndex():%v >= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex) + t.Logf("member.Server.AppliedIndex():%v >= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex) return fmt.Errorf( "notification was not triggered, member ID: %d", member.ID(), diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go index 1727da65c..40bced9c3 100644 --- a/tests/integration/v3_lease_test.go +++ b/tests/integration/v3_lease_test.go @@ -24,6 +24,7 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/testutil" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -34,13 +35,13 @@ import ( // to the primary lessor, refresh the leases and start to manage leases. // TODO: use customized clock to make this test go faster? func TestV3LeasePromote(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) // create lease - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) ttl := time.Duration(lresp.TTL) * time.Second afterGrant := time.Now() if err != nil { @@ -54,19 +55,19 @@ func TestV3LeasePromote(t *testing.T) { time.Sleep(time.Until(afterGrant.Add(ttl - time.Second))) // kill the current leader, all leases should be refreshed. - toStop := clus.waitLeader(t, clus.Members) + toStop := clus.WaitMembersForLeader(t, clus.Members) beforeStop := time.Now() clus.Members[toStop].Stop(t) - var toWait []*member + var toWait []*integration.Member for i, m := range clus.Members { if i != toStop { toWait = append(toWait, m) } } - clus.waitLeader(t, toWait) + clus.WaitMembersForLeader(t, toWait) clus.Members[toStop].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) afterReelect := time.Now() // ensure lease is refreshed by waiting for a "long" time. @@ -96,9 +97,9 @@ func TestV3LeasePromote(t *testing.T) { // TestV3LeaseRevoke ensures a key is deleted once its lease is revoked. 
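The grant-then-revoke round trip that test builds on, shown in isolation; every call here appears at sites already visible in this diff:

	lc := integration.ToGRPC(clus.RandClient()).Lease
	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	// Revoking a lease deletes every key attached to it.
	if _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}); err != nil {
		t.Fatal(err)
	}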
func TestV3LeaseRevoke(t *testing.T) { - BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error { - lc := toGRPC(clus.RandClient()).Lease + integration.BeforeTest(t) + testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error { + lc := integration.ToGRPC(clus.RandClient()).Lease _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID}) return err }) @@ -106,12 +107,12 @@ func TestV3LeaseRevoke(t *testing.T) { // TestV3LeaseGrantById ensures leases may be created by a given id. func TestV3LeaseGrantByID(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // create fixed lease - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{ID: 1, TTL: 1}) if err != nil { @@ -122,7 +123,7 @@ func TestV3LeaseGrantByID(t *testing.T) { } // create duplicate fixed lease - _, err = toGRPC(clus.RandClient()).Lease.LeaseGrant( + _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{ID: 1, TTL: 1}) if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) { @@ -130,7 +131,7 @@ func TestV3LeaseGrantByID(t *testing.T) { } // create fresh fixed lease - lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{ID: 2, TTL: 1}) if err != nil { @@ -143,13 +144,13 @@ func TestV3LeaseGrantByID(t *testing.T) { // TestV3LeaseExpire ensures a key is deleted once a key expires. func TestV3LeaseExpire(t *testing.T) { - BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error { + integration.BeforeTest(t) + testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error { // let lease lapse; wait for deleted key ctx, cancel := context.WithCancel(context.Background()) defer cancel() - wStream, err := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, err := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if err != nil { return err } @@ -195,9 +196,9 @@ func TestV3LeaseExpire(t *testing.T) { // TestV3LeaseKeepAlive ensures keepalive keeps the lease alive. func TestV3LeaseKeepAlive(t *testing.T) { - BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error { - lc := toGRPC(clus.RandClient()).Lease + integration.BeforeTest(t) + testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error { + lc := integration.ToGRPC(clus.RandClient()).Lease lreq := &pb.LeaseKeepAliveRequest{ID: leaseID} ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -229,11 +230,11 @@ func TestV3LeaseKeepAlive(t *testing.T) { // TestV3LeaseCheckpoint ensures a lease checkpoint results in a remaining TTL being persisted // across leader elections. 
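The checkpoint behavior hinges on two ClusterConfig fields exported by the framework package; a minimal setup sketch, with the interval value chosen purely for illustration:

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:                    3,
		EnableLeaseCheckpoint:   true,
		LeaseCheckpointInterval: 2 * time.Second,
	})
	defer clus.Terminate(t)
	// Grant a long-TTL lease, force a leader change, then ask the new leader for
	// the remaining TTL via Lease.LeaseTimeToLive; with checkpointing enabled the
	// TTL should not reset to the full grant value.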
func TestV3LeaseCheckpoint(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) var ttl int64 = 300 leaseInterval := 2 * time.Second - clus := NewClusterV3(t, &ClusterConfig{ + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ Size: 3, EnableLeaseCheckpoint: true, LeaseCheckpointInterval: leaseInterval, @@ -244,7 +245,7 @@ func TestV3LeaseCheckpoint(t *testing.T) { // create lease ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c := toGRPC(clus.RandClient()) + c := integration.ToGRPC(clus.RandClient()) lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: ttl}) if err != nil { t.Fatal(err) @@ -257,10 +258,10 @@ func TestV3LeaseCheckpoint(t *testing.T) { leaderId := clus.WaitLeader(t) leader := clus.Members[leaderId] leader.Stop(t) - time.Sleep(time.Duration(3*electionTicks) * tickDuration) + time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration) leader.Restart(t) newLeaderId := clus.WaitLeader(t) - c2 := toGRPC(clus.Client(newLeaderId)) + c2 := integration.ToGRPC(clus.Client(newLeaderId)) time.Sleep(250 * time.Millisecond) @@ -284,14 +285,14 @@ func TestV3LeaseCheckpoint(t *testing.T) { // TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster. func TestV3LeaseExists(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // create lease ctx0, cancel0 := context.WithCancel(context.Background()) defer cancel0() - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) if err != nil { @@ -308,8 +309,8 @@ func TestV3LeaseExists(t *testing.T) { // TestV3LeaseLeases creates leases and confirms list RPC fetches created ones. 
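The listing call at the center of the next test, on its own; LeaseLeasesResponse carrying a Leases slice of lease statuses is the one API assumption here:

	lc := integration.ToGRPC(clus.RandClient()).Lease
	lresp, err := lc.LeaseLeases(context.Background(), &pb.LeaseLeasesRequest{})
	if err != nil {
		t.Fatal(err)
	}
	for _, status := range lresp.Leases {
		t.Logf("live lease: %016x", status.ID)
	}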
func TestV3LeaseLeases(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx0, cancel0 := context.WithCancel(context.Background()) @@ -318,7 +319,7 @@ func TestV3LeaseLeases(t *testing.T) { // create leases ids := []int64{} for i := 0; i < 5; i++ { - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) if err != nil { @@ -330,7 +331,7 @@ func TestV3LeaseLeases(t *testing.T) { ids = append(ids, lresp.ID) } - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseLeases( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases( context.Background(), &pb.LeaseLeasesRequest{}) if err != nil { @@ -358,8 +359,8 @@ func TestV3LeaseTimeToLiveStress(t *testing.T) { } func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -368,7 +369,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient for i := 0; i < 30; i++ { for j := 0; j < 3; j++ { - go func(i int) { errc <- stresser(ctx, toGRPC(clus.Client(i)).Lease) }(j) + go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clus.Client(i)).Lease) }(j) } } @@ -429,8 +430,8 @@ func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr erro } func TestV3PutOnNonExistLease(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithCancel(context.Background()) @@ -438,7 +439,7 @@ func TestV3PutOnNonExistLease(t *testing.T) { badLeaseID := int64(0x12345678) putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID} - _, err := toGRPC(clus.RandClient()).KV.Put(ctx, putr) + _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr) if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) { t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound) } @@ -447,13 +448,13 @@ func TestV3PutOnNonExistLease(t *testing.T) { // TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic // related issue https://github.com/etcd-io/etcd/issues/6537 func TestV3GetNonExistLease(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lc := toGRPC(clus.RandClient()).Lease + lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10}) if err != nil { t.Errorf("failed to create lease %v", err) @@ -468,12 +469,12 @@ func TestV3GetNonExistLease(t *testing.T) { Keys: true, } - for _, client := range clus.clients { + for _, client := range clus.Clients { // quorum-read to ensure revoke completes before TimeToLive - if _, err := toGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil { + if _, err := 
integration.ToGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil { t.Fatal(err) } - resp, err := toGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr) + resp, err := integration.ToGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr) if err != nil { t.Fatalf("expected nil error, but got %v", err) } @@ -485,8 +486,8 @@ func TestV3GetNonExistLease(t *testing.T) { // TestV3LeaseSwitch tests a key can be switched from one lease to another. func TestV3LeaseSwitch(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) key := "foo" @@ -494,34 +495,34 @@ func TestV3LeaseSwitch(t *testing.T) { // create lease ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) + lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) if err1 != nil { t.Fatal(err1) } - lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) + lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) if err2 != nil { t.Fatal(err2) } // attach key on lease1 then switch it to lease2 put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID} - _, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1) + _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1) if err != nil { t.Fatal(err) } put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID} - _, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2) + _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2) if err != nil { t.Fatal(err) } // revoke lease1 should not remove key - _, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) + _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) if err != nil { t.Fatal(err) } rreq := &pb.RangeRequest{Key: []byte("foo")} - rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) + rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) if err != nil { t.Fatal(err) } @@ -530,11 +531,11 @@ func TestV3LeaseSwitch(t *testing.T) { } // revoke lease2 should remove key - _, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) + _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) if err != nil { t.Fatal(err) } - rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) + rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) if err != nil { t.Fatal(err) } @@ -547,14 +548,14 @@ func TestV3LeaseSwitch(t *testing.T) { // election timeout after it loses its quorum. And the new leader extends the TTL of // the lease to at least TTL + election timeout.
func TestV3LeaseFailover(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - toIsolate := clus.waitLeader(t, clus.Members) + toIsolate := clus.WaitMembersForLeader(t, clus.Members) - lc := toGRPC(clus.Client(toIsolate)).Lease + lc := integration.ToGRPC(clus.Client(toIsolate)).Lease // create lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5}) @@ -595,7 +596,7 @@ func TestV3LeaseFailover(t *testing.T) { } clus.Members[toIsolate].Resume() - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // lease should not expire at the last received expire deadline. time.Sleep(time.Until(expectedExp) - 500*time.Millisecond) @@ -608,12 +609,12 @@ func TestV3LeaseFailover(t *testing.T) { // TestV3LeaseRequireLeader ensures that a Recv will get a leader // loss error if there is no leader. func TestV3LeaseRequireLeader(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - lc := toGRPC(clus.Client(0)).Lease + lc := integration.ToGRPC(clus.Client(0)).Lease clus.Members[1].Stop(t) clus.Members[2].Stop(t) @@ -648,13 +649,13 @@ const fiveMinTTL int64 = 300 // TestV3LeaseRecoverAndRevoke ensures that revoking a lease after restart deletes the attached key. func TestV3LeaseRecoverAndRevoke(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) if err != nil { @@ -671,16 +672,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { // restart server and ensure lease still exists clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV - lsc = toGRPC(nc).Lease + kvc = integration.ToGRPC(nc).KV + lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke should delete the key @@ -699,13 +700,13 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { // TestV3LeaseRevokeAndRecover ensures that revoked key stays deleted after restart. 
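The stop/restart/re-dial idiom recurs in this test and the ones after it; pulled out on its own, the sequence is (the two-line comment is quoted from the tests themselves):

	clus.Members[0].Stop(t)
	clus.Members[0].Restart(t)
	clus.WaitMembersForLeader(t, clus.Members)

	// overwrite old client with newly dialed connection
	// otherwise, error with "grpc: RPC failed fast due to transport failure"
	nc, err := integration.NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatal(err)
	}
	defer nc.Close()
	kvc := integration.ToGRPC(nc).KV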
func TestV3LeaseRevokeAndRecover(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) if err != nil { @@ -728,15 +729,15 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { // restart server and ensure revoked key doesn't exist clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV + kvc = integration.ToGRPC(nc).KV defer nc.Close() rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) @@ -751,13 +752,13 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { // TestV3LeaseRecoverKeyWithDetachedLease ensures that revoking a detached lease after restart // does not delete the key. func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) if err != nil { @@ -780,16 +781,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { // restart server and ensure lease still exists clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV - lsc = toGRPC(nc).Lease + kvc = integration.ToGRPC(nc).KV + lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the detached lease @@ -807,13 +808,13 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { } func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease var leaseIDs []int64 for i := 0; i < 2; i++ { @@ -835,7 +836,7 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { // restart server and ensure lease still exists clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) for i, leaseID := range leaseIDs { if !leaseExist(t, clus, leaseID) 
{ t.Errorf("#%d: unexpected lease not exists", i) @@ -844,12 +845,12 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV - lsc = toGRPC(nc).Lease + kvc = integration.ToGRPC(nc).KV + lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the old lease @@ -881,9 +882,9 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { } // acquireLeaseAndKey creates a new lease and creates an attached key. -func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) { +func acquireLeaseAndKey(clus *integration.ClusterV3, key string) (int64, error) { // create lease - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{TTL: 1}) if err != nil { @@ -894,7 +895,7 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) { } // attach to key put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID} - if _, err := toGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil { + if _, err := integration.ToGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil { return 0, err } return lresp.ID, nil @@ -902,8 +903,8 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) { // testLeaseRemoveLeasedKey performs some action while holding a lease with an // attached key "foo", then confirms the key is gone. -func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) +func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.ClusterV3, int64) error) { + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) leaseID, err := acquireLeaseAndKey(clus, "foo") @@ -917,7 +918,7 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) { // confirm no key rreq := &pb.RangeRequest{Key: []byte("foo")} - rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) + rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) if err != nil { t.Fatal(err) } @@ -926,8 +927,8 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) { } } -func leaseExist(t *testing.T, clus *ClusterV3, leaseID int64) bool { - l := toGRPC(clus.RandClient()).Lease +func leaseExist(t *testing.T, clus *integration.ClusterV3, leaseID int64) bool { + l := integration.ToGRPC(clus.RandClient()).Lease _, err := l.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{ID: leaseID, TTL: 5}) if err == nil { diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go index 89c666b02..ccd7eea1d 100644 --- a/tests/integration/v3_stm_test.go +++ b/tests/integration/v3_stm_test.go @@ -24,13 +24,14 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" v3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestSTMConflict tests that conflicts are retried. 
func TestSTMConflict(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -96,9 +97,9 @@ func TestSTMConflict(t *testing.T) { // TestSTMPutNewKey confirms a STM put on a new key is visible after commit. func TestSTMPutNewKey(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -123,9 +124,9 @@ func TestSTMPutNewKey(t *testing.T) { // TestSTMAbort tests that an aborted txn does not modify any keys. func TestSTMAbort(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -154,9 +155,9 @@ func TestSTMAbort(t *testing.T) { // TestSTMSerialize tests that serialization is honored when serializable. func TestSTMSerialize(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -217,9 +218,9 @@ func TestSTMSerialize(t *testing.T) { // TestSTMApplyOnConcurrentDeletion ensures that concurrent key deletion // fails the first GET revision comparison within STM; trigger retry. func TestSTMApplyOnConcurrentDeletion(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -266,9 +267,9 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) { } func TestSTMSerializableSnapshotPut(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go index 2437df94e..e50419261 100644 --- a/tests/integration/v3_tls_test.go +++ b/tests/integration/v3_tls_test.go @@ -21,6 +21,7 @@ import ( "time" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) @@ -30,7 +31,7 @@ func TestTLSClientCipherSuitesMismatch(t *testing.T) { testTLSCipherSuites(t, fa // testTLSCipherSuites ensures mismatching client-side cipher suite // fail TLS handshake with the server. 
func testTLSCipherSuites(t *testing.T, valid bool) { - BeforeTest(t) + integration.BeforeTest(t) cipherSuites := []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, @@ -40,21 +41,21 @@ func testTLSCipherSuites(t *testing.T, valid bool) { tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, } - srvTLS, cliTLS := testTLSInfo, testTLSInfo + srvTLS, cliTLS := integration.TestTLSInfo, integration.TestTLSInfo if valid { srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites, cipherSuites } else { srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:] } - clus := NewClusterV3(t, &ClusterConfig{Size: 1, ClientTLS: &srvTLS}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS}) defer clus.Terminate(t) cc, err := cliTLS.ClientConfig() if err != nil { t.Fatal(err) } - cli, cerr := NewClient(t, clientv3.Config{ + cli, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go index a07dd138c..532871bf2 100644 --- a/tests/integration/v3_watch_restore_test.go +++ b/tests/integration/v3_watch_restore_test.go @@ -21,13 +21,14 @@ import ( "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // MustFetchNotEmptyMetric attempts to fetch given 'metric' from 'member', // waiting for not-empty value or 'timeout'. -func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeout <-chan time.Time) string { +func MustFetchNotEmptyMetric(tb testing.TB, member *integration.Member, metric string, timeout <-chan time.Time) string { metricValue := "" - tick := time.Tick(tickDuration) + tick := time.Tick(integration.TickDuration) for metricValue == "" { tb.Logf("Waiting for metric: %v", metric) select { @@ -50,9 +51,9 @@ func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeo // that were created in synced watcher group in the first place. // TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision" func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{ + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ Size: 3, SnapshotCount: 10, SnapshotCatchUpEntries: 5, @@ -62,7 +63,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { // spawn a watcher before shutdown, and put it in synced watcher ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx) + wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx) if errW != nil { t.Fatal(errW) } @@ -79,13 +80,13 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { } clus.Members[0].InjectPartition(t, clus.Members[1:]...) 
- initialLead := clus.waitLeader(t, clus.Members[1:]) - t.Logf("elected lead: %v", clus.Members[initialLead].s.ID()) + initialLead := clus.WaitMembersForLeader(t, clus.Members[1:]) + t.Logf("elected lead: %v", clus.Members[initialLead].Server.ID()) t.Logf("sleeping for 2 seconds") time.Sleep(2 * time.Second) t.Logf("sleeping for 2 seconds DONE") - kvc := toGRPC(clus.Client(1)).KV + kvc := integration.ToGRPC(clus.Client(1)).KV // to trigger snapshot from the leader to the stopped follower for i := 0; i < 15; i++ { @@ -98,7 +99,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { // trigger snapshot send from leader to this slow follower // which then calls watchable store Restore clus.Members[0].RecoverPartition(t, clus.Members[1:]...) // We don't expect leadership change here, just recompute the leader's index within clus.Members list. lead := clus.WaitLeader(t) diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go index 323d0d72c..59433e0cc 100644 --- a/tests/integration/v3_watch_test.go +++ b/tests/integration/v3_watch_test.go @@ -27,11 +27,12 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3WatchFromCurrentRevision tests Watch APIs from current revision. func TestV3WatchFromCurrentRevision(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { name string @@ -206,10 +207,10 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch + wAPI := integration.ToGRPC(clus.RandClient()).Watch ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, err := wAPI.Watch(ctx) @@ -243,7 +244,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { ch := make(chan struct{}, 1) go func() { for _, k := range tt.putKeys { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")} if _, err := kvc.Put(context.TODO(), req); err != nil { t.Errorf("#%d: couldn't put key (%v)", i, err) @@ -291,12 +292,12 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { // TestV3WatchFutureRevision tests Watch APIs from a future revision. func TestV3WatchFutureRevision(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch + wAPI := integration.ToGRPC(clus.RandClient()).Watch ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, err := wAPI.Watch(ctx) @@ -322,7 +323,7 @@ func TestV3WatchFutureRevision(t *testing.T) { t.Fatalf("create %v, want %v", cresp.Created, true) } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV for { req := &pb.PutRequest{Key: wkey, Value: []byte("bar")} resp, rerr := kvc.Put(context.TODO(), req) @@ -352,12 +353,12 @@ func TestV3WatchFutureRevision(t *testing.T) { // TestV3WatchWrongRange tests that a wrong range does not create watchers.
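All the watch tests in this file share the same raw-stream plumbing; a minimal sketch of establishing a single watcher, assuming clus and ctx are in scope as in the surrounding tests:

	wStream, err := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// A watcher is created by sending a CreateRequest over the stream and
	// waiting for the server's Created acknowledgement.
	req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}}
	if err := wStream.Send(req); err != nil {
		t.Fatal(err)
	}
	if resp, err := wStream.Recv(); err != nil || !resp.Created {
		t.Fatalf("watch create failed: resp=%v, err=%v", resp, err)
	}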
func TestV3WatchWrongRange(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch + wAPI := integration.ToGRPC(clus.RandClient()).Watch ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, err := wAPI.Watch(ctx) @@ -397,23 +398,23 @@ func TestV3WatchWrongRange(t *testing.T) { // TestV3WatchCancelSynced tests Watch APIs cancellation from synced map. func TestV3WatchCancelSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchCancel(t, 0) } // TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map. func TestV3WatchCancelUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchCancel(t, 1) } func testV3WatchCancel(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if errW != nil { t.Fatalf("wAPI.Watch error: %v", errW) } @@ -448,7 +449,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) { t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled) } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { t.Errorf("couldn't put key (%v)", err) } @@ -463,13 +464,13 @@ func testV3WatchCancel(t *testing.T, startRev int64) { // TestV3WatchCurrentPutOverlap ensures current watchers receive all events with // overlapping puts. 
func TestV3WatchCurrentPutOverlap(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -482,7 +483,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} if _, err := kvc.Put(context.TODO(), req); err != nil { t.Errorf("couldn't put key (%v)", err) @@ -547,15 +548,15 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) { // TestV3WatchEmptyKey ensures synced watchers see empty key PUTs as PUT events func TestV3WatchEmptyKey(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if werr != nil { t.Fatal(werr) } @@ -570,7 +571,7 @@ func TestV3WatchEmptyKey(t *testing.T) { } // put a key with empty value - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} if _, err := kvc.Put(context.TODO(), preq); err != nil { t.Fatal(err) @@ -593,12 +594,12 @@ func TestV3WatchEmptyKey(t *testing.T) { } func TestV3WatchMultipleWatchersSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleWatchers(t, 0) } func TestV3WatchMultipleWatchersUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleWatchers(t, 1) } @@ -607,14 +608,14 @@ func TestV3WatchMultipleWatchersUnsynced(t *testing.T) { // that matches all watchers, and another key that matches only // one watcher to test if it receives expected events. func testV3WatchMultipleWatchers(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if errW != nil { t.Fatalf("wAPI.Watch error: %v", errW) } @@ -698,23 +699,23 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) { } func TestV3WatchMultipleEventsTxnSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleEventsTxn(t, 0) } func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleEventsTxn(t, 1) } // testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events. 
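The multi-event case below is driven by one atomic Txn; a condensed sketch of building it, mirroring the loop in the test body:

	kvc := integration.ToGRPC(clus.RandClient()).KV
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		// every put commits at the same revision, so a watcher sees them together
		txn.Success = append(txn.Success, &pb.RequestOp{
			Request: &pb.RequestOp_RequestPut{
				RequestPut: &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}}})
	}
	if _, err := kvc.Txn(context.TODO(), &txn); err != nil {
		t.Fatal(err)
	}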
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -729,7 +730,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { t.Fatalf("create response failed: resp=%v, err=%v", resp, err) } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV txn := pb.TxnRequest{} for i := 0; i < 3; i++ { ru := &pb.RequestOp{} @@ -791,11 +792,11 @@ func (evs eventsSortByKey) Less(i, j int) bool { } func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil { t.Fatalf("couldn't put key (%v)", err) @@ -806,7 +807,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -870,22 +871,22 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { } func TestV3WatchMultipleStreamsSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleStreams(t, 0) } func TestV3WatchMultipleStreamsUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleStreams(t, 1) } // testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams. func testV3WatchMultipleStreams(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch - kvc := toGRPC(clus.RandClient()).KV + wAPI := integration.ToGRPC(clus.RandClient()).Watch + kvc := integration.ToGRPC(clus.RandClient()).KV streams := make([]pb.Watch_WatchClient, 5) for i := range streams { @@ -983,13 +984,13 @@ func TestWatchWithProgressNotify(t *testing.T) { testInterval := 3 * time.Second defer func() { v3rpc.SetProgressReportInterval(oldpi) }() - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -1033,12 +1034,12 @@ func TestWatchWithProgressNotify(t *testing.T) { // TestV3WatchClose opens many watchers concurrently on multiple streams.
func TestV3WatchClose(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) c := clus.Client(0) - wapi := toGRPC(c).Watch + wapi := integration.ToGRPC(c).Watch var wg sync.WaitGroup wg.Add(100) @@ -1068,15 +1069,15 @@ func TestV3WatchClose(t *testing.T) { // TestV3WatchWithFilter ensures watcher filters out the events correctly. func TestV3WatchWithFilter(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if werr != nil { t.Fatal(werr) } @@ -1103,7 +1104,7 @@ func TestV3WatchWithFilter(t *testing.T) { }() // put a key with empty value - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} if _, err := kvc.Put(context.TODO(), preq); err != nil { t.Fatal(err) @@ -1137,8 +1138,8 @@ func TestV3WatchWithFilter(t *testing.T) { } func TestV3WatchWithPrevKV(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) wctx, wcancel := context.WithCancel(context.Background()) @@ -1158,12 +1159,12 @@ func TestV3WatchWithPrevKV(t *testing.T) { vals: []string{"first", "second"}, }} for i, tt := range tests { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[0])}); err != nil { t.Fatal(err) } - ws, werr := toGRPC(clus.RandClient()).Watch.Watch(wctx) + ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(wctx) if werr != nil { t.Fatal(werr) } @@ -1211,9 +1212,9 @@ func TestV3WatchWithPrevKV(t *testing.T) { // TestV3WatchCancellation ensures that watch cancellation frees up server resources. func TestV3WatchCancellation(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -1239,7 +1240,7 @@ func TestV3WatchCancellation(t *testing.T) { } var expected string - if ThroughProxy { + if integration.ThroughProxy { // grpc proxy has additional 2 watches open expected = "3" } else { @@ -1253,9 +1254,9 @@ func TestV3WatchCancellation(t *testing.T) { // TestV3WatchCloseCancelRace ensures that watch close doesn't decrement the watcher total too far. 
 func TestV3WatchCloseCancelRace(t *testing.T) {
-	BeforeTest(t)
+	integration.BeforeTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@@ -1278,7 +1279,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
 	}
 
 	var expected string
-	if ThroughProxy {
+	if integration.ThroughProxy {
 		// grpc proxy has additional 2 watches open
 		expected = "2"
 	} else {
diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go
index d5f67dd9f..32127e50a 100644
--- a/tests/integration/v3election_grpc_test.go
+++ b/tests/integration/v3election_grpc_test.go
@@ -22,25 +22,26 @@ import (
 
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 // TestV3ElectionCampaign checks that Campaign will not give
 // simultaneous leadership to multiple campaigners.
 func TestV3ElectionCampaign(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
-	lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+	lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
 	if err1 != nil {
 		t.Fatal(err1)
 	}
-	lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+	lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
 	if err2 != nil {
 		t.Fatal(err2)
 	}
 
-	lc := toGRPC(clus.Client(0)).Election
+	lc := integration.ToGRPC(clus.Client(0)).Election
 	req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")}
 	l1, lerr1 := lc.Campaign(context.TODO(), req1)
 	if lerr1 != nil {
@@ -89,11 +90,11 @@
 // TestV3ElectionObserve checks that an Observe stream receives
 // proclamations from different leaders uninterrupted.
 func TestV3ElectionObserve(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
-	lc := toGRPC(clus.Client(0)).Election
+	lc := integration.ToGRPC(clus.Client(0)).Election
 
 	// observe leadership events
 	observec := make(chan struct{}, 1)
@@ -125,7 +126,7 @@
 		t.Fatalf("observe stream took too long to start")
 	}
 
-	lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+	lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
 	if err1 != nil {
 		t.Fatal(err1)
 	}
@@ -139,7 +140,7 @@
 
 	go func() {
 		defer close(leader2c)
-		lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+		lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
 		if err2 != nil {
 			t.Error(err2)
 		}
diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go
index 38a36369f..1396fb3d4 100644
--- a/tests/integration/v3lock_grpc_test.go
+++ b/tests/integration/v3lock_grpc_test.go
@@ -21,25 +21,26 @@ import (
 
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 // TestV3LockLockWaiter tests that a client will wait for a lock, then acquire it
 // once it is unlocked.
 func TestV3LockLockWaiter(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
-	lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+	lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
 	if err1 != nil {
 		t.Fatal(err1)
 	}
-	lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+	lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
 	if err2 != nil {
 		t.Fatal(err2)
 	}
 
-	lc := toGRPC(clus.Client(0)).Lock
+	lc := integration.ToGRPC(clus.Client(0)).Lock
 	l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID})
 	if lerr1 != nil {
 		t.Fatal(lerr1)