Merge pull request #13411 from serathius/framework

Move e2e/integration cluster setup to separate package

Commit ef1f71a9f6
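
To make the effect of the move concrete, here is a minimal sketch of how an e2e test consumes the cluster framework from its new home. It is not part of this commit: the test name and logging are hypothetical, and only the identifiers that appear in the diff below (e2e.NewConfigNoTLS, e2e.NewEtcdProcessCluster, EndpointsV3, Close) are taken from the source.

package e2e

import (
	"testing"

	"go.etcd.io/etcd/tests/v3/framework/e2e"
)

// TestClusterSmokeExample is a hypothetical example: it spins up a 3-node
// cluster via the relocated framework package and tears it down when done.
func TestClusterSmokeExample(t *testing.T) {
	cfg := e2e.NewConfigNoTLS() // preset: 3-node cluster, no TLS
	epc, err := e2e.NewEtcdProcessCluster(t, cfg)
	if err != nil {
		t.Fatalf("could not start etcd process cluster (%v)", err)
	}
	defer epc.Close() // stops all members and cleans up data dirs

	// One client endpoint per member; ports 20000, 20005, 20010 by default.
	t.Logf("v3 endpoints: %v", epc.EndpointsV3())
}
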
@@ -1,480 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
-	"fmt"
-	"net/url"
-	"os"
-	"path"
-	"strings"
-	"testing"
-	"time"
-
-	"go.etcd.io/etcd/server/v3/etcdserver"
-	"go.etcd.io/etcd/tests/v3/integration"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zaptest"
-)
-
-const etcdProcessBasePort = 20000
-
-type clientConnType int
-
-var (
-	fixturesDir = integration.MustAbsPath("../fixtures")
-)
-
-const (
-	clientNonTLS clientConnType = iota
-	clientTLS
-	clientTLSAndNonTLS
-)
-
-func newConfigNoTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{clusterSize: 3,
-		initialToken: "new",
-	}
-}
-
-func newConfigAutoTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:   3,
-		isPeerTLS:     true,
-		isPeerAutoTLS: true,
-		initialToken:  "new",
-	}
-}
-
-func newConfigTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  3,
-		clientTLS:    clientTLS,
-		isPeerTLS:    true,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  3,
-		clientTLS:    clientTLS,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientBoth() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  1,
-		clientTLS:    clientTLSAndNonTLS,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientAutoTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:     1,
-		isClientAutoTLS: true,
-		clientTLS:       clientTLS,
-		initialToken:    "new",
-	}
-}
-
-func newConfigPeerTLS() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  3,
-		isPeerTLS:    true,
-		initialToken: "new",
-	}
-}
-
-func newConfigClientTLSCertAuth() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:           1,
-		clientTLS:             clientTLS,
-		initialToken:          "new",
-		clientCertAuthEnabled: true,
-	}
-}
-
-func newConfigClientTLSCertAuthWithNoCN() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:           1,
-		clientTLS:             clientTLS,
-		initialToken:          "new",
-		clientCertAuthEnabled: true,
-		noCN:                  true,
-	}
-}
-
-func newConfigJWT() *etcdProcessClusterConfig {
-	return &etcdProcessClusterConfig{
-		clusterSize:  1,
-		initialToken: "new",
-		authTokenOpts: "jwt,pub-key=" + path.Join(fixturesDir, "server.crt") +
-			",priv-key=" + path.Join(fixturesDir, "server.key.insecure") + ",sign-method=RS256,ttl=1s",
-	}
-}
-
-func configStandalone(cfg etcdProcessClusterConfig) *etcdProcessClusterConfig {
-	ret := cfg
-	ret.clusterSize = 1
-	return &ret
-}
-
-type etcdProcessCluster struct {
-	lg    *zap.Logger
-	cfg   *etcdProcessClusterConfig
-	procs []etcdProcess
-}
-
-type etcdProcessClusterConfig struct {
-	execPath    string
-	dataDirPath string
-	keepDataDir bool
-	envVars     map[string]string
-
-	clusterSize int
-
-	baseScheme string
-	basePort   int
-
-	metricsURLScheme string
-
-	snapshotCount int // default is 10000
-
-	clientTLS             clientConnType
-	clientCertAuthEnabled bool
-	isPeerTLS             bool
-	isPeerAutoTLS         bool
-	isClientAutoTLS       bool
-	isClientCRL           bool
-	noCN                  bool
-
-	cipherSuites []string
-
-	forceNewCluster     bool
-	initialToken        string
-	quotaBackendBytes   int64
-	noStrictReconfig    bool
-	enableV2            bool
-	initialCorruptCheck bool
-	authTokenOpts       string
-	v2deprecation       string
-
-	rollingStart bool
-}
-
-// newEtcdProcessCluster launches a new cluster from etcd processes, returning
-// a new etcdProcessCluster once all nodes are ready to accept client requests.
-func newEtcdProcessCluster(t testing.TB, cfg *etcdProcessClusterConfig) (*etcdProcessCluster, error) {
-	skipInShortMode(t)
-
-	etcdCfgs := cfg.etcdServerProcessConfigs(t)
-	epc := &etcdProcessCluster{
-		cfg:   cfg,
-		lg:    zaptest.NewLogger(t),
-		procs: make([]etcdProcess, cfg.clusterSize),
-	}
-
-	// launch etcd processes
-	for i := range etcdCfgs {
-		proc, err := newEtcdProcess(etcdCfgs[i])
-		if err != nil {
-			epc.Close()
-			return nil, fmt.Errorf("Cannot configure: %v", err)
-		}
-		epc.procs[i] = proc
-	}
-
-	if cfg.rollingStart {
-		if err := epc.RollingStart(); err != nil {
-			return nil, fmt.Errorf("Cannot rolling-start: %v", err)
-		}
-	} else {
-		if err := epc.Start(); err != nil {
-			return nil, fmt.Errorf("Cannot start: %v", err)
-		}
-	}
-	return epc, nil
-}
-
-func (cfg *etcdProcessClusterConfig) clientScheme() string {
-	if cfg.clientTLS == clientTLS {
-		return "https"
-	}
-	return "http"
-}
-
-func (cfg *etcdProcessClusterConfig) peerScheme() string {
-	peerScheme := cfg.baseScheme
-	if peerScheme == "" {
-		peerScheme = "http"
-	}
-	if cfg.isPeerTLS {
-		peerScheme += "s"
-	}
-	return peerScheme
-}
-
-func (cfg *etcdProcessClusterConfig) etcdServerProcessConfigs(tb testing.TB) []*etcdServerProcessConfig {
-	lg := zaptest.NewLogger(tb)
-
-	if cfg.basePort == 0 {
-		cfg.basePort = etcdProcessBasePort
-	}
-	if cfg.execPath == "" {
-		cfg.execPath = binPath
-	}
-	if cfg.snapshotCount == 0 {
-		cfg.snapshotCount = etcdserver.DefaultSnapshotCount
-	}
-
-	etcdCfgs := make([]*etcdServerProcessConfig, cfg.clusterSize)
-	initialCluster := make([]string, cfg.clusterSize)
-	for i := 0; i < cfg.clusterSize; i++ {
-		var curls []string
-		var curl, curltls string
-		port := cfg.basePort + 5*i
-		curlHost := fmt.Sprintf("localhost:%d", port)
-
-		switch cfg.clientTLS {
-		case clientNonTLS, clientTLS:
-			curl = (&url.URL{Scheme: cfg.clientScheme(), Host: curlHost}).String()
-			curls = []string{curl}
-		case clientTLSAndNonTLS:
-			curl = (&url.URL{Scheme: "http", Host: curlHost}).String()
-			curltls = (&url.URL{Scheme: "https", Host: curlHost}).String()
-			curls = []string{curl, curltls}
-		}
-
-		purl := url.URL{Scheme: cfg.peerScheme(), Host: fmt.Sprintf("localhost:%d", port+1)}
-		name := fmt.Sprintf("test-%d", i)
-		dataDirPath := cfg.dataDirPath
-		if cfg.dataDirPath == "" {
-			dataDirPath = tb.TempDir()
-		}
-		initialCluster[i] = fmt.Sprintf("%s=%s", name, purl.String())
-
-		args := []string{
-			"--name", name,
-			"--listen-client-urls", strings.Join(curls, ","),
-			"--advertise-client-urls", strings.Join(curls, ","),
-			"--listen-peer-urls", purl.String(),
-			"--initial-advertise-peer-urls", purl.String(),
-			"--initial-cluster-token", cfg.initialToken,
-			"--data-dir", dataDirPath,
-			"--snapshot-count", fmt.Sprintf("%d", cfg.snapshotCount),
-		}
-		args = addV2Args(args)
-		if cfg.forceNewCluster {
-			args = append(args, "--force-new-cluster")
-		}
-		if cfg.quotaBackendBytes > 0 {
-			args = append(args,
-				"--quota-backend-bytes", fmt.Sprintf("%d", cfg.quotaBackendBytes),
-			)
-		}
-		if cfg.noStrictReconfig {
-			args = append(args, "--strict-reconfig-check=false")
-		}
-		if cfg.enableV2 {
-			args = append(args, "--enable-v2")
-		}
-		if cfg.initialCorruptCheck {
-			args = append(args, "--experimental-initial-corrupt-check")
-		}
-		var murl string
-		if cfg.metricsURLScheme != "" {
-			murl = (&url.URL{
-				Scheme: cfg.metricsURLScheme,
-				Host:   fmt.Sprintf("localhost:%d", port+2),
-			}).String()
-			args = append(args, "--listen-metrics-urls", murl)
-		}
-
-		args = append(args, cfg.tlsArgs()...)
-
-		if cfg.authTokenOpts != "" {
-			args = append(args, "--auth-token", cfg.authTokenOpts)
-		}
-
-		if cfg.v2deprecation != "" {
-			args = append(args, "--v2-deprecation", cfg.v2deprecation)
-		}
-
-		etcdCfgs[i] = &etcdServerProcessConfig{
-			lg:           lg,
-			execPath:     cfg.execPath,
-			args:         args,
-			envVars:      cfg.envVars,
-			tlsArgs:      cfg.tlsArgs(),
-			dataDirPath:  dataDirPath,
-			keepDataDir:  cfg.keepDataDir,
-			name:         name,
-			purl:         purl,
-			acurl:        curl,
-			murl:         murl,
-			initialToken: cfg.initialToken,
-		}
-	}
-
-	initialClusterArgs := []string{"--initial-cluster", strings.Join(initialCluster, ",")}
-	for i := range etcdCfgs {
-		etcdCfgs[i].initialCluster = strings.Join(initialCluster, ",")
-		etcdCfgs[i].args = append(etcdCfgs[i].args, initialClusterArgs...)
-	}
-
-	return etcdCfgs
-}
-
-func (cfg *etcdProcessClusterConfig) tlsArgs() (args []string) {
-	if cfg.clientTLS != clientNonTLS {
-		if cfg.isClientAutoTLS {
-			args = append(args, "--auto-tls")
-		} else {
-			tlsClientArgs := []string{
-				"--cert-file", certPath,
-				"--key-file", privateKeyPath,
-				"--trusted-ca-file", caPath,
-			}
-			args = append(args, tlsClientArgs...)
-
-			if cfg.clientCertAuthEnabled {
-				args = append(args, "--client-cert-auth")
-			}
-		}
-	}
-
-	if cfg.isPeerTLS {
-		if cfg.isPeerAutoTLS {
-			args = append(args, "--peer-auto-tls")
-		} else {
-			tlsPeerArgs := []string{
-				"--peer-cert-file", certPath,
-				"--peer-key-file", privateKeyPath,
-				"--peer-trusted-ca-file", caPath,
-			}
-			args = append(args, tlsPeerArgs...)
-		}
-	}
-
-	if cfg.isClientCRL {
-		args = append(args, "--client-crl-file", crlPath, "--client-cert-auth")
-	}
-
-	if len(cfg.cipherSuites) > 0 {
-		args = append(args, "--cipher-suites", strings.Join(cfg.cipherSuites, ","))
-	}
-
-	return args
-}
-
-func (epc *etcdProcessCluster) EndpointsV2() []string {
-	return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV2() })
-}
-
-func (epc *etcdProcessCluster) EndpointsV3() []string {
-	return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV3() })
-}
-
-func (epc *etcdProcessCluster) endpoints(f func(ep etcdProcess) []string) (ret []string) {
-	for _, p := range epc.procs {
-		ret = append(ret, f(p)...)
-	}
-	return ret
-}
-
-func (epc *etcdProcessCluster) Start() error {
-	return epc.start(func(ep etcdProcess) error { return ep.Start() })
-}
-
-func (epc *etcdProcessCluster) RollingStart() error {
-	return epc.rollingStart(func(ep etcdProcess) error { return ep.Start() })
-}
-
-func (epc *etcdProcessCluster) Restart() error {
-	return epc.start(func(ep etcdProcess) error { return ep.Restart() })
-}
-
-func (epc *etcdProcessCluster) start(f func(ep etcdProcess) error) error {
-	readyC := make(chan error, len(epc.procs))
-	for i := range epc.procs {
-		go func(n int) { readyC <- f(epc.procs[n]) }(i)
-	}
-	for range epc.procs {
-		if err := <-readyC; err != nil {
-			epc.Close()
-			return err
-		}
-	}
-	return nil
-}
-
-func (epc *etcdProcessCluster) rollingStart(f func(ep etcdProcess) error) error {
-	readyC := make(chan error, len(epc.procs))
-	for i := range epc.procs {
-		go func(n int) { readyC <- f(epc.procs[n]) }(i)
-		// make sure the servers do not start at the same time
-		time.Sleep(time.Second)
-	}
-	for range epc.procs {
-		if err := <-readyC; err != nil {
-			epc.Close()
-			return err
-		}
-	}
-	return nil
-}
-
-func (epc *etcdProcessCluster) Stop() (err error) {
-	for _, p := range epc.procs {
-		if p == nil {
-			continue
-		}
-		if curErr := p.Stop(); curErr != nil {
-			if err != nil {
-				err = fmt.Errorf("%v; %v", err, curErr)
-			} else {
-				err = curErr
-			}
-		}
-	}
-	return err
-}
-
-func (epc *etcdProcessCluster) Close() error {
-	epc.lg.Info("closing test cluster...")
-	err := epc.Stop()
-	for _, p := range epc.procs {
-		// p is nil when newEtcdProcess fails in the middle
-		// Close still gets called to clean up test data
-		if p == nil {
-			continue
-		}
-		if cerr := p.Close(); cerr != nil {
-			err = cerr
-		}
-	}
-	epc.lg.Info("closed test cluster.")
-	return err
-}
-
-func (epc *etcdProcessCluster) WithStopSignal(sig os.Signal) (ret os.Signal) {
-	for _, p := range epc.procs {
-		ret = p.WithStopSignal(sig)
-	}
-	return ret
-}
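
Before the move, tests in this package drove the harness above directly through its unexported API. A minimal sketch of that pre-move usage, assuming it sits in the same package as the deleted file (the test body is hypothetical; the identifiers and the port layout, basePort 20000 with a stride of 5 per member, come from the code above):

func TestTLSClusterSmokeExample(t *testing.T) {
	// newConfigTLS: 3 members, client TLS and peer TLS, static certs via tlsArgs().
	epc, err := newEtcdProcessCluster(t, newConfigTLS())
	if err != nil {
		t.Fatalf("could not start etcd process cluster (%v)", err)
	}
	defer epc.Close()

	// Client URLs land on https://localhost:20000, :20005, :20010; the
	// matching peer URL for each member sits one port above its client port.
	for _, ep := range epc.EndpointsV3() {
		t.Log(ep)
	}
}
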
@@ -21,25 +21,27 @@ import (
 	"strings"
 	"testing"
 	"time"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func BeforeTestV2(t testing.TB) {
-	BeforeTest(t)
+	e2e.BeforeTest(t)
 	os.Setenv("ETCDCTL_API", "2")
 	t.Cleanup(func() {
 		os.Unsetenv("ETCDCTL_API")
 	})
 }

-func TestCtlV2Set(t *testing.T)          { testCtlV2Set(t, newConfigNoTLS(), false) }
-func TestCtlV2SetQuorum(t *testing.T)    { testCtlV2Set(t, newConfigNoTLS(), true) }
-func TestCtlV2SetClientTLS(t *testing.T) { testCtlV2Set(t, newConfigClientTLS(), false) }
-func TestCtlV2SetPeerTLS(t *testing.T)   { testCtlV2Set(t, newConfigPeerTLS(), false) }
-func TestCtlV2SetTLS(t *testing.T)       { testCtlV2Set(t, newConfigTLS(), false) }
-func testCtlV2Set(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
+func TestCtlV2Set(t *testing.T)          { testCtlV2Set(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2SetQuorum(t *testing.T)    { testCtlV2Set(t, e2e.NewConfigNoTLS(), true) }
+func TestCtlV2SetClientTLS(t *testing.T) { testCtlV2Set(t, e2e.NewConfigClientTLS(), false) }
+func TestCtlV2SetPeerTLS(t *testing.T)   { testCtlV2Set(t, e2e.NewConfigPeerTLS(), false) }
+func TestCtlV2SetTLS(t *testing.T)       { testCtlV2Set(t, e2e.NewConfigTLS(), false) }
+func testCtlV2Set(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) {
 	BeforeTestV2(t)

-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, quorum)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -54,13 +56,13 @@ func testCtlV2Set(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
 	}
 }

-func TestCtlV2Mk(t *testing.T)       { testCtlV2Mk(t, newConfigNoTLS(), false) }
-func TestCtlV2MkQuorum(t *testing.T) { testCtlV2Mk(t, newConfigNoTLS(), true) }
-func TestCtlV2MkTLS(t *testing.T)    { testCtlV2Mk(t, newConfigTLS(), false) }
-func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
+func TestCtlV2Mk(t *testing.T)       { testCtlV2Mk(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2MkQuorum(t *testing.T) { testCtlV2Mk(t, e2e.NewConfigNoTLS(), true) }
+func TestCtlV2MkTLS(t *testing.T)    { testCtlV2Mk(t, e2e.NewConfigTLS(), false) }
+func testCtlV2Mk(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) {
 	BeforeTestV2(t)

-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, quorum)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -78,12 +80,12 @@ func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
 	}
 }

-func TestCtlV2Rm(t *testing.T)    { testCtlV2Rm(t, newConfigNoTLS()) }
-func TestCtlV2RmTLS(t *testing.T) { testCtlV2Rm(t, newConfigTLS()) }
-func testCtlV2Rm(t *testing.T, cfg *etcdProcessClusterConfig) {
+func TestCtlV2Rm(t *testing.T)    { testCtlV2Rm(t, e2e.NewConfigNoTLS()) }
+func TestCtlV2RmTLS(t *testing.T) { testCtlV2Rm(t, e2e.NewConfigTLS()) }
+func testCtlV2Rm(t *testing.T, cfg *e2e.EtcdProcessClusterConfig) {
 	BeforeTestV2(t)

-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, true)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -101,13 +103,13 @@ func testCtlV2Rm(t *testing.T, cfg *etcdProcessClusterConfig) {
 	}
 }

-func TestCtlV2Ls(t *testing.T)       { testCtlV2Ls(t, newConfigNoTLS(), false) }
-func TestCtlV2LsQuorum(t *testing.T) { testCtlV2Ls(t, newConfigNoTLS(), true) }
-func TestCtlV2LsTLS(t *testing.T)    { testCtlV2Ls(t, newConfigTLS(), false) }
-func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
+func TestCtlV2Ls(t *testing.T)       { testCtlV2Ls(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2LsQuorum(t *testing.T) { testCtlV2Ls(t, e2e.NewConfigNoTLS(), true) }
+func TestCtlV2LsTLS(t *testing.T)    { testCtlV2Ls(t, e2e.NewConfigTLS(), false) }
+func testCtlV2Ls(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) {
 	BeforeTestV2(t)

-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, quorum)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -122,13 +124,13 @@ func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
 	}
 }

-func TestCtlV2Watch(t *testing.T)    { testCtlV2Watch(t, newConfigNoTLS(), false) }
-func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, newConfigTLS(), false) }
+func TestCtlV2Watch(t *testing.T)    { testCtlV2Watch(t, e2e.NewConfigNoTLS(), false) }
+func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, e2e.NewConfigTLS(), false) }

-func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
+func testCtlV2Watch(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, noSync bool) {
 	BeforeTestV2(t)

-	cfg.enableV2 = true
+	cfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, cfg, true)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -151,8 +153,8 @@ func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
 func TestCtlV2GetRoleUser(t *testing.T) {
 	BeforeTestV2(t)

-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -172,7 +174,7 @@ func TestCtlV2GetRoleUser(t *testing.T) {
 	// ensure double grant gives an error; was crashing in 2.3.1
 	regrantArgs := etcdctlPrefixArgs(epc)
 	regrantArgs = append(regrantArgs, "user", "grant", "--roles", "foo", "username")
-	if err := spawnWithExpect(regrantArgs, "duplicate"); err != nil {
+	if err := e2e.SpawnWithExpect(regrantArgs, "duplicate"); err != nil {
 		t.Fatalf("missing duplicate error on double grant role (%v)", err)
 	}
 }

@@ -182,8 +184,8 @@ func TestCtlV2UserListRoot(t *testing.T) { testCtlV2UserList(t, "root") }
 func testCtlV2UserList(t *testing.T, username string) {
 	BeforeTestV2(t)

-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -198,8 +200,8 @@ func testCtlV2UserList(t *testing.T, username string) {
 func TestCtlV2RoleList(t *testing.T) {
 	BeforeTestV2(t)

-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -232,9 +234,9 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 		t.Fatal(err)
 	}

-	etcdCfg := newConfigNoTLS()
-	etcdCfg.snapshotCount = snapCount
-	etcdCfg.enableV2 = true
+	etcdCfg := e2e.NewConfigNoTLS()
+	etcdCfg.SnapshotCount = snapCount
+	etcdCfg.EnableV2 = true
 	t.Log("Starting etcd-1")
 	epc1 := setupEtcdctlTest(t, etcdCfg, false)

@@ -259,7 +261,7 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 		}
 	}
 	t.Log("Triggering etcd backup")
-	if err := etcdctlBackup(t, epc1, epc1.procs[0].Config().dataDirPath, backupDir, v3, utl); err != nil {
+	if err := etcdctlBackup(t, epc1, epc1.Procs[0].Config().DataDirPath, backupDir, v3, utl); err != nil {
 		t.Fatal(err)
 	}
 	t.Log("Closing etcd-1 backup")

@@ -271,11 +273,11 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {

 	t.Log("Starting etcd-2 (post backup)")
 	// restart from the backup directory
-	cfg2 := newConfigNoTLS()
-	cfg2.dataDirPath = backupDir
-	cfg2.keepDataDir = true
-	cfg2.forceNewCluster = true
-	cfg2.enableV2 = true
+	cfg2 := e2e.NewConfigNoTLS()
+	cfg2.DataDirPath = backupDir
+	cfg2.KeepDataDir = true
+	cfg2.ForceNewCluster = true
+	cfg2.EnableV2 = true
 	epc2 := setupEtcdctlTest(t, cfg2, false)
 	// Make sure a failing test is not leaking resources (running server).
 	defer epc2.Close()

@@ -318,9 +320,9 @@ func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
 func TestCtlV2AuthWithCommonName(t *testing.T) {
 	BeforeTestV2(t)

-	copiedCfg := newConfigClientTLS()
-	copiedCfg.clientCertAuthEnabled = true
-	copiedCfg.enableV2 = true
+	copiedCfg := e2e.NewConfigClientTLS()
+	copiedCfg.ClientCertAuthEnabled = true
+	copiedCfg.EnableV2 = true
 	epc := setupEtcdctlTest(t, copiedCfg, false)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -350,8 +352,8 @@ func TestCtlV2AuthWithCommonName(t *testing.T) {
 func TestCtlV2ClusterHealth(t *testing.T) {
 	BeforeTestV2(t)

-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, true)
 	defer cleanupEtcdProcessCluster(epc, t)

@@ -361,7 +363,7 @@ func TestCtlV2ClusterHealth(t *testing.T) {
 	}

 	// missing members, has quorum
-	epc.procs[0].Stop()
+	epc.Procs[0].Stop()

 	for i := 0; i < 3; i++ {
 		err := etcdctlClusterHealth(epc, "cluster is degraded")

@@ -375,129 +377,129 @@ func TestCtlV2ClusterHealth(t *testing.T) {
 	}

 	// no quorum
-	epc.procs[1].Stop()
+	epc.Procs[1].Stop()
 	if err := etcdctlClusterHealth(epc, "cluster is unavailable"); err != nil {
 		t.Fatalf("cluster-health expected to be unavailable (%v)", err)
 	}

-	epc.procs[0], epc.procs[1] = nil, nil
+	epc.Procs[0], epc.Procs[1] = nil, nil
 }

-func etcdctlPrefixArgs(clus *etcdProcessCluster) []string {
+func etcdctlPrefixArgs(clus *e2e.EtcdProcessCluster) []string {
 	endpoints := strings.Join(clus.EndpointsV2(), ",")
-	cmdArgs := []string{ctlBinPath}
+	cmdArgs := []string{e2e.CtlBinPath}

 	cmdArgs = append(cmdArgs, "--endpoints", endpoints)
-	if clus.cfg.clientTLS == clientTLS {
-		cmdArgs = append(cmdArgs, "--ca-file", caPath, "--cert-file", certPath, "--key-file", privateKeyPath)
+	if clus.Cfg.ClientTLS == e2e.ClientTLS {
+		cmdArgs = append(cmdArgs, "--ca-file", e2e.CaPath, "--cert-file", e2e.CertPath, "--key-file", e2e.PrivateKeyPath)
 	}
 	return cmdArgs
 }

 func etcductlPrefixArgs(utl bool) []string {
 	if utl {
-		return []string{utlBinPath}
+		return []string{e2e.UtlBinPath}
 	}
-	return []string{ctlBinPath}
+	return []string{e2e.CtlBinPath}
 }

-func etcdctlClusterHealth(clus *etcdProcessCluster, val string) error {
+func etcdctlClusterHealth(clus *e2e.EtcdProcessCluster, val string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "cluster-health")
-	return spawnWithExpect(cmdArgs, val)
+	return e2e.SpawnWithExpect(cmdArgs, val)
 }

-func etcdctlSet(clus *etcdProcessCluster, key, value string) error {
+func etcdctlSet(clus *e2e.EtcdProcessCluster, key, value string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "set", key, value)
-	return spawnWithExpect(cmdArgs, value)
+	return e2e.SpawnWithExpect(cmdArgs, value)
 }

-func etcdctlMk(clus *etcdProcessCluster, key, value string, first bool) error {
+func etcdctlMk(clus *e2e.EtcdProcessCluster, key, value string, first bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "mk", key, value)
 	if first {
-		return spawnWithExpect(cmdArgs, value)
+		return e2e.SpawnWithExpect(cmdArgs, value)
 	}
-	return spawnWithExpect(cmdArgs, "Error: 105: Key already exists")
+	return e2e.SpawnWithExpect(cmdArgs, "Error: 105: Key already exists")
 }

-func etcdctlGet(clus *etcdProcessCluster, key, value string, quorum bool) error {
+func etcdctlGet(clus *e2e.EtcdProcessCluster, key, value string, quorum bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "get", key)
 	if quorum {
 		cmdArgs = append(cmdArgs, "--quorum")
 	}
-	return spawnWithExpect(cmdArgs, value)
+	return e2e.SpawnWithExpect(cmdArgs, value)
 }

-func etcdctlRm(clus *etcdProcessCluster, key, value string, first bool) error {
+func etcdctlRm(clus *e2e.EtcdProcessCluster, key, value string, first bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "rm", key)
 	if first {
-		return spawnWithExpect(cmdArgs, "PrevNode.Value: "+value)
+		return e2e.SpawnWithExpect(cmdArgs, "PrevNode.Value: "+value)
 	}
-	return spawnWithExpect(cmdArgs, "Error: 100: Key not found")
+	return e2e.SpawnWithExpect(cmdArgs, "Error: 100: Key not found")
 }

-func etcdctlLs(clus *etcdProcessCluster, key string, quorum bool) error {
+func etcdctlLs(clus *e2e.EtcdProcessCluster, key string, quorum bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "ls")
 	if quorum {
 		cmdArgs = append(cmdArgs, "--quorum")
 	}
-	return spawnWithExpect(cmdArgs, key)
+	return e2e.SpawnWithExpect(cmdArgs, key)
 }

-func etcdctlWatch(clus *etcdProcessCluster, key, value string, noSync bool) <-chan error {
+func etcdctlWatch(clus *e2e.EtcdProcessCluster, key, value string, noSync bool) <-chan error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "watch", "--after-index=1", key)
 	if noSync {
 		cmdArgs = append(cmdArgs, "--no-sync")
 	}
 	errc := make(chan error, 1)
 	go func() {
-		errc <- spawnWithExpect(cmdArgs, value)
+		errc <- e2e.SpawnWithExpect(cmdArgs, value)
 	}()
 	return errc
 }

-func etcdctlRoleAdd(clus *etcdProcessCluster, role string) error {
+func etcdctlRoleAdd(clus *e2e.EtcdProcessCluster, role string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "role", "add", role)
-	return spawnWithExpect(cmdArgs, role)
+	return e2e.SpawnWithExpect(cmdArgs, role)
 }

-func etcdctlRoleGrant(clus *etcdProcessCluster, role string, perms ...string) error {
+func etcdctlRoleGrant(clus *e2e.EtcdProcessCluster, role string, perms ...string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "role", "grant")
 	cmdArgs = append(cmdArgs, perms...)
 	cmdArgs = append(cmdArgs, role)
-	return spawnWithExpect(cmdArgs, role)
+	return e2e.SpawnWithExpect(cmdArgs, role)
 }

-func etcdctlRoleList(clus *etcdProcessCluster, expectedRole string) error {
+func etcdctlRoleList(clus *e2e.EtcdProcessCluster, expectedRole string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "role", "list")
-	return spawnWithExpect(cmdArgs, expectedRole)
+	return e2e.SpawnWithExpect(cmdArgs, expectedRole)
 }

-func etcdctlUserAdd(clus *etcdProcessCluster, user, pass string) error {
+func etcdctlUserAdd(clus *e2e.EtcdProcessCluster, user, pass string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "add", user+":"+pass)
-	return spawnWithExpect(cmdArgs, "User "+user+" created")
+	return e2e.SpawnWithExpect(cmdArgs, "User "+user+" created")
 }

-func etcdctlUserGrant(clus *etcdProcessCluster, user, role string) error {
+func etcdctlUserGrant(clus *e2e.EtcdProcessCluster, user, role string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "grant", "--roles", role, user)
-	return spawnWithExpect(cmdArgs, "User "+user+" updated")
+	return e2e.SpawnWithExpect(cmdArgs, "User "+user+" updated")
 }

-func etcdctlUserGet(clus *etcdProcessCluster, user string) error {
+func etcdctlUserGet(clus *e2e.EtcdProcessCluster, user string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "get", user)
-	return spawnWithExpect(cmdArgs, "User: "+user)
+	return e2e.SpawnWithExpect(cmdArgs, "User: "+user)
 }

-func etcdctlUserList(clus *etcdProcessCluster, expectedUser string) error {
+func etcdctlUserList(clus *e2e.EtcdProcessCluster, expectedUser string) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "user", "list")
-	return spawnWithExpect(cmdArgs, expectedUser)
+	return e2e.SpawnWithExpect(cmdArgs, expectedUser)
 }

-func etcdctlAuthEnable(clus *etcdProcessCluster) error {
+func etcdctlAuthEnable(clus *e2e.EtcdProcessCluster) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "auth", "enable")
-	return spawnWithExpect(cmdArgs, "Authentication Enabled")
+	return e2e.SpawnWithExpect(cmdArgs, "Authentication Enabled")
 }

-func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir string, v3 bool, utl bool) error {
+func etcdctlBackup(t testing.TB, clus *e2e.EtcdProcessCluster, dataDir, backupDir string, v3 bool, utl bool) error {
 	cmdArgs := append(etcductlPrefixArgs(utl), "backup", "--data-dir", dataDir, "--backup-dir", backupDir)
 	if v3 {
 		cmdArgs = append(cmdArgs, "--with-v3")

@@ -505,7 +507,7 @@ func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir st
 		cmdArgs = append(cmdArgs, "--with-v3=false")
 	}
 	t.Logf("Running: %v", cmdArgs)
-	proc, err := spawnCmd(cmdArgs, nil)
+	proc, err := e2e.SpawnCmd(cmdArgs, nil)
 	if err != nil {
 		return err
 	}

@@ -516,18 +518,18 @@ func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir st
 	return proc.ProcessError()
 }

-func setupEtcdctlTest(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) *etcdProcessCluster {
+func setupEtcdctlTest(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) *e2e.EtcdProcessCluster {
 	if !quorum {
-		cfg = configStandalone(*cfg)
+		cfg = e2e.ConfigStandalone(*cfg)
 	}
-	epc, err := newEtcdProcessCluster(t, cfg)
+	epc, err := e2e.NewEtcdProcessCluster(t, cfg)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
 	return epc
 }

-func cleanupEtcdProcessCluster(epc *etcdProcessCluster, t *testing.T) {
+func cleanupEtcdProcessCluster(epc *e2e.EtcdProcessCluster, t *testing.T) {
 	if errC := epc.Close(); errC != nil {
 		t.Fatalf("error closing etcd processes (%v)", errC)
 	}
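
All of the etcdctl wrappers above follow the same shape: etcdctlPrefixArgs assembles the binary path, --endpoints, and any TLS flags, and e2e.SpawnWithExpect runs the command and waits for an expected substring in its output. A short sketch of how a test body composes them (the key and value are hypothetical; the helpers are the ones defined above):

func exampleSetThenGet(t *testing.T, clus *e2e.EtcdProcessCluster) {
	// "set foo bar" must echo the value back on success.
	if err := etcdctlSet(clus, "foo", "bar"); err != nil {
		t.Fatalf("set failed (%v)", err)
	}
	// quorum=true adds --quorum so the v2 read goes through consensus.
	if err := etcdctlGet(clus, "foo", "bar", true); err != nil {
		t.Fatalf("get failed (%v)", err)
	}
}
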
@@ -22,6 +22,7 @@ import (
 	"time"

 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3Alarm(t *testing.T) {

@@ -53,7 +54,7 @@ func alarmTest(cx ctlCtx) {
 	}

 	// '/health' handler should return 'false'
-	if err := cURLGet(cx.epc, cURLReq{endpoint: "/health", expected: `{"health":"false","reason":"ALARM NOSPACE"}`}); err != nil {
+	if err := e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: "/health", Expected: `{"health":"false","reason":"ALARM NOSPACE"}`}); err != nil {
 		cx.t.Fatalf("failed get with curl (%v)", err)
 	}

@@ -101,5 +102,5 @@ func alarmTest(cx ctlCtx) {

 func ctlV3Alarm(cx ctlCtx, cmd string, as ...string) error {
 	cmdArgs := append(cx.PrefixArgs(), "alarm", cmd)
-	return spawnWithExpects(cmdArgs, cx.envMap, as...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, as...)
 }
@@ -21,14 +21,16 @@ package e2e

 import (
 	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3AuthCertCN(t *testing.T) {
-	testCtl(t, authTestCertCN, withCfg(*newConfigClientTLSCertAuth()))
+	testCtl(t, authTestCertCN, withCfg(*e2e.NewConfigClientTLSCertAuth()))
 }
 func TestCtlV3AuthCertCNAndUsername(t *testing.T) {
-	testCtl(t, authTestCertCNAndUsername, withCfg(*newConfigClientTLSCertAuth()))
+	testCtl(t, authTestCertCNAndUsername, withCfg(*e2e.NewConfigClientTLSCertAuth()))
 }
 func TestCtlV3AuthCertCNAndUsernameNoPassword(t *testing.T) {
-	testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*newConfigClientTLSCertAuth()))
+	testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*e2e.NewConfigClientTLSCertAuth()))
 }
@@ -23,6 +23,7 @@ import (
 	"time"

 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3AuthEnable(t *testing.T) {

@@ -35,7 +36,7 @@ func TestCtlV3AuthRoleUpdate(t *testing.T)     { testCtl(t, authRoleUpdateT
 func TestCtlV3AuthUserDeleteDuringOps(t *testing.T)  { testCtl(t, authUserDeleteDuringOpsTest) }
 func TestCtlV3AuthRoleRevokeDuringOps(t *testing.T)  { testCtl(t, authRoleRevokeDuringOpsTest) }
 func TestCtlV3AuthTxn(t *testing.T)                  { testCtl(t, authTestTxn) }
-func TestCtlV3AuthTxnJWT(t *testing.T)               { testCtl(t, authTestTxn, withCfg(*newConfigJWT())) }
+func TestCtlV3AuthTxnJWT(t *testing.T)               { testCtl(t, authTestTxn, withCfg(*e2e.NewConfigJWT())) }
 func TestCtlV3AuthPrefixPerm(t *testing.T)           { testCtl(t, authTestPrefixPerm) }
 func TestCtlV3AuthMemberAdd(t *testing.T)            { testCtl(t, authTestMemberAdd) }
 func TestCtlV3AuthMemberRemove(t *testing.T) {

@@ -46,7 +47,7 @@ func TestCtlV3AuthRevokeWithDelete(t *testing.T) { testCtl(t, authTestRevokeWith
 func TestCtlV3AuthInvalidMgmt(t *testing.T)      { testCtl(t, authTestInvalidMgmt) }
 func TestCtlV3AuthFromKeyPerm(t *testing.T)      { testCtl(t, authTestFromKeyPerm) }
 func TestCtlV3AuthAndWatch(t *testing.T)         { testCtl(t, authTestWatch) }
-func TestCtlV3AuthAndWatchJWT(t *testing.T)      { testCtl(t, authTestWatch, withCfg(*newConfigJWT())) }
+func TestCtlV3AuthAndWatchJWT(t *testing.T)      { testCtl(t, authTestWatch, withCfg(*e2e.NewConfigJWT())) }

 func TestCtlV3AuthLeaseTestKeepAlive(t *testing.T) { testCtl(t, authLeaseTestKeepAlive) }
 func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) {

@@ -54,7 +55,7 @@ func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) {
 }
 func TestCtlV3AuthLeaseGrantLeases(t *testing.T) { testCtl(t, authLeaseTestLeaseGrantLeases) }
 func TestCtlV3AuthLeaseGrantLeasesJWT(t *testing.T) {
-	testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(*newConfigJWT()))
+	testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(*e2e.NewConfigJWT()))
 }
 func TestCtlV3AuthLeaseRevoke(t *testing.T) { testCtl(t, authLeaseTestLeaseRevoke) }

@@ -66,9 +67,13 @@ func TestCtlV3AuthDefrag(t *testing.T) { testCtl(t, authTestDefrag) }
 func TestCtlV3AuthEndpointHealth(t *testing.T) {
 	testCtl(t, authTestEndpointHealth, withQuorum())
 }
-func TestCtlV3AuthSnapshot(t *testing.T)    { testCtl(t, authTestSnapshot) }
-func TestCtlV3AuthSnapshotJWT(t *testing.T) { testCtl(t, authTestSnapshot, withCfg(*newConfigJWT())) }
-func TestCtlV3AuthJWTExpire(t *testing.T)   { testCtl(t, authTestJWTExpire, withCfg(*newConfigJWT())) }
+func TestCtlV3AuthSnapshot(t *testing.T) { testCtl(t, authTestSnapshot) }
+func TestCtlV3AuthSnapshotJWT(t *testing.T) {
+	testCtl(t, authTestSnapshot, withCfg(*e2e.NewConfigJWT()))
+}
+func TestCtlV3AuthJWTExpire(t *testing.T) {
+	testCtl(t, authTestJWTExpire, withCfg(*e2e.NewConfigJWT()))
+}
 func TestCtlV3AuthRevisionConsistency(t *testing.T) { testCtl(t, authTestRevisionConsistency) }

 func authEnableTest(cx ctlCtx) {

@@ -93,7 +98,7 @@ func authEnable(cx ctlCtx) error {

 func ctlV3AuthEnable(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "auth", "enable")
-	return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Enabled")
+	return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Enabled")
 }

 func authDisableTest(cx ctlCtx) {

@@ -139,12 +144,12 @@ func authDisableTest(cx ctlCtx) {

 func ctlV3AuthDisable(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "auth", "disable")
-	return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Disabled")
+	return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Disabled")
 }

 func authStatusTest(cx ctlCtx) {
 	cmdArgs := append(cx.PrefixArgs(), "auth", "status")
-	if err := spawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: false", "AuthRevision:"); err != nil {
+	if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: false", "AuthRevision:"); err != nil {
 		cx.t.Fatal(err)
 	}

@@ -155,15 +160,15 @@ func authStatusTest(cx ctlCtx) {
 	cx.user, cx.pass = "root", "root"
 	cmdArgs = append(cx.PrefixArgs(), "auth", "status")

-	if err := spawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: true", "AuthRevision:"); err != nil {
+	if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: true", "AuthRevision:"); err != nil {
 		cx.t.Fatal(err)
 	}

 	cmdArgs = append(cx.PrefixArgs(), "auth", "status", "--write-out", "json")
-	if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "enabled"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "enabled"); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "authRevision"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "authRevision"); err != nil {
 		cx.t.Fatal(err)
 	}
 }

@@ -381,25 +386,25 @@ func authRoleRevokeDuringOpsTest(cx ctlCtx) {
 }

 func ctlV3PutFailAuth(cx ctlCtx, key, val string) error {
-	return spawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "authentication failed")
+	return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "authentication failed")
 }

 func ctlV3PutFailPerm(cx ctlCtx, key, val string) error {
-	return spawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "permission denied")
+	return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "permission denied")
 }

 func authSetupTestUser(cx ctlCtx) {
 	if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
 		cx.t.Fatal(err)
 	}
 	if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role"}, "Role test-role is granted to user test-user", nil); err != nil {
 		cx.t.Fatal(err)
 	}
 	cmd := append(cx.PrefixArgs(), "role", "grant-permission", "test-role", "readwrite", "foo")
-	if err := spawnWithExpectWithEnv(cmd, cx.envMap, "Role test-role updated"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(cmd, cx.envMap, "Role test-role updated"); err != nil {
 		cx.t.Fatal(err)
 	}
 }

@@ -537,7 +542,7 @@ func authTestMemberAdd(cx ctlCtx) {
 	cx.user, cx.pass = "root", "root"
 	authSetupTestUser(cx)

-	peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+	peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
 	// ordinary user cannot add a new member
 	cx.user, cx.pass = "test-user", "pass"
 	if err := ctlV3MemberAdd(cx, peerURL, false); err == nil {

@@ -589,7 +594,7 @@ func authTestMemberUpdate(cx ctlCtx) {

 	// ordinary user cannot update a member
 	cx.user, cx.pass = "test-user", "pass"
-	peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+	peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
 	memberID := fmt.Sprintf("%x", mr.Members[0].ID)
 	if err = ctlV3MemberUpdate(cx, memberID, peerURL); err == nil {
 		cx.t.Fatalf("ordinary user must not be allowed to update a member")

@@ -611,7 +616,7 @@ func authTestCertCN(cx ctlCtx) {
 	if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
 		cx.t.Fatal(err)
 	}
 	if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role"}, "Role test-role is granted to user example.com", nil); err != nil {

@@ -921,13 +926,13 @@ func authTestRoleGet(cx ctlCtx) {
 		"KV Read:", "foo",
 		"KV Write:", "foo",
 	}
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}

 	// test-user can get the information of test-role because it belongs to the role
 	cx.user, cx.pass = "test-user", "pass"
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}

@@ -935,7 +940,7 @@ func authTestRoleGet(cx ctlCtx) {
 	expected = []string{
 		"Error: etcdserver: permission denied",
 	}
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 }

@@ -952,13 +957,13 @@ func authTestUserGet(cx ctlCtx) {
 		"Roles: test-role",
 	}

-	if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}

 	// test-user can get the information of test-user itself
 	cx.user, cx.pass = "test-user", "pass"
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}

@@ -966,7 +971,7 @@ func authTestUserGet(cx ctlCtx) {
 	expected = []string{
 		"Error: etcdserver: permission denied",
 	}
-	if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), cx.envMap, expected...); err != nil {
+	if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), cx.envMap, expected...); err != nil {
 		cx.t.Fatal(err)
 	}
 }

@@ -977,7 +982,7 @@ func authTestRoleList(cx ctlCtx) {
 	}
 	cx.user, cx.pass = "root", "root"
 	authSetupTestUser(cx)
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "list"), cx.envMap, "test-role"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "list"), cx.envMap, "test-role"); err != nil {
 		cx.t.Fatal(err)
 	}
 }

@@ -1088,7 +1093,7 @@ func certCNAndUsername(cx ctlCtx, noPassword bool) {
 			cx.t.Fatal(err)
 		}
 	}
-	if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, "Role test-role-cn created"); err != nil {
+	if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, "Role test-role-cn created"); err != nil {
 		cx.t.Fatal(err)
 	}
 	if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role-cn"}, "Role test-role-cn is granted to user example.com", nil); err != nil {

@@ -1174,7 +1179,7 @@ func authTestRevisionConsistency(cx ctlCtx) {
 	}

 	// get node0 auth revision
-	node0 := cx.epc.procs[0]
+	node0 := cx.epc.Procs[0]
 	endpoint := node0.EndpointsV3()[0]
 	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, Username: cx.user, Password: cx.pass, DialTimeout: 3 * time.Second})
 	if err != nil {
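
The JWT variants above (TestCtlV3AuthTxnJWT, TestCtlV3AuthSnapshotJWT, and so on) all opt in through e2e.NewConfigJWT. Tracing that preset back to the newConfigJWT body in the deleted cluster.go, the server under test ends up started with an --auth-token flag of roughly this shape (a sketch, not part of this commit; fixturesDir points at the test fixtures directory wired up in that file, and the helper name is hypothetical):

// Sketch: the --auth-token value that newConfigJWT/e2e.NewConfigJWT builds
// (reconstructed from authTokenOpts in the deleted cluster.go above).
func jwtAuthTokenOpts(fixturesDir string) string {
	return "jwt,pub-key=" + path.Join(fixturesDir, "server.crt") +
		",priv-key=" + path.Join(fixturesDir, "server.key.insecure") +
		",sign-method=RS256,ttl=1s"
}
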
@@ -18,6 +18,8 @@ import (
 	"strconv"
 	"strings"
 	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3Compact(t *testing.T) { testCtl(t, compactTest) }

@@ -71,5 +73,5 @@ func ctlV3Compact(cx ctlCtx, rev int64, physical bool) error {
 	if physical {
 		cmdArgs = append(cmdArgs, "--physical")
 	}
-	return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "compacted revision "+rs)
+	return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "compacted revision "+rs)
 }
@@ -22,14 +22,15 @@ import (
 	"testing"

 	"github.com/stretchr/testify/require"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

-func TestCtlV3CompletionBash(t *testing.T) { testShellCompletion(t, ctlBinPath, "bash") }
+func TestCtlV3CompletionBash(t *testing.T) { testShellCompletion(t, e2e.CtlBinPath, "bash") }

-func TestUtlV3CompletionBash(t *testing.T) { testShellCompletion(t, utlBinPath, "bash") }
+func TestUtlV3CompletionBash(t *testing.T) { testShellCompletion(t, e2e.UtlBinPath, "bash") }

 func testShellCompletion(t *testing.T, binPath, shellName string) {
-	BeforeTest(t)
+	e2e.BeforeTest(t)

 	stdout := new(bytes.Buffer)
 	completionCmd := exec.Command(binPath, "completion", shellName)
@@ -14,7 +14,11 @@

 package e2e

-import "testing"
+import (
+	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
+)

 func TestCtlV3DefragOnline(t *testing.T) { testCtl(t, defragOnlineTest) }

@@ -48,17 +52,17 @@ func defragOnlineTest(cx ctlCtx) {

 func ctlV3OnlineDefrag(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "defrag")
-	lines := make([]string, cx.epc.cfg.clusterSize)
+	lines := make([]string, cx.epc.Cfg.ClusterSize)
 	for i := range lines {
 		lines[i] = "Finished defragmenting etcd member"
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }

 func ctlV3OfflineDefrag(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgsUtl(), "defrag", "--data-dir", cx.dataDir)
 	lines := []string{"finished defragmenting directory"}
-	return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }

 func defragOfflineTest(cx ctlCtx) {
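
ctlV3OnlineDefrag above and ctlV3EndpointHealth further below share one pattern: expect one matching output line per cluster member, sized off cx.epc.Cfg.ClusterSize. A generic sketch of that pattern (the helper name is hypothetical; the identifiers are the ones used in the surrounding diff):

func expectPerMember(cx ctlCtx, cmdArgs []string, msg string) error {
	// one expected line per member of the cluster under test
	lines := make([]string, cx.epc.Cfg.ClusterSize)
	for i := range lines {
		lines[i] = msg
	}
	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
}
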
@@ -21,6 +21,7 @@ import (
 	"time"

 	"go.etcd.io/etcd/pkg/v3/expect"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3Elect(t *testing.T) {

@@ -72,7 +73,7 @@ func testElect(cx ctlCtx) {
 	if err = blocked.Signal(os.Interrupt); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err = closeWithTimeout(blocked, time.Second); err != nil {
+	if err = e2e.CloseWithTimeout(blocked, time.Second); err != nil {
 		cx.t.Fatal(err)
 	}

@@ -80,7 +81,7 @@ func testElect(cx ctlCtx) {
 	if err = holder.Signal(os.Interrupt); err != nil {
 		cx.t.Fatal(err)
 	}
-	if err = closeWithTimeout(holder, time.Second); err != nil {
+	if err = e2e.CloseWithTimeout(holder, time.Second); err != nil {
 		cx.t.Fatal(err)
 	}

@@ -98,7 +99,7 @@ func testElect(cx ctlCtx) {
 // ctlV3Elect creates a elect process with a channel listening for when it wins the election.
 func ctlV3Elect(cx ctlCtx, name, proposal string) (*expect.ExpectProcess, <-chan string, error) {
 	cmdArgs := append(cx.PrefixArgs(), "elect", name, proposal)
-	proc, err := spawnCmd(cmdArgs, cx.envMap)
+	proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
 	outc := make(chan string, 1)
 	if err != nil {
 		close(outc)
@@ -22,6 +22,7 @@ import (
 	"time"

 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3EndpointHealth(t *testing.T) { testCtl(t, endpointHealthTest, withQuorum()) }

@@ -36,11 +37,11 @@ func endpointHealthTest(cx ctlCtx) {

 func ctlV3EndpointHealth(cx ctlCtx) error {
 	cmdArgs := append(cx.PrefixArgs(), "endpoint", "health")
-	lines := make([]string, cx.epc.cfg.clusterSize)
+	lines := make([]string, cx.epc.Cfg.ClusterSize)
 	for i := range lines {
 		lines[i] = "is healthy"
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }

 func endpointStatusTest(cx ctlCtx) {

@@ -56,7 +57,7 @@ func ctlV3EndpointStatus(cx ctlCtx) error {
 		u, _ := url.Parse(ep)
 		eps = append(eps, u.Host)
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, eps...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, eps...)
 }

 func endpointHashKVTest(cx ctlCtx) {

@@ -88,5 +89,5 @@ func ctlV3EndpointHashKV(cx ctlCtx) error {
 		u, _ := url.Parse(ep)
 		ss = append(ss, fmt.Sprintf("%s, %d", u.Host, hresp.Hash))
 	}
-	return spawnWithExpects(cmdArgs, cx.envMap, ss...)
+	return e2e.SpawnWithExpects(cmdArgs, cx.envMap, ss...)
 }
@ -25,6 +25,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.etcd.io/etcd/client/pkg/v3/testutil"
|
||||
"go.etcd.io/etcd/tests/v3/framework/e2e"
|
||||
)
|
||||
|
||||
func TestAuthority(t *testing.T) {
|
||||
@ -80,18 +81,18 @@ func TestAuthority(t *testing.T) {
|
||||
for _, tc := range tcs {
|
||||
for _, clusterSize := range []int{1, 3} {
|
||||
t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
|
||||
BeforeTest(t)
|
||||
e2e.BeforeTest(t)
|
||||
|
||||
cfg := newConfigNoTLS()
|
||||
cfg.clusterSize = clusterSize
|
||||
cfg := e2e.NewConfigNoTLS()
|
||||
cfg.ClusterSize = clusterSize
|
||||
if tc.useTLS {
|
||||
cfg.clientTLS = clientTLS
|
||||
cfg.ClientTLS = e2e.ClientTLS
|
||||
}
|
||||
cfg.isClientAutoTLS = tc.useInsecureTLS
|
||||
cfg.IsClientAutoTLS = tc.useInsecureTLS
|
||||
// Enable debug mode to get logs with http2 headers (including authority)
|
||||
cfg.envVars = map[string]string{"GODEBUG": "http2debug=2"}
|
||||
cfg.EnvVars = map[string]string{"GODEBUG": "http2debug=2"}
|
||||
|
||||
epc, err := newEtcdProcessCluster(t, cfg)
|
||||
epc, err := e2e.NewEtcdProcessCluster(t, cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("could not start etcd process cluster (%v)", err)
|
||||
}
|
||||
@ -113,13 +114,13 @@ func TestAuthority(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func templateEndpoints(t *testing.T, pattern string, clus *etcdProcessCluster) []string {
|
||||
func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluster) []string {
|
||||
t.Helper()
|
||||
endpoints := []string{}
|
||||
for i := 0; i < clus.cfg.clusterSize; i++ {
|
||||
for i := 0; i < clus.Cfg.ClusterSize; i++ {
|
||||
ent := pattern
|
||||
if strings.Contains(ent, "%d") {
|
||||
ent = fmt.Sprintf(ent, etcdProcessBasePort+i*5)
|
||||
ent = fmt.Sprintf(ent, e2e.EtcdProcessBasePort+i*5)
|
||||
}
|
||||
if strings.Contains(ent, "%") {
|
||||
t.Fatalf("Failed to template pattern, %% symbol left %q", ent)
|
||||
@ -129,9 +130,9 @@ func templateEndpoints(t *testing.T, pattern string, clus *etcdProcessCluster) [
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func assertAuthority(t *testing.T, expectAurhority string, clus *etcdProcessCluster) {
|
||||
logs := []logsExpect{}
|
||||
for _, proc := range clus.procs {
|
||||
func assertAuthority(t *testing.T, expectAurhority string, clus *e2e.EtcdProcessCluster) {
|
||||
logs := []e2e.LogsExpect{}
|
||||
for _, proc := range clus.Procs {
|
||||
logs = append(logs, proc.Logs())
|
||||
}
|
||||
line := firstMatch(t, `http2: decoded hpack field header field ":authority"`, logs...)
|
||||
@ -141,11 +142,11 @@ func assertAuthority(t *testing.T, expectAurhority string, clus *etcdProcessClus
|
||||
assert.True(t, strings.HasSuffix(line, expectLine), fmt.Sprintf("Got %q expected suffix %q", line, expectLine))
|
||||
}
|
||||
|
||||
func firstMatch(t *testing.T, expectLine string, logs ...logsExpect) string {
|
||||
func firstMatch(t *testing.T, expectLine string, logs ...e2e.LogsExpect) string {
|
||||
t.Helper()
|
||||
match := make(chan string, len(logs))
|
||||
for i := range logs {
|
||||
go func(l logsExpect) {
|
||||
go func(l e2e.LogsExpect) {
|
||||
line, _ := l.Expect(expectLine)
|
||||
match <- line
|
||||
}(logs[i])
|
||||
@ -168,11 +169,11 @@ func executeWithTimeout(t *testing.T, timeout time.Duration, f func()) {
|
||||
}
|
||||
|
||||
type etcdctlV3 struct {
|
||||
cfg *etcdProcessClusterConfig
|
||||
cfg *e2e.EtcdProcessClusterConfig
|
||||
endpoints []string
|
||||
}
|
||||
|
||||
func clusterEtcdctlV3(cfg *etcdProcessClusterConfig, endpoints []string) *etcdctlV3 {
|
||||
func clusterEtcdctlV3(cfg *e2e.EtcdProcessClusterConfig, endpoints []string) *etcdctlV3 {
|
||||
return &etcdctlV3{
|
||||
cfg: cfg,
|
||||
endpoints: endpoints,
|
||||
@ -184,28 +185,28 @@ func (ctl *etcdctlV3) Put(key, value string) error {
|
||||
}
|
||||
|
||||
func (ctl *etcdctlV3) runCmd(args ...string) error {
|
||||
cmdArgs := []string{ctlBinPath + "3"}
|
||||
cmdArgs := []string{e2e.CtlBinPath + "3"}
|
||||
for k, v := range ctl.flags() {
|
||||
cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
|
||||
}
|
||||
cmdArgs = append(cmdArgs, args...)
|
||||
return spawnWithExpect(cmdArgs, "OK")
|
||||
return e2e.SpawnWithExpect(cmdArgs, "OK")
|
||||
}
|
||||
|
||||
func (ctl *etcdctlV3) flags() map[string]string {
|
||||
fmap := make(map[string]string)
|
||||
if ctl.cfg.clientTLS == clientTLS {
|
||||
if ctl.cfg.isClientAutoTLS {
|
||||
if ctl.cfg.ClientTLS == e2e.ClientTLS {
|
||||
if ctl.cfg.IsClientAutoTLS {
|
||||
fmap["insecure-transport"] = "false"
|
||||
fmap["insecure-skip-tls-verify"] = "true"
|
||||
} else if ctl.cfg.isClientCRL {
|
||||
fmap["cacert"] = caPath
|
||||
fmap["cert"] = revokedCertPath
|
||||
fmap["key"] = revokedPrivateKeyPath
|
||||
} else if ctl.cfg.IsClientCRL {
|
||||
fmap["cacert"] = e2e.CaPath
|
||||
fmap["cert"] = e2e.RevokedCertPath
|
||||
fmap["key"] = e2e.RevokedPrivateKeyPath
|
||||
} else {
|
||||
fmap["cacert"] = caPath
|
||||
fmap["cert"] = certPath
|
||||
fmap["key"] = privateKeyPath
|
||||
fmap["cacert"] = e2e.CaPath
|
||||
fmap["cert"] = e2e.CertPath
|
||||
fmap["key"] = e2e.PrivateKeyPath
|
||||
}
|
||||
}
|
||||
fmap["endpoints"] = strings.Join(ctl.endpoints, ",")
|
||||
|
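The hunks above are mechanical: unexported helpers (newEtcdProcessCluster, spawnWithExpect, ...) and config fields become exported members of the new framework/e2e package. For orientation, a minimal sketch — not part of the commit, test name illustrative — of what a migrated test now looks like:

    package e2e

    import (
        "testing"

        "go.etcd.io/etcd/tests/v3/framework/e2e"
    )

    func TestSketchCluster(t *testing.T) {
        e2e.BeforeTest(t) // was: BeforeTest(t)

        cfg := e2e.NewConfigNoTLS() // was: newConfigNoTLS()
        cfg.ClusterSize = 3         // exported field, was: cfg.clusterSize

        epc, err := e2e.NewEtcdProcessCluster(t, cfg) // was: newEtcdProcessCluster(t, cfg)
        if err != nil {
            t.Fatalf("could not start etcd process cluster (%v)", err)
        }
        // teardown elided: the commit's tests stop the cluster in a deferred func
        _ = epc
    }
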
@@ -19,27 +19,33 @@ import (
     "strings"
     "testing"
     "time"
+
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

-func TestCtlV3Put(t *testing.T) { testCtl(t, putTest, withDialTimeout(7*time.Second)) }
-func TestCtlV3PutNoTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3PutClientTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3PutClientAutoTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigClientAutoTLS())) }
-func TestCtlV3PutPeerTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDialTimeout(0)) }
+func TestCtlV3Put(t *testing.T) { testCtl(t, putTest, withDialTimeout(7*time.Second)) }
+func TestCtlV3PutNoTLS(t *testing.T) { testCtl(t, putTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3PutClientTLS(t *testing.T) { testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS())) }
+func TestCtlV3PutClientAutoTLS(t *testing.T) {
+    testCtl(t, putTest, withCfg(*e2e.NewConfigClientAutoTLS()))
+}
+func TestCtlV3PutPeerTLS(t *testing.T) { testCtl(t, putTest, withCfg(*e2e.NewConfigPeerTLS())) }
+func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDialTimeout(0)) }
 func TestCtlV3PutClientTLSFlagByEnv(t *testing.T) {
-    testCtl(t, putTest, withCfg(*newConfigClientTLS()), withFlagByEnv())
+    testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS()), withFlagByEnv())
 }
 func TestCtlV3PutIgnoreValue(t *testing.T) { testCtl(t, putTestIgnoreValue) }
 func TestCtlV3PutIgnoreLease(t *testing.T) { testCtl(t, putTestIgnoreLease) }

-func TestCtlV3Get(t *testing.T) { testCtl(t, getTest) }
-func TestCtlV3GetNoTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3GetClientTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3GetClientAutoTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigClientAutoTLS())) }
-func TestCtlV3GetPeerTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) }
-func TestCtlV3GetQuorum(t *testing.T) { testCtl(t, getTest, withQuorum()) }
+func TestCtlV3Get(t *testing.T) { testCtl(t, getTest) }
+func TestCtlV3GetNoTLS(t *testing.T) { testCtl(t, getTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3GetClientTLS(t *testing.T) { testCtl(t, getTest, withCfg(*e2e.NewConfigClientTLS())) }
+func TestCtlV3GetClientAutoTLS(t *testing.T) {
+    testCtl(t, getTest, withCfg(*e2e.NewConfigClientAutoTLS()))
+}
+func TestCtlV3GetPeerTLS(t *testing.T) { testCtl(t, getTest, withCfg(*e2e.NewConfigPeerTLS())) }
+func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) }
+func TestCtlV3GetQuorum(t *testing.T) { testCtl(t, getTest, withQuorum()) }

 func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) }
 func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) }
@@ -47,18 +53,18 @@ func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) }
 func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) }

 func TestCtlV3Del(t *testing.T) { testCtl(t, delTest) }
-func TestCtlV3DelNoTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3DelClientTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3DelPeerTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigPeerTLS())) }
+func TestCtlV3DelNoTLS(t *testing.T) { testCtl(t, delTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3DelClientTLS(t *testing.T) { testCtl(t, delTest, withCfg(*e2e.NewConfigClientTLS())) }
+func TestCtlV3DelPeerTLS(t *testing.T) { testCtl(t, delTest, withCfg(*e2e.NewConfigPeerTLS())) }
 func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) }

 func TestCtlV3GetRevokedCRL(t *testing.T) {
-    cfg := etcdProcessClusterConfig{
-        clusterSize: 1,
-        initialToken: "new",
-        clientTLS: clientTLS,
-        isClientCRL: true,
-        clientCertAuthEnabled: true,
+    cfg := e2e.EtcdProcessClusterConfig{
+        ClusterSize: 1,
+        InitialToken: "new",
+        ClientTLS: e2e.ClientTLS,
+        IsClientCRL: true,
+        ClientCertAuthEnabled: true,
     }
     testCtl(t, testGetRevokedCRL, withCfg(cfg))
 }
@@ -69,7 +75,7 @@ func testGetRevokedCRL(cx ctlCtx) {
         cx.t.Fatalf("expected reset connection on put, got %v", err)
     }
     // test accept
-    cx.epc.cfg.isClientCRL = false
+    cx.epc.Cfg.IsClientCRL = false
     if err := ctlV3Put(cx, "k", "v", ""); err != nil {
         cx.t.Fatal(err)
     }
@@ -190,7 +196,7 @@ func getFormatTest(cx ctlCtx) {
             cmdArgs = append(cmdArgs, "--print-value-only")
         }
         cmdArgs = append(cmdArgs, "abc")
-        if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, tt.wstr); err != nil {
+        if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, tt.wstr); err != nil {
             cx.t.Errorf("#%d: error (%v), wanted %v", i, err, tt.wstr)
         }
     }
@@ -228,24 +234,24 @@ func getKeysOnlyTest(cx ctlCtx) {
         cx.t.Fatal(err)
     }
     cmdArgs := append(cx.PrefixArgs(), []string{"get", "--keys-only", "key"}...)
-    if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "key"); err != nil {
+    if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "key"); err != nil {
         cx.t.Fatal(err)
     }
-    if err := spawnWithExpects(cmdArgs, cx.envMap, "val"); err == nil {
+    if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "val"); err == nil {
         cx.t.Fatalf("got value but passed --keys-only")
     }
 }

 func getCountOnlyTest(cx ctlCtx) {
     cmdArgs := append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
-    if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 0"); err != nil {
+    if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 0"); err != nil {
         cx.t.Fatal(err)
     }
     if err := ctlV3Put(cx, "key", "val", ""); err != nil {
         cx.t.Fatal(err)
     }
     cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
-    if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 1"); err != nil {
+    if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 1"); err != nil {
         cx.t.Fatal(err)
     }
     if err := ctlV3Put(cx, "key1", "val", ""); err != nil {
@@ -255,21 +261,21 @@ func getCountOnlyTest(cx ctlCtx) {
         cx.t.Fatal(err)
     }
     cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
-    if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 2"); err != nil {
+    if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 2"); err != nil {
         cx.t.Fatal(err)
     }
     if err := ctlV3Put(cx, "key2", "val", ""); err != nil {
         cx.t.Fatal(err)
     }
     cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
-    if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 3"); err != nil {
+    if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 3"); err != nil {
         cx.t.Fatal(err)
     }
     expected := []string{
         "\"Count\" : 3",
     }
     cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key3", "--prefix", "--write-out=fields"}...)
-    if err := spawnWithExpects(cmdArgs, cx.envMap, expected...); err == nil {
+    if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, expected...); err == nil {
         cx.t.Fatal(err)
     }
 }
@@ -348,7 +354,7 @@ func ctlV3Put(cx ctlCtx, key, value, leaseID string, flags ...string) error {
     if len(flags) != 0 {
         cmdArgs = append(cmdArgs, flags...)
     }
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK")
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK")
 }

 type kv struct {
@@ -365,7 +371,7 @@ func ctlV3Get(cx ctlCtx, args []string, kvs ...kv) error {
     for _, elem := range kvs {
         lines = append(lines, elem.key, elem.val)
     }
-    return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+    return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }

 // ctlV3GetWithErr runs "get" command expecting no output but error
@@ -375,11 +381,11 @@ func ctlV3GetWithErr(cx ctlCtx, args []string, errs []string) error {
     if !cx.quorum {
         cmdArgs = append(cmdArgs, "--consistency", "s")
     }
-    return spawnWithExpects(cmdArgs, cx.envMap, errs...)
+    return e2e.SpawnWithExpects(cmdArgs, cx.envMap, errs...)
 }

 func ctlV3Del(cx ctlCtx, args []string, num int) error {
     cmdArgs := append(cx.PrefixArgs(), "del")
     cmdArgs = append(cmdArgs, args...)
-    return spawnWithExpects(cmdArgs, cx.envMap, fmt.Sprintf("%d", num))
+    return e2e.SpawnWithExpects(cmdArgs, cx.envMap, fmt.Sprintf("%d", num))
 }

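Worth noting in the hunks above: withCfg takes the config by value — hence the pointer dereference on every e2e.NewConfig*() call — so each test mutates a private copy and variants cannot leak settings into one another. A sketch (not part of the commit) of the option pattern:

    func TestSketchPutClientTLS(t *testing.T) {
        // each option tweaks the ctlCtx before the cluster is started
        testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS()), withDialTimeout(7*time.Second))
    }
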
@@ -20,90 +20,92 @@ import (
     "strings"
     "testing"
     "time"
+
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3LeaseGrantTimeToLive(t *testing.T) { testCtl(t, leaseTestGrantTimeToLive) }
 func TestCtlV3LeaseGrantTimeToLiveNoTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigNoTLS()))
+    testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3LeaseGrantTimeToLiveClientTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigClientTLS()))
+    testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3LeaseGrantTimeToLiveClientAutoTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3LeaseGrantTimeToLivePeerTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigPeerTLS()))
+    testCtl(t, leaseTestGrantTimeToLive, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func TestCtlV3LeaseGrantLeases(t *testing.T) { testCtl(t, leaseTestGrantLeaseListed) }
 func TestCtlV3LeaseGrantLeasesNoTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigNoTLS()))
+    testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3LeaseGrantLeasesClientTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigClientTLS()))
+    testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3LeaseGrantLeasesClientAutoTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3LeaseGrantLeasesPeerTLS(t *testing.T) {
-    testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigPeerTLS()))
+    testCtl(t, leaseTestGrantLeaseListed, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func TestCtlV3LeaseTestTimeToLiveExpired(t *testing.T) { testCtl(t, leaseTestTimeToLiveExpired) }
 func TestCtlV3LeaseTestTimeToLiveExpiredNoTLS(t *testing.T) {
-    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigNoTLS()))
+    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3LeaseTestTimeToLiveExpiredClientTLS(t *testing.T) {
-    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigClientTLS()))
+    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3LeaseTestTimeToLiveExpiredClientAutoTLS(t *testing.T) {
-    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3LeaseTestTimeToLiveExpiredPeerTLS(t *testing.T) {
-    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigPeerTLS()))
+    testCtl(t, leaseTestTimeToLiveExpired, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func TestCtlV3LeaseKeepAlive(t *testing.T) { testCtl(t, leaseTestKeepAlive) }
 func TestCtlV3LeaseKeepAliveNoTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAlive, withCfg(*newConfigNoTLS()))
+    testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3LeaseKeepAliveClientTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAlive, withCfg(*newConfigClientTLS()))
+    testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3LeaseKeepAliveClientAutoTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAlive, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3LeaseKeepAlivePeerTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAlive, withCfg(*newConfigPeerTLS()))
+    testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func TestCtlV3LeaseKeepAliveOnce(t *testing.T) { testCtl(t, leaseTestKeepAliveOnce) }
 func TestCtlV3LeaseKeepAliveOnceNoTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigNoTLS()))
+    testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3LeaseKeepAliveOnceClientTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigClientTLS()))
+    testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3LeaseKeepAliveOnceClientAutoTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3LeaseKeepAliveOncePeerTLS(t *testing.T) {
-    testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigPeerTLS()))
+    testCtl(t, leaseTestKeepAliveOnce, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func TestCtlV3LeaseRevoke(t *testing.T) { testCtl(t, leaseTestRevoked) }
 func TestCtlV3LeaseRevokeNoTLS(t *testing.T) {
-    testCtl(t, leaseTestRevoked, withCfg(*newConfigNoTLS()))
+    testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3LeaseRevokeClientTLS(t *testing.T) {
-    testCtl(t, leaseTestRevoked, withCfg(*newConfigClientTLS()))
+    testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3LeaseRevokeClientAutoTLS(t *testing.T) {
-    testCtl(t, leaseTestRevoked, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3LeaseRevokePeerTLS(t *testing.T) {
-    testCtl(t, leaseTestRevoked, withCfg(*newConfigPeerTLS()))
+    testCtl(t, leaseTestRevoked, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func leaseTestGrantTimeToLive(cx ctlCtx) {
@@ -113,7 +115,7 @@ func leaseTestGrantTimeToLive(cx ctlCtx) {
     }

     cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", id, "--keys")
-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         cx.t.Fatalf("leaseTestGrantTimeToLive: error (%v)", err)
     }
@@ -146,7 +148,7 @@ func leaseTestGrantLeasesList(cx ctlCtx) error {
     }

     cmdArgs := append(cx.PrefixArgs(), "lease", "list")
-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         return fmt.Errorf("lease list failed (%v)", err)
     }
@@ -177,7 +179,7 @@ func leaseTestTimeToLiveExpire(cx ctlCtx, ttl int) error {
     time.Sleep(time.Duration(ttl+1) * time.Second)
     cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", leaseID)
     exp := fmt.Sprintf("lease %s already expired", leaseID)
-    if err = spawnWithExpectWithEnv(cmdArgs, cx.envMap, exp); err != nil {
+    if err = e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, exp); err != nil {
         return fmt.Errorf("lease not properly expired: (%v)", err)
     }
     if err := ctlV3Get(cx, []string{"key"}); err != nil {
@@ -247,7 +249,7 @@ func leaseTestRevoke(cx ctlCtx) error {

 func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) {
     cmdArgs := append(cx.PrefixArgs(), "lease", "grant", strconv.Itoa(ttl))
-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         return "", err
     }
@@ -271,7 +273,7 @@ func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) {
 func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error {
     cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", leaseID)

-    proc, err := spawnCmd(cmdArgs, nil)
+    proc, err := e2e.SpawnCmd(cmdArgs, nil)
     if err != nil {
         return err
     }
@@ -285,7 +287,7 @@ func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error {
 func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error {
     cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", "--once", leaseID)

-    proc, err := spawnCmd(cmdArgs, nil)
+    proc, err := e2e.SpawnCmd(cmdArgs, nil)
     if err != nil {
         return err
     }
@@ -298,5 +300,5 @@ func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error {

 func ctlV3LeaseRevoke(cx ctlCtx, leaseID string) error {
     cmdArgs := append(cx.PrefixArgs(), "lease", "revoke", leaseID)
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("lease %s revoked", leaseID))
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("lease %s revoked", leaseID))
 }

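Every lease helper above follows the same spawn-and-expect shape: e2e.SpawnCmd starts etcdctl under an expect harness, and Expect blocks until a wanted substring shows up on its output. A minimal sketch — the matched output string and the Close call are assumptions about etcdctl/expect behavior, not taken from this diff:

    func leaseGrantSketch(cx ctlCtx, ttl int) (string, error) {
        cmdArgs := append(cx.PrefixArgs(), "lease", "grant", strconv.Itoa(ttl))
        proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
        if err != nil {
            return "", err
        }
        // block until the grant is acknowledged on stdout (assumed wording)
        line, err := proc.Expect("granted with TTL(")
        if err != nil {
            return "", err
        }
        return line, proc.Close() // assumption: the expect process exposes Close
    }
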
@@ -22,6 +22,7 @@ import (
     "time"

     "go.etcd.io/etcd/pkg/v3/expect"
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3Lock(t *testing.T) {
@@ -77,7 +78,7 @@ func testLock(cx ctlCtx) {
     if err = blocked.Signal(os.Interrupt); err != nil {
         cx.t.Fatal(err)
     }
-    if err = closeWithTimeout(blocked, time.Second); err != nil {
+    if err = e2e.CloseWithTimeout(blocked, time.Second); err != nil {
         cx.t.Fatal(err)
     }

@@ -85,7 +86,7 @@ func testLock(cx ctlCtx) {
     if err = holder.Signal(os.Interrupt); err != nil {
         cx.t.Fatal(err)
     }
-    if err = closeWithTimeout(holder, 200*time.Millisecond+time.Second); err != nil {
+    if err = e2e.CloseWithTimeout(holder, 200*time.Millisecond+time.Second); err != nil {
         cx.t.Fatal(err)
     }

@@ -119,7 +120,7 @@ func testLockWithCmd(cx ctlCtx) {
 // ctlV3Lock creates a lock process with a channel listening for when it acquires the lock.
 func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, error) {
     cmdArgs := append(cx.PrefixArgs(), "lock", name)
-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     outc := make(chan string, 1)
     if err != nil {
         close(outc)
@@ -140,5 +141,5 @@ func ctlV3LockWithCmd(cx ctlCtx, execCmd []string, as ...string) error {
     // use command as lock name
     cmdArgs := append(cx.PrefixArgs(), "lock", execCmd[0])
     cmdArgs = append(cmdArgs, execCmd...)
-    return spawnWithExpects(cmdArgs, cx.envMap, as...)
+    return e2e.SpawnWithExpects(cmdArgs, cx.envMap, as...)
 }

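The lock test's teardown idiom, shown above twice, in isolation: interrupt the process, then reap it with a deadline so a wedged etcdctl fails the test instead of hanging it. A sketch (not part of the commit) using only names visible in these hunks:

    func stopLockProcessSketch(cx ctlCtx, proc *expect.ExpectProcess) {
        // ask the locker to shut down gracefully
        if err := proc.Signal(os.Interrupt); err != nil {
            cx.t.Fatal(err)
        }
        // fail rather than hang if it does not exit within a second
        if err := e2e.CloseWithTimeout(proc, time.Second); err != nil {
            cx.t.Fatal(err)
        }
    }
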
@@ -18,6 +18,8 @@ import (
     "fmt"
     "testing"
     "time"
+
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }
@@ -59,16 +61,16 @@ func makeMirrorNoDestPrefixTest(cx ctlCtx) {

 func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {
     // set up another cluster to mirror with
-    mirrorcfg := newConfigAutoTLS()
-    mirrorcfg.clusterSize = 1
-    mirrorcfg.basePort = 10000
+    mirrorcfg := e2e.NewConfigAutoTLS()
+    mirrorcfg.ClusterSize = 1
+    mirrorcfg.BasePort = 10000
     mirrorctx := ctlCtx{
         t: cx.t,
         cfg: *mirrorcfg,
         dialTimeout: 7 * time.Second,
     }

-    mirrorepc, err := newEtcdProcessCluster(cx.t, &mirrorctx.cfg)
+    mirrorepc, err := e2e.NewEtcdProcessCluster(cx.t, &mirrorctx.cfg)
     if err != nil {
         cx.t.Fatalf("could not start etcd process cluster (%v)", err)
     }
@@ -82,8 +84,8 @@ func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvEx

     cmdArgs := append(cx.PrefixArgs(), "make-mirror")
     cmdArgs = append(cmdArgs, flags...)
-    cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.basePort))
-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort))
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         cx.t.Fatal(err)
     }

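testMirrorCommand's setup step, on its own: the destination cluster is pinned to a BasePort disjoint from the default e2e.EtcdProcessBasePort so the two clusters' listeners cannot collide, and make-mirror is then pointed at that port. A sketch (not part of the commit):

    func startMirrorClusterSketch(cx ctlCtx) *e2e.EtcdProcessCluster {
        mirrorcfg := e2e.NewConfigAutoTLS()
        mirrorcfg.ClusterSize = 1
        mirrorcfg.BasePort = 10000 // disjoint from the source cluster's e2e.EtcdProcessBasePort
        epc, err := e2e.NewEtcdProcessCluster(cx.t, mirrorcfg)
        if err != nil {
            cx.t.Fatalf("could not start etcd process cluster (%v)", err)
        }
        return epc
    }
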
@@ -23,64 +23,69 @@ import (
     "testing"

     "go.etcd.io/etcd/api/v3/etcdserverpb"
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3MemberList(t *testing.T) { testCtl(t, memberListTest) }
 func TestCtlV3MemberListWithHex(t *testing.T) { testCtl(t, memberListWithHexTest) }
-func TestCtlV3MemberListNoTLS(t *testing.T) { testCtl(t, memberListTest, withCfg(*newConfigNoTLS())) }
+func TestCtlV3MemberListNoTLS(t *testing.T) {
+    testCtl(t, memberListTest, withCfg(*e2e.NewConfigNoTLS()))
+}
 func TestCtlV3MemberListClientTLS(t *testing.T) {
-    testCtl(t, memberListTest, withCfg(*newConfigClientTLS()))
+    testCtl(t, memberListTest, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3MemberListClientAutoTLS(t *testing.T) {
-    testCtl(t, memberListTest, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, memberListTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3MemberListPeerTLS(t *testing.T) {
-    testCtl(t, memberListTest, withCfg(*newConfigPeerTLS()))
+    testCtl(t, memberListTest, withCfg(*e2e.NewConfigPeerTLS()))
 }
 func TestCtlV3MemberRemove(t *testing.T) {
     testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig())
 }
 func TestCtlV3MemberRemoveNoTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigNoTLS()))
+    testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3MemberRemoveClientTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigClientTLS()))
+    testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3MemberRemoveClientAutoTLS(t *testing.T) {
     testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(
-        // default clusterSize is 1
-        etcdProcessClusterConfig{
-            clusterSize: 3,
-            isClientAutoTLS: true,
-            clientTLS: clientTLS,
-            initialToken: "new",
+        // default ClusterSize is 1
+        e2e.EtcdProcessClusterConfig{
+            ClusterSize: 3,
+            IsClientAutoTLS: true,
+            ClientTLS: e2e.ClientTLS,
+            InitialToken: "new",
         }))
 }
 func TestCtlV3MemberRemovePeerTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigPeerTLS()))
+    testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigPeerTLS()))
 }
 func TestCtlV3MemberAdd(t *testing.T) { testCtl(t, memberAddTest) }
-func TestCtlV3MemberAddNoTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*newConfigNoTLS())) }
+func TestCtlV3MemberAddNoTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*e2e.NewConfigNoTLS())) }
 func TestCtlV3MemberAddClientTLS(t *testing.T) {
-    testCtl(t, memberAddTest, withCfg(*newConfigClientTLS()))
+    testCtl(t, memberAddTest, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3MemberAddClientAutoTLS(t *testing.T) {
-    testCtl(t, memberAddTest, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, memberAddTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
-func TestCtlV3MemberAddPeerTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*newConfigPeerTLS())) }
+func TestCtlV3MemberAddPeerTLS(t *testing.T) {
+    testCtl(t, memberAddTest, withCfg(*e2e.NewConfigPeerTLS()))
+}
 func TestCtlV3MemberAddForLearner(t *testing.T) { testCtl(t, memberAddForLearnerTest) }
 func TestCtlV3MemberUpdate(t *testing.T) { testCtl(t, memberUpdateTest) }
 func TestCtlV3MemberUpdateNoTLS(t *testing.T) {
-    testCtl(t, memberUpdateTest, withCfg(*newConfigNoTLS()))
+    testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3MemberUpdateClientTLS(t *testing.T) {
-    testCtl(t, memberUpdateTest, withCfg(*newConfigClientTLS()))
+    testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3MemberUpdateClientAutoTLS(t *testing.T) {
-    testCtl(t, memberUpdateTest, withCfg(*newConfigClientAutoTLS()))
+    testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3MemberUpdatePeerTLS(t *testing.T) {
-    testCtl(t, memberUpdateTest, withCfg(*newConfigPeerTLS()))
+    testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigPeerTLS()))
 }

 func memberListTest(cx ctlCtx) {
@@ -91,17 +96,17 @@ func memberListTest(cx ctlCtx) {

 func ctlV3MemberList(cx ctlCtx) error {
     cmdArgs := append(cx.PrefixArgs(), "member", "list")
-    lines := make([]string, cx.cfg.clusterSize)
+    lines := make([]string, cx.cfg.ClusterSize)
     for i := range lines {
         lines[i] = "started"
     }
-    return spawnWithExpects(cmdArgs, cx.envMap, lines...)
+    return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
 }

 func getMemberList(cx ctlCtx) (etcdserverpb.MemberListResponse, error) {
     cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "member", "list")

-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         return etcdserverpb.MemberListResponse{}, err
     }
@@ -130,7 +135,7 @@ func memberListWithHexTest(cx ctlCtx) {

     cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "--hex", "member", "list")

-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         cx.t.Fatalf("memberListWithHexTest error (%v)", err)
     }
@@ -177,17 +182,17 @@ func memberRemoveTest(cx ctlCtx) {

 func ctlV3MemberRemove(cx ctlCtx, ep, memberID, clusterID string) error {
     cmdArgs := append(cx.prefixArgs([]string{ep}), "member", "remove", memberID)
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID))
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID))
 }

 func memberAddTest(cx ctlCtx) {
-    if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11), false); err != nil {
+    if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11), false); err != nil {
         cx.t.Fatal(err)
     }
 }

 func memberAddForLearnerTest(cx ctlCtx) {
-    if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11), true); err != nil {
+    if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11), true); err != nil {
         cx.t.Fatal(err)
     }
 }
@@ -197,7 +202,7 @@ func ctlV3MemberAdd(cx ctlCtx, peerURL string, isLearner bool) error {
     if isLearner {
         cmdArgs = append(cmdArgs, "--learner")
     }
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, " added to cluster ")
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, " added to cluster ")
 }

 func memberUpdateTest(cx ctlCtx) {
@@ -206,7 +211,7 @@ func memberUpdateTest(cx ctlCtx) {
         cx.t.Fatal(err)
     }

-    peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+    peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
     memberID := fmt.Sprintf("%x", mr.Members[0].ID)
     if err = ctlV3MemberUpdate(cx, memberID, peerURL); err != nil {
         cx.t.Fatal(err)
@@ -215,5 +220,5 @@ func memberUpdateTest(cx ctlCtx) {

 func ctlV3MemberUpdate(cx ctlCtx, memberID, peerURL string) error {
     cmdArgs := append(cx.PrefixArgs(), "member", "update", memberID, fmt.Sprintf("--peer-urls=%s", peerURL))
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, " updated in cluster ")
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, " updated in cluster ")
 }

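The member tests derive fresh peer URLs from the shared e2e.EtcdProcessBasePort constant rather than hard-coding ports — the same constant templateEndpoints strides over at i*5 per member — so a new listener stays expressed relative to the fixture's port layout. A sketch (not part of the commit):

    func addMemberSketch(cx ctlCtx) {
        // offset chosen relative to the shared base port, as the tests above do
        peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
        if err := ctlV3MemberAdd(cx, peerURL, false /* isLearner */); err != nil {
            cx.t.Fatal(err)
        }
    }
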
@@ -25,18 +25,19 @@ import (
     "go.etcd.io/etcd/client/pkg/v3/transport"
     "go.etcd.io/etcd/client/pkg/v3/types"
     "go.etcd.io/etcd/client/v3"
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3MoveLeaderSecure(t *testing.T) {
-    testCtlV3MoveLeader(t, *newConfigTLS())
+    testCtlV3MoveLeader(t, *e2e.NewConfigTLS())
 }

 func TestCtlV3MoveLeaderInsecure(t *testing.T) {
-    testCtlV3MoveLeader(t, *newConfigNoTLS())
+    testCtlV3MoveLeader(t, *e2e.NewConfigNoTLS())
 }

-func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
-    BeforeTest(t)
+func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig) {
+    e2e.BeforeTest(t)

     epc := setupEtcdctlTest(t, &cfg, true)
     defer func() {
@@ -46,11 +47,11 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
     }()

     var tcfg *tls.Config
-    if cfg.clientTLS == clientTLS {
+    if cfg.ClientTLS == e2e.ClientTLS {
         tinfo := transport.TLSInfo{
-            CertFile: certPath,
-            KeyFile: privateKeyPath,
-            TrustedCAFile: caPath,
+            CertFile: e2e.CertPath,
+            KeyFile: e2e.PrivateKeyPath,
+            TrustedCAFile: e2e.CaPath,
         }
         var err error
         tcfg, err = tinfo.ClientConfig()
@@ -91,7 +92,7 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
     defer os.Unsetenv("ETCDCTL_API")
     cx := ctlCtx{
         t: t,
-        cfg: *newConfigNoTLS(),
+        cfg: *e2e.NewConfigNoTLS(),
         dialTimeout: 7 * time.Second,
         epc: epc,
     }
@@ -112,7 +113,7 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
     for i, tc := range tests {
         prefix := cx.prefixArgs(tc.eps)
         cmdArgs := append(prefix, "move-leader", types.ID(transferee).String())
-        if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, tc.expect); err != nil {
+        if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, tc.expect); err != nil {
             t.Fatalf("#%d: %v", i, err)
         }
     }

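Building a client *tls.Config from the shared fixture paths, as testCtlV3MoveLeader does above when the cluster serves client TLS; transport.TLSInfo.ClientConfig is the client-pkg API already shown in these hunks. A sketch (not part of the commit):

    func fixtureClientTLSSketch() (*tls.Config, error) {
        tinfo := transport.TLSInfo{
            CertFile:      e2e.CertPath,
            KeyFile:       e2e.PrivateKeyPath,
            TrustedCAFile: e2e.CaPath,
        }
        // the returned config can be handed to a clientv3.Config's TLS field
        return tinfo.ClientConfig()
    }
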
@@ -17,14 +17,18 @@ package e2e
 import (
     "fmt"
     "testing"
+
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

-func TestCtlV3RoleAdd(t *testing.T) { testCtl(t, roleAddTest) }
-func TestCtlV3RootRoleGet(t *testing.T) { testCtl(t, rootRoleGetTest) }
-func TestCtlV3RoleAddNoTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3RoleAddClientTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3RoleAddPeerTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) }
+func TestCtlV3RoleAdd(t *testing.T) { testCtl(t, roleAddTest) }
+func TestCtlV3RootRoleGet(t *testing.T) { testCtl(t, rootRoleGetTest) }
+func TestCtlV3RoleAddNoTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3RoleAddClientTLS(t *testing.T) {
+    testCtl(t, roleAddTest, withCfg(*e2e.NewConfigClientTLS()))
+}
+func TestCtlV3RoleAddPeerTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*e2e.NewConfigPeerTLS())) }
+func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) }

 func TestCtlV3RoleGrant(t *testing.T) { testCtl(t, roleGrantTest) }

@@ -140,13 +144,13 @@ func ctlV3RoleMultiExpect(cx ctlCtx, args []string, expStr ...string) error {
     cmdArgs := append(cx.PrefixArgs(), "role")
     cmdArgs = append(cmdArgs, args...)

-    return spawnWithExpects(cmdArgs, cx.envMap, expStr...)
+    return e2e.SpawnWithExpects(cmdArgs, cx.envMap, expStr...)
 }
 func ctlV3Role(cx ctlCtx, args []string, expStr string) error {
     cmdArgs := append(cx.PrefixArgs(), "role")
     cmdArgs = append(cmdArgs, args...)

-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, expStr)
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expStr)
 }

 func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) error {
@@ -160,7 +164,7 @@ func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) err
     cmdArgs = append(cmdArgs, rolename)
     cmdArgs = append(cmdArgs, grantingPermToArgs(perm)...)

-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         return err
     }
@@ -186,7 +190,7 @@ func ctlV3RoleRevokePermission(cx ctlCtx, rolename string, key, rangeEnd string,
         expStr = fmt.Sprintf("Permission of key %s is revoked from role %s", key, rolename)
     }

-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         return err
     }

@@ -26,6 +26,7 @@ import (

     "go.etcd.io/etcd/etcdutl/v3/snapshot"
     "go.etcd.io/etcd/pkg/v3/expect"
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3Snapshot(t *testing.T) { testCtl(t, snapshotTest) }
@@ -84,7 +85,7 @@ func snapshotCorruptTest(cx ctlCtx) {

     datadir := cx.t.TempDir()

-    serr := spawnWithExpectWithEnv(
+    serr := e2e.SpawnWithExpectWithEnv(
         append(cx.PrefixArgsUtl(), "snapshot", "restore",
             "--data-dir", datadir,
             fpath),
@@ -118,7 +119,7 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) {

     dataDir := cx.t.TempDir()
     defer os.RemoveAll(dataDir)
-    serr := spawnWithExpectWithEnv(
+    serr := e2e.SpawnWithExpectWithEnv(
         append(cx.PrefixArgsUtl(), "snapshot", "restore",
             "--data-dir", dataDir,
             fpath),
@@ -131,13 +132,13 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) {

 func ctlV3SnapshotSave(cx ctlCtx, fpath string) error {
     cmdArgs := append(cx.PrefixArgs(), "snapshot", "save", fpath)
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("Snapshot saved at %s", fpath))
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("Snapshot saved at %s", fpath))
 }

 func getSnapshotStatus(cx ctlCtx, fpath string) (snapshot.Status, error) {
     cmdArgs := append(cx.PrefixArgsUtl(), "--write-out", "json", "snapshot", "status", fpath)

-    proc, err := spawnCmd(cmdArgs, nil)
+    proc, err := e2e.SpawnCmd(cmdArgs, nil)
     if err != nil {
         return snapshot.Status{}, err
     }
@@ -172,14 +173,14 @@ func testIssue6361(t *testing.T, etcdutl bool) {
         os.Setenv("EXPECT_DEBUG", "1")
     }

-    BeforeTest(t)
+    e2e.BeforeTest(t)
     os.Setenv("ETCDCTL_API", "3")
     defer os.Unsetenv("ETCDCTL_API")

-    epc, err := newEtcdProcessCluster(t, &etcdProcessClusterConfig{
-        clusterSize: 1,
-        initialToken: "new",
-        keepDataDir: true,
+    epc, err := e2e.NewEtcdProcessCluster(t, &e2e.EtcdProcessClusterConfig{
+        ClusterSize: 1,
+        InitialToken: "new",
+        KeepDataDir: true,
     })
     if err != nil {
         t.Fatalf("could not start etcd process cluster (%v)", err)
@@ -191,12 +192,12 @@ func testIssue6361(t *testing.T, etcdutl bool) {
     }()

     dialTimeout := 10 * time.Second
-    prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}
+    prefixArgs := []string{e2e.CtlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}

     t.Log("Writing some keys...")
     kvs := []kv{{"foo1", "val1"}, {"foo2", "val2"}, {"foo3", "val3"}}
     for i := range kvs {
-        if err = spawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), "OK"); err != nil {
+        if err = e2e.SpawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), "OK"); err != nil {
             t.Fatal(err)
         }
     }
@@ -204,7 +205,7 @@ func testIssue6361(t *testing.T, etcdutl bool) {
     fpath := filepath.Join(t.TempDir(), "test.snapshot")

     t.Log("etcdctl saving snapshot...")
-    if err = spawnWithExpects(append(prefixArgs, "snapshot", "save", fpath),
+    if err = e2e.SpawnWithExpects(append(prefixArgs, "snapshot", "save", fpath),
         nil,
         fmt.Sprintf("Snapshot saved at %s", fpath),
     ); err != nil {
@@ -212,45 +213,45 @@ func testIssue6361(t *testing.T, etcdutl bool) {
     }

     t.Log("Stopping the original server...")
-    if err = epc.procs[0].Stop(); err != nil {
+    if err = epc.Procs[0].Stop(); err != nil {
         t.Fatal(err)
     }

     newDataDir := filepath.Join(t.TempDir(), "test.data")

-    uctlBinPath := ctlBinPath
+    uctlBinPath := e2e.CtlBinPath
     if etcdutl {
-        uctlBinPath = utlBinPath
+        uctlBinPath = e2e.UtlBinPath
     }

     t.Log("etcdctl restoring the snapshot...")
-    err = spawnWithExpect([]string{uctlBinPath, "snapshot", "restore", fpath, "--name", epc.procs[0].Config().name, "--initial-cluster", epc.procs[0].Config().initialCluster, "--initial-cluster-token", epc.procs[0].Config().initialToken, "--initial-advertise-peer-urls", epc.procs[0].Config().purl.String(), "--data-dir", newDataDir}, "added member")
+    err = e2e.SpawnWithExpect([]string{uctlBinPath, "snapshot", "restore", fpath, "--name", epc.Procs[0].Config().Name, "--initial-cluster", epc.Procs[0].Config().InitialCluster, "--initial-cluster-token", epc.Procs[0].Config().InitialToken, "--initial-advertise-peer-urls", epc.Procs[0].Config().Purl.String(), "--data-dir", newDataDir}, "added member")
     if err != nil {
         t.Fatal(err)
     }

     t.Log("(Re)starting the etcd member using the restored snapshot...")
-    epc.procs[0].Config().dataDirPath = newDataDir
-    for i := range epc.procs[0].Config().args {
-        if epc.procs[0].Config().args[i] == "--data-dir" {
-            epc.procs[0].Config().args[i+1] = newDataDir
+    epc.Procs[0].Config().DataDirPath = newDataDir
+    for i := range epc.Procs[0].Config().Args {
+        if epc.Procs[0].Config().Args[i] == "--data-dir" {
+            epc.Procs[0].Config().Args[i+1] = newDataDir
         }
     }
-    if err = epc.procs[0].Restart(); err != nil {
+    if err = epc.Procs[0].Restart(); err != nil {
         t.Fatal(err)
     }

     t.Log("Ensuring the restored member has the correct data...")
     for i := range kvs {
-        if err = spawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil {
+        if err = e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil {
             t.Fatal(err)
         }
     }

     t.Log("Adding new member into the cluster")
-    clientURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+30)
-    peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+31)
-    err = spawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), " added to cluster ")
+    clientURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+30)
+    peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+31)
+    err = e2e.SpawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), " added to cluster ")
     if err != nil {
         t.Fatal(err)
     }
@@ -259,12 +260,12 @@ func testIssue6361(t *testing.T, etcdutl bool) {
     defer os.RemoveAll(newDataDir2)

     name2 := "infra2"
-    initialCluster2 := epc.procs[0].Config().initialCluster + fmt.Sprintf(",%s=%s", name2, peerURL)
+    initialCluster2 := epc.Procs[0].Config().InitialCluster + fmt.Sprintf(",%s=%s", name2, peerURL)

     t.Log("Starting the new member")
     // start the new member
     var nepc *expect.ExpectProcess
-    nepc, err = spawnCmd([]string{epc.procs[0].Config().execPath, "--name", name2,
+    nepc, err = e2e.SpawnCmd([]string{epc.Procs[0].Config().ExecPath, "--name", name2,
         "--listen-client-urls", clientURL, "--advertise-client-urls", clientURL,
         "--listen-peer-urls", peerURL, "--initial-advertise-peer-urls", peerURL,
         "--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2}, nil)
@@ -275,11 +276,11 @@ func testIssue6361(t *testing.T, etcdutl bool) {
         t.Fatal(err)
     }

-    prefixArgs = []string{ctlBinPath, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()}
+    prefixArgs = []string{e2e.CtlBinPath, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()}

     t.Log("Ensuring added member has data from incoming snapshot...")
     for i := range kvs {
-        if err = spawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil {
+        if err = e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil {
             t.Fatal(err)
         }
     }
@@ -292,12 +293,12 @@ func testIssue6361(t *testing.T, etcdutl bool) {
 }

 // For storageVersion to be stored, all fields expected 3.6 fields need to be set. This happens after first WAL snapshot.
-// In this test we lower snapshotCount to 1 to ensure WAL snapshot is triggered.
+// In this test we lower SnapshotCount to 1 to ensure WAL snapshot is triggered.
 func TestCtlV3SnapshotVersion(t *testing.T) {
-    testCtl(t, snapshotVersionTest, withCfg(etcdProcessClusterConfig{snapshotCount: 1}))
+    testCtl(t, snapshotVersionTest, withCfg(e2e.EtcdProcessClusterConfig{SnapshotCount: 1}))
 }
 func TestCtlV3SnapshotVersionEtcdutl(t *testing.T) {
-    testCtl(t, snapshotVersionTest, withEtcdutl(), withCfg(etcdProcessClusterConfig{snapshotCount: 1}))
+    testCtl(t, snapshotVersionTest, withEtcdutl(), withCfg(e2e.EtcdProcessClusterConfig{SnapshotCount: 1}))
 }

 func snapshotVersionTest(cx ctlCtx) {

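The spine of testIssue6361, reduced to its two CLI steps: save through etcdctl, then restore into a fresh data dir through etcdctl or etcdutl. A sketch assuming only the helper names and expected output strings visible above (the full restore carries the --name/--initial-cluster flags shown in the hunk):

    func snapshotRoundTripSketch(t *testing.T, prefixArgs []string, fpath, newDataDir string) {
        // save: etcdctl prints the destination path on success
        if err := e2e.SpawnWithExpect(append(prefixArgs, "snapshot", "save", fpath),
            fmt.Sprintf("Snapshot saved at %s", fpath)); err != nil {
            t.Fatal(err)
        }
        // restore: the restore tool reports the bootstrapped member
        if err := e2e.SpawnWithExpect([]string{e2e.UtlBinPath, "snapshot", "restore", fpath,
            "--data-dir", newDataDir}, "added member"); err != nil {
            t.Fatal(err)
        }
    }
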
@@ -26,12 +26,13 @@ import (
     "go.etcd.io/etcd/client/pkg/v3/fileutil"
     "go.etcd.io/etcd/client/pkg/v3/testutil"
     "go.etcd.io/etcd/pkg/v3/flags"
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
 )

 func TestCtlV3Version(t *testing.T) { testCtl(t, versionTest) }

 func TestClusterVersion(t *testing.T) {
-    BeforeTest(t)
+    e2e.BeforeTest(t)

     tests := []struct {
         name string
@@ -49,18 +50,18 @@ func TestClusterVersion(t *testing.T) {

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            binary := binDir + "/etcd"
+            binary := e2e.BinDir + "/etcd"
             if !fileutil.Exist(binary) {
                 t.Skipf("%q does not exist", binary)
             }
-            BeforeTest(t)
-            cfg := newConfigNoTLS()
-            cfg.execPath = binary
-            cfg.snapshotCount = 3
-            cfg.baseScheme = "unix" // to avoid port conflict
-            cfg.rollingStart = tt.rollingStart
+            e2e.BeforeTest(t)
+            cfg := e2e.NewConfigNoTLS()
+            cfg.ExecPath = binary
+            cfg.SnapshotCount = 3
+            cfg.BaseScheme = "unix" // to avoid port conflict
+            cfg.RollingStart = tt.rollingStart

-            epc, err := newEtcdProcessCluster(t, cfg)
+            epc, err := e2e.NewEtcdProcessCluster(t, cfg)
             if err != nil {
                 t.Fatalf("could not start etcd process cluster (%v)", err)
             }
@@ -90,7 +91,7 @@ func versionTest(cx ctlCtx) {
 func clusterVersionTest(cx ctlCtx, expected string) {
     var err error
     for i := 0; i < 35; i++ {
-        if err = cURLGet(cx.epc, cURLReq{endpoint: "/version", expected: expected}); err != nil {
+        if err = e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: "/version", Expected: expected}); err != nil {
             cx.t.Logf("#%d: v3 is not ready yet (%v)", i, err)
             time.Sleep(200 * time.Millisecond)
             continue
@@ -104,17 +105,17 @@ func clusterVersionTest(cx ctlCtx, expected string) {

 func ctlV3Version(cx ctlCtx) error {
     cmdArgs := append(cx.PrefixArgs(), "version")
-    return spawnWithExpectWithEnv(cmdArgs, cx.envMap, version.Version)
+    return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, version.Version)
 }

-// TestCtlV3DialWithHTTPScheme ensures that client handles endpoints with HTTPS scheme.
+// TestCtlV3DialWithHTTPScheme ensures that client handles Endpoints with HTTPS scheme.
 func TestCtlV3DialWithHTTPScheme(t *testing.T) {
-    testCtl(t, dialWithSchemeTest, withCfg(*newConfigClientTLS()))
+    testCtl(t, dialWithSchemeTest, withCfg(*e2e.NewConfigClientTLS()))
 }

 func dialWithSchemeTest(cx ctlCtx) {
     cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar")
-    if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil {
+    if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil {
         cx.t.Fatal(err)
     }
 }
@@ -122,12 +123,12 @@ func dialWithSchemeTest(cx ctlCtx) {
 type ctlCtx struct {
     t *testing.T
     apiPrefix string
-    cfg etcdProcessClusterConfig
+    cfg e2e.EtcdProcessClusterConfig
     quotaBackendBytes int64
     corruptFunc func(string) error
     noStrictReconfig bool

-    epc *etcdProcessCluster
+    epc *e2e.EtcdProcessCluster

     envMap map[string]string

@@ -160,7 +161,7 @@ func (cx *ctlCtx) applyOpts(opts []ctlOption) {
     cx.initialCorruptCheck = true
 }

-func withCfg(cfg etcdProcessClusterConfig) ctlOption {
+func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption {
     return func(cx *ctlCtx) { cx.cfg = cfg }
 }

@@ -213,35 +214,35 @@ func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) {
 }

 func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), opts ...ctlOption) {
-    BeforeTest(t)
+    e2e.BeforeTest(t)

     ret := ctlCtx{
         t: t,
-        cfg: *newConfigAutoTLS(),
+        cfg: *e2e.NewConfigAutoTLS(),
         dialTimeout: 7 * time.Second,
     }
     ret.applyOpts(opts)

     if !ret.quorum {
-        ret.cfg = *configStandalone(ret.cfg)
+        ret.cfg = *e2e.ConfigStandalone(ret.cfg)
     }
     if ret.quotaBackendBytes > 0 {
-        ret.cfg.quotaBackendBytes = ret.quotaBackendBytes
+        ret.cfg.QuotaBackendBytes = ret.quotaBackendBytes
     }
-    ret.cfg.noStrictReconfig = ret.noStrictReconfig
+    ret.cfg.NoStrictReconfig = ret.noStrictReconfig
     if ret.initialCorruptCheck {
-        ret.cfg.initialCorruptCheck = ret.initialCorruptCheck
+        ret.cfg.InitialCorruptCheck = ret.initialCorruptCheck
     }
     if testOfflineFunc != nil {
-        ret.cfg.keepDataDir = true
+        ret.cfg.KeepDataDir = true
     }

-    epc, err := newEtcdProcessCluster(t, &ret.cfg)
+    epc, err := e2e.NewEtcdProcessCluster(t, &ret.cfg)
     if err != nil {
         t.Fatalf("could not start etcd process cluster (%v)", err)
     }
     ret.epc = epc
-    ret.dataDir = epc.procs[0].Config().dataDirPath
+    ret.dataDir = epc.Procs[0].Config().DataDirPath

     defer func() {
         if ret.envMap != nil {
@@ -288,18 +289,18 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string {
     fmap := make(map[string]string)
     fmap["endpoints"] = strings.Join(eps, ",")
     fmap["dial-timeout"] = cx.dialTimeout.String()
-    if cx.epc.cfg.clientTLS == clientTLS {
-        if cx.epc.cfg.isClientAutoTLS {
+    if cx.epc.Cfg.ClientTLS == e2e.ClientTLS {
+        if cx.epc.Cfg.IsClientAutoTLS {
             fmap["insecure-transport"] = "false"
             fmap["insecure-skip-tls-verify"] = "true"
-        } else if cx.epc.cfg.isClientCRL {
-            fmap["cacert"] = caPath
-            fmap["cert"] = revokedCertPath
-            fmap["key"] = revokedPrivateKeyPath
+        } else if cx.epc.Cfg.IsClientCRL {
+            fmap["cacert"] = e2e.CaPath
+            fmap["cert"] = e2e.RevokedCertPath
+            fmap["key"] = e2e.RevokedPrivateKeyPath
         } else {
-            fmap["cacert"] = caPath
-            fmap["cert"] = certPath
-            fmap["key"] = privateKeyPath
+            fmap["cacert"] = e2e.CaPath
+            fmap["cert"] = e2e.CertPath
+            fmap["key"] = e2e.PrivateKeyPath
         }
     }
     if cx.user != "" {
@@ -308,7 +309,7 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string {

     useEnv := cx.envMap != nil

-    cmdArgs := []string{ctlBinPath + "3"}
+    cmdArgs := []string{e2e.CtlBinPath + "3"}
     for k, v := range fmap {
         if useEnv {
             ek := flags.FlagToEnv("ETCDCTL", k)
@@ -331,9 +332,9 @@ func (cx *ctlCtx) PrefixArgs() []string {
 // Please not thet 'utl' compatible commands does not consume --endpoints flag.
 func (cx *ctlCtx) PrefixArgsUtl() []string {
     if cx.etcdutl {
-        return []string{utlBinPath}
+        return []string{e2e.UtlBinPath}
     }
-    return []string{ctlBinPath}
+    return []string{e2e.CtlBinPath}
 }

 func isGRPCTimedout(err error) bool {
@@ -341,7 +342,7 @@ func isGRPCTimedout(err error) bool {
 }

 func (cx *ctlCtx) memberToRemove() (ep string, memberID string, clusterID string) {
-    n1 := cx.cfg.clusterSize
+    n1 := cx.cfg.ClusterSize
     if n1 < 2 {
         cx.t.Fatalf("%d-node is too small to test 'member remove'", n1)
     }

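prefixArgs above is the one place where the flag-vs-environment split is decided: the same flag map is emitted either as --flag=value arguments or, under withFlagByEnv(), as ETCDCTL_* variables via flags.FlagToEnv. A sketch of that branch in isolation — the envMap assignment is an assumption about the loop body elided from the hunk:

    cmdArgs := []string{e2e.CtlBinPath + "3"}
    for k, v := range fmap {
        if useEnv {
            // translate e.g. "dial-timeout" into "ETCDCTL_DIAL_TIMEOUT"
            ek := flags.FlagToEnv("ETCDCTL", k)
            cx.envMap[ek] = v
        } else {
            cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
        }
    }
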
@@ -14,19 +14,23 @@

 package e2e

-import "testing"
+import (
+    "testing"
+
+    "go.etcd.io/etcd/tests/v3/framework/e2e"
+)

 func TestCtlV3TxnInteractiveSuccess(t *testing.T) {
     testCtl(t, txnTestSuccess, withInteractive())
 }
 func TestCtlV3TxnInteractiveSuccessNoTLS(t *testing.T) {
-    testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigNoTLS()))
+    testCtl(t, txnTestSuccess, withInteractive(), withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3TxnInteractiveSuccessClientTLS(t *testing.T) {
-    testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigClientTLS()))
+    testCtl(t, txnTestSuccess, withInteractive(), withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3TxnInteractiveSuccessPeerTLS(t *testing.T) {
-    testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigPeerTLS()))
+    testCtl(t, txnTestSuccess, withInteractive(), withCfg(*e2e.NewConfigPeerTLS()))
 }
 func TestCtlV3TxnInteractiveFail(t *testing.T) {
     testCtl(t, txnTestFail, withInteractive())
@@ -102,7 +106,7 @@ func ctlV3Txn(cx ctlCtx, rqs txnRequests) error {
     if cx.interactive {
         cmdArgs = append(cmdArgs, "--interactive")
     }
-    proc, err := spawnCmd(cmdArgs, cx.envMap)
+    proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
     if err != nil {
         return err
     }

@@ -14,44 +14,56 @@
 
 package e2e
 
-import "testing"
+import (
+	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
+)
 
-func TestCtlV3UserAdd(t *testing.T) { testCtl(t, userAddTest) }
-func TestCtlV3UserAddNoTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3UserAddClientTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3UserAddPeerTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3UserAddTimeout(t *testing.T) { testCtl(t, userAddTest, withDialTimeout(0)) }
+func TestCtlV3UserAdd(t *testing.T) { testCtl(t, userAddTest) }
+func TestCtlV3UserAddNoTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3UserAddClientTLS(t *testing.T) {
+	testCtl(t, userAddTest, withCfg(*e2e.NewConfigClientTLS()))
+}
+func TestCtlV3UserAddPeerTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*e2e.NewConfigPeerTLS())) }
+func TestCtlV3UserAddTimeout(t *testing.T) { testCtl(t, userAddTest, withDialTimeout(0)) }
 func TestCtlV3UserAddClientAutoTLS(t *testing.T) {
-	testCtl(t, userAddTest, withCfg(*newConfigClientAutoTLS()))
+	testCtl(t, userAddTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3UserList(t *testing.T) { testCtl(t, userListTest) }
-func TestCtlV3UserListNoTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*newConfigNoTLS())) }
+func TestCtlV3UserListNoTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*e2e.NewConfigNoTLS())) }
 func TestCtlV3UserListClientTLS(t *testing.T) {
-	testCtl(t, userListTest, withCfg(*newConfigClientTLS()))
+	testCtl(t, userListTest, withCfg(*e2e.NewConfigClientTLS()))
 }
-func TestCtlV3UserListPeerTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*newConfigPeerTLS())) }
+func TestCtlV3UserListPeerTLS(t *testing.T) {
+	testCtl(t, userListTest, withCfg(*e2e.NewConfigPeerTLS()))
+}
 func TestCtlV3UserListClientAutoTLS(t *testing.T) {
-	testCtl(t, userListTest, withCfg(*newConfigClientAutoTLS()))
+	testCtl(t, userListTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 func TestCtlV3UserDelete(t *testing.T) { testCtl(t, userDelTest) }
-func TestCtlV3UserDeleteNoTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*newConfigNoTLS())) }
+func TestCtlV3UserDeleteNoTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*e2e.NewConfigNoTLS())) }
 func TestCtlV3UserDeleteClientTLS(t *testing.T) {
-	testCtl(t, userDelTest, withCfg(*newConfigClientTLS()))
+	testCtl(t, userDelTest, withCfg(*e2e.NewConfigClientTLS()))
 }
-func TestCtlV3UserDeletePeerTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*newConfigPeerTLS())) }
+func TestCtlV3UserDeletePeerTLS(t *testing.T) {
+	testCtl(t, userDelTest, withCfg(*e2e.NewConfigPeerTLS()))
+}
 func TestCtlV3UserDeleteClientAutoTLS(t *testing.T) {
-	testCtl(t, userDelTest, withCfg(*newConfigClientAutoTLS()))
+	testCtl(t, userDelTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
-func TestCtlV3UserPasswd(t *testing.T) { testCtl(t, userPasswdTest) }
-func TestCtlV3UserPasswdNoTLS(t *testing.T) { testCtl(t, userPasswdTest, withCfg(*newConfigNoTLS())) }
+func TestCtlV3UserPasswd(t *testing.T) { testCtl(t, userPasswdTest) }
+func TestCtlV3UserPasswdNoTLS(t *testing.T) {
+	testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigNoTLS()))
+}
 func TestCtlV3UserPasswdClientTLS(t *testing.T) {
-	testCtl(t, userPasswdTest, withCfg(*newConfigClientTLS()))
+	testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3UserPasswdPeerTLS(t *testing.T) {
-	testCtl(t, userPasswdTest, withCfg(*newConfigPeerTLS()))
+	testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigPeerTLS()))
 }
 func TestCtlV3UserPasswdClientAutoTLS(t *testing.T) {
-	testCtl(t, userPasswdTest, withCfg(*newConfigClientAutoTLS()))
+	testCtl(t, userPasswdTest, withCfg(*e2e.NewConfigClientAutoTLS()))
 }
 
 type userCmdDesc struct {
@@ -179,7 +191,7 @@ func ctlV3User(cx ctlCtx, args []string, expStr string, stdIn []string) error {
 	cmdArgs := append(cx.PrefixArgs(), "user")
 	cmdArgs = append(cmdArgs, args...)
 
-	proc, err := spawnCmd(cmdArgs, cx.envMap)
+	proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
 	if err != nil {
 		return err
 	}
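Note that the withCfg/withInteractive/withDialTimeout helpers used in these tests remain unexported in the test package; only the configuration type they wrap moved to the framework. A plausible sketch of that option plumbing, inferred from the cx.cfg and cx.interactive call sites in this diff (the real definitions live in ctl_v3_test.go, which is not shown here):

// Sketch only; the option type's actual name is an assumption.
type ctlOption func(*ctlCtx)

func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption {
	return func(cx *ctlCtx) { cx.cfg = cfg }
}

func withInteractive() ctlOption {
	return func(cx *ctlCtx) { cx.interactive = true }
}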
@@ -20,25 +20,27 @@ package e2e
 import (
 	"os"
 	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) }
-func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigPeerTLS())) }
+func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) }
+func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) }
 func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) }
 
 func TestCtlV3WatchInteractive(t *testing.T) {
 	testCtl(t, watchTest, withInteractive())
 }
 func TestCtlV3WatchInteractiveNoTLS(t *testing.T) {
-	testCtl(t, watchTest, withInteractive(), withCfg(*newConfigNoTLS()))
+	testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigNoTLS()))
 }
 func TestCtlV3WatchInteractiveClientTLS(t *testing.T) {
-	testCtl(t, watchTest, withInteractive(), withCfg(*newConfigClientTLS()))
+	testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigClientTLS()))
 }
 func TestCtlV3WatchInteractivePeerTLS(t *testing.T) {
-	testCtl(t, watchTest, withInteractive(), withCfg(*newConfigPeerTLS()))
+	testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigPeerTLS()))
 }
 
 func watchTest(cx ctlCtx) {
@@ -14,7 +14,11 @@
 
 package e2e
 
-import "strings"
+import (
+	"strings"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
+)
 
 type kvExec struct {
 	key, val string
@@ -35,7 +39,7 @@ func setupWatchArgs(cx ctlCtx, args []string) []string {
 func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error {
 	cmdArgs := setupWatchArgs(cx, args)
 
-	proc, err := spawnCmd(cmdArgs, nil)
+	proc, err := e2e.SpawnCmd(cmdArgs, nil)
 	if err != nil {
 		return err
 	}
@@ -66,7 +70,7 @@ func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error {
 func ctlV3WatchFailPerm(cx ctlCtx, args []string) error {
 	cmdArgs := setupWatchArgs(cx, args)
 
-	proc, err := spawnCmd(cmdArgs, nil)
+	proc, err := e2e.SpawnCmd(cmdArgs, nil)
 	if err != nil {
 		return err
 	}
@@ -22,18 +22,19 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/pkg/v3/expect"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 const exampleConfigFile = "../../etcd.conf.yml.sample"
 
 func TestEtcdExampleConfig(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
-	proc, err := spawnCmd([]string{binDir + "/etcd", "--config-file", exampleConfigFile}, nil)
+	proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--config-file", exampleConfigFile}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil {
+	if err = e2e.WaitReadyExpectProc(proc, e2e.EtcdServerReadyLines); err != nil {
 		t.Fatal(err)
 	}
 	if err = proc.Stop(); err != nil {
@@ -42,11 +43,11 @@ func TestEtcdExampleConfig(t *testing.T) {
 }
 
 func TestEtcdMultiPeer(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
 	peers, tmpdirs := make([]string, 3), make([]string, 3)
 	for i := range peers {
-		peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, etcdProcessBasePort+i)
+		peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
 		d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
 		if err != nil {
 			t.Fatal(err)
@@ -66,16 +67,16 @@ func TestEtcdMultiPeer(t *testing.T) {
 	}()
 	for i := range procs {
 		args := []string{
-			binDir + "/etcd",
+			e2e.BinDir + "/etcd",
 			"--name", fmt.Sprintf("e%d", i),
 			"--listen-client-urls", "http://0.0.0.0:0",
 			"--data-dir", tmpdirs[i],
 			"--advertise-client-urls", "http://0.0.0.0:0",
-			"--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
-			"--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", etcdProcessBasePort+i),
+			"--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+			"--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
 			"--initial-cluster", ic,
 		}
-		p, err := spawnCmd(args, nil)
+		p, err := e2e.SpawnCmd(args, nil)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -83,7 +84,7 @@ func TestEtcdMultiPeer(t *testing.T) {
 	}
 
 	for _, p := range procs {
-		if err := waitReadyExpectProc(p, etcdServerReadyLines); err != nil {
+		if err := e2e.WaitReadyExpectProc(p, e2e.EtcdServerReadyLines); err != nil {
 			t.Fatal(err)
 		}
 	}
@@ -91,16 +92,16 @@ func TestEtcdMultiPeer(t *testing.T) {
 
 // TestEtcdUnixPeers checks that etcd will boot with unix socket peers.
 func TestEtcdUnixPeers(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
 	d, err := ioutil.TempDir("", "e1.etcd")
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer os.RemoveAll(d)
-	proc, err := spawnCmd(
+	proc, err := e2e.SpawnCmd(
 		[]string{
-			binDir + "/etcd",
+			e2e.BinDir + "/etcd",
 			"--data-dir", d,
 			"--name", "e1",
 			"--listen-peer-urls", "unix://etcd.unix:1",
@@ -112,7 +113,7 @@ func TestEtcdUnixPeers(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil {
+	if err = e2e.WaitReadyExpectProc(proc, e2e.EtcdServerReadyLines); err != nil {
 		t.Fatal(err)
 	}
 	if err = proc.Stop(); err != nil {
@@ -122,11 +123,11 @@ func TestEtcdUnixPeers(t *testing.T) {
 
 // TestEtcdPeerCNAuth checks that the inter peer auth based on CN of cert is working correctly.
 func TestEtcdPeerCNAuth(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
 	peers, tmpdirs := make([]string, 3), make([]string, 3)
 	for i := range peers {
-		peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i)
+		peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
 		d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
 		if err != nil {
 			t.Fatal(err)
@@ -148,34 +149,34 @@ func TestEtcdPeerCNAuth(t *testing.T) {
 	// node 0 and 1 have a cert with the correct CN, node 2 doesn't
 	for i := range procs {
 		commonArgs := []string{
-			binDir + "/etcd",
+			e2e.BinDir + "/etcd",
 			"--name", fmt.Sprintf("e%d", i),
 			"--listen-client-urls", "http://0.0.0.0:0",
 			"--data-dir", tmpdirs[i],
 			"--advertise-client-urls", "http://0.0.0.0:0",
-			"--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
-			"--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", etcdProcessBasePort+i),
+			"--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+			"--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
 			"--initial-cluster", ic,
 		}
 
 		var args []string
 		if i <= 1 {
 			args = []string{
-				"--peer-cert-file", certPath,
-				"--peer-key-file", privateKeyPath,
-				"--peer-client-cert-file", certPath,
-				"--peer-client-key-file", privateKeyPath,
-				"--peer-trusted-ca-file", caPath,
+				"--peer-cert-file", e2e.CertPath,
+				"--peer-key-file", e2e.PrivateKeyPath,
+				"--peer-client-cert-file", e2e.CertPath,
+				"--peer-client-key-file", e2e.PrivateKeyPath,
+				"--peer-trusted-ca-file", e2e.CaPath,
 				"--peer-client-cert-auth",
 				"--peer-cert-allowed-cn", "example.com",
 			}
 		} else {
 			args = []string{
-				"--peer-cert-file", certPath2,
-				"--peer-key-file", privateKeyPath2,
-				"--peer-client-cert-file", certPath2,
-				"--peer-client-key-file", privateKeyPath2,
-				"--peer-trusted-ca-file", caPath,
+				"--peer-cert-file", e2e.CertPath2,
+				"--peer-key-file", e2e.PrivateKeyPath2,
+				"--peer-client-cert-file", e2e.CertPath2,
+				"--peer-client-key-file", e2e.PrivateKeyPath2,
+				"--peer-trusted-ca-file", e2e.CaPath,
 				"--peer-client-cert-auth",
 				"--peer-cert-allowed-cn", "example2.com",
 			}
@@ -183,7 +184,7 @@ func TestEtcdPeerCNAuth(t *testing.T) {
 
 		commonArgs = append(commonArgs, args...)
 
-		p, err := spawnCmd(commonArgs, nil)
+		p, err := e2e.SpawnCmd(commonArgs, nil)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -193,11 +194,11 @@ func TestEtcdPeerCNAuth(t *testing.T) {
 	for i, p := range procs {
 		var expect []string
 		if i <= 1 {
-			expect = etcdServerReadyLines
+			expect = e2e.EtcdServerReadyLines
 		} else {
 			expect = []string{"remote error: tls: bad certificate"}
 		}
-		if err := waitReadyExpectProc(p, expect); err != nil {
+		if err := e2e.WaitReadyExpectProc(p, expect); err != nil {
 			t.Fatal(err)
 		}
 	}
@@ -205,11 +206,11 @@ func TestEtcdPeerCNAuth(t *testing.T) {
 
 // TestEtcdPeerNameAuth checks that the inter peer auth based on cert name validation is working correctly.
 func TestEtcdPeerNameAuth(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
 	peers, tmpdirs := make([]string, 3), make([]string, 3)
 	for i := range peers {
-		peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i)
+		peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
 		d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
 		if err != nil {
 			t.Fatal(err)
@@ -231,30 +232,30 @@ func TestEtcdPeerNameAuth(t *testing.T) {
 	// node 0 and 1 have a cert with the correct certificate name, node 2 doesn't
 	for i := range procs {
 		commonArgs := []string{
-			binDir + "/etcd",
+			e2e.BinDir + "/etcd",
 			"--name", fmt.Sprintf("e%d", i),
 			"--listen-client-urls", "http://0.0.0.0:0",
 			"--data-dir", tmpdirs[i],
 			"--advertise-client-urls", "http://0.0.0.0:0",
-			"--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
-			"--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", etcdProcessBasePort+i),
+			"--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+			"--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
 			"--initial-cluster", ic,
 		}
 
 		var args []string
 		if i <= 1 {
 			args = []string{
-				"--peer-cert-file", certPath,
-				"--peer-key-file", privateKeyPath,
-				"--peer-trusted-ca-file", caPath,
+				"--peer-cert-file", e2e.CertPath,
+				"--peer-key-file", e2e.PrivateKeyPath,
+				"--peer-trusted-ca-file", e2e.CaPath,
 				"--peer-client-cert-auth",
 				"--peer-cert-allowed-hostname", "localhost",
 			}
 		} else {
 			args = []string{
-				"--peer-cert-file", certPath2,
-				"--peer-key-file", privateKeyPath2,
-				"--peer-trusted-ca-file", caPath,
+				"--peer-cert-file", e2e.CertPath2,
+				"--peer-key-file", e2e.PrivateKeyPath2,
+				"--peer-trusted-ca-file", e2e.CaPath,
 				"--peer-client-cert-auth",
 				"--peer-cert-allowed-hostname", "example2.com",
 			}
@@ -262,7 +263,7 @@ func TestEtcdPeerNameAuth(t *testing.T) {
 
 		commonArgs = append(commonArgs, args...)
 
-		p, err := spawnCmd(commonArgs, nil)
+		p, err := e2e.SpawnCmd(commonArgs, nil)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -272,43 +273,43 @@ func TestEtcdPeerNameAuth(t *testing.T) {
 	for i, p := range procs {
 		var expect []string
 		if i <= 1 {
-			expect = etcdServerReadyLines
+			expect = e2e.EtcdServerReadyLines
 		} else {
 			expect = []string{"client certificate authentication failed"}
 		}
-		if err := waitReadyExpectProc(p, expect); err != nil {
+		if err := e2e.WaitReadyExpectProc(p, expect); err != nil {
 			t.Fatal(err)
 		}
 	}
 }
 
 func TestGrpcproxyAndCommonName(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
 	argsWithNonEmptyCN := []string{
-		binDir + "/etcd",
+		e2e.BinDir + "/etcd",
 		"grpc-proxy",
 		"start",
-		"--cert", certPath2,
-		"--key", privateKeyPath2,
-		"--cacert", caPath,
+		"--cert", e2e.CertPath2,
+		"--key", e2e.PrivateKeyPath2,
+		"--cacert", e2e.CaPath,
 	}
 
 	argsWithEmptyCN := []string{
-		binDir + "/etcd",
+		e2e.BinDir + "/etcd",
 		"grpc-proxy",
 		"start",
-		"--cert", certPath3,
-		"--key", privateKeyPath3,
-		"--cacert", caPath,
+		"--cert", e2e.CertPath3,
+		"--key", e2e.PrivateKeyPath3,
+		"--cacert", e2e.CaPath,
 	}
 
-	err := spawnWithExpect(argsWithNonEmptyCN, "cert has non empty Common Name")
+	err := e2e.SpawnWithExpect(argsWithNonEmptyCN, "cert has non empty Common Name")
 	if err != nil {
 		t.Errorf("Unexpected error: %s", err)
 	}
 
-	p, err := spawnCmd(argsWithEmptyCN, nil)
+	p, err := e2e.SpawnCmd(argsWithEmptyCN, nil)
 	defer func() {
 		if p != nil {
 			p.Stop()
@@ -321,13 +322,13 @@ func TestGrpcproxyAndCommonName(t *testing.T) {
 }
 
 func TestBootstrapDefragFlag(t *testing.T) {
-	skipInShortMode(t)
+	e2e.SkipInShortMode(t)
 
-	proc, err := spawnCmd([]string{binDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil)
+	proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = waitReadyExpectProc(proc, []string{"Skipping defragmentation"}); err != nil {
+	if err = e2e.WaitReadyExpectProc(proc, []string{"Skipping defragmentation"}); err != nil {
 		t.Fatal(err)
 	}
 	if err = proc.Stop(); err != nil {
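These server tests all follow one shape after the migration: build an argv for the etcd binary under e2e.BinDir, spawn it with e2e.SpawnCmd, then block on e2e.WaitReadyExpectProc until an expected log line appears. A condensed sketch of that shape, assuming the signatures implied by the call sites above (the helper name is hypothetical):

// startAndWait distills the pattern used by the tests above.
func startAndWait(t *testing.T, args []string, readyLines []string) *expect.ExpectProcess {
	proc, err := e2e.SpawnCmd(append([]string{e2e.BinDir + "/etcd"}, args...), nil)
	if err != nil {
		t.Fatal(err)
	}
	// Blocks until one of readyLines shows up in the process output,
	// e.g. e2e.EtcdServerReadyLines ("ready to serve client requests").
	if err := e2e.WaitReadyExpectProc(proc, readyLines); err != nil {
		t.Fatal(err)
	}
	return proc
}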
@@ -26,6 +26,7 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/storage/datadir"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 // TODO: test with embedded etcd in integration package
@@ -35,10 +36,10 @@ func TestEtcdCorruptHash(t *testing.T) {
 	// defer os.Setenv("EXPECT_DEBUG", oldenv)
 	// os.Setenv("EXPECT_DEBUG", "1")
 
-	cfg := newConfigNoTLS()
+	cfg := e2e.NewConfigNoTLS()
 
 	// trigger snapshot so that restart member can load peers from disk
-	cfg.snapshotCount = 3
+	cfg.SnapshotCount = 3
 
 	testCtl(t, corruptTest, withQuorum(),
 		withCfg(*cfg),
@@ -76,18 +77,18 @@ func corruptTest(cx ctlCtx) {
 	id0 := sresp.Header.GetMemberId()
 
 	cx.t.Log("stopping etcd[0]...")
-	cx.epc.procs[0].Stop()
+	cx.epc.Procs[0].Stop()
 
 	// corrupting first member by modifying backend offline.
-	fp := datadir.ToBackendFileName(cx.epc.procs[0].Config().dataDirPath)
+	fp := datadir.ToBackendFileName(cx.epc.Procs[0].Config().DataDirPath)
 	cx.t.Logf("corrupting backend: %v", fp)
 	if err = cx.corruptFunc(fp); err != nil {
 		cx.t.Fatal(err)
 	}
 
 	cx.t.Log("restarting etcd[0]")
-	ep := cx.epc.procs[0]
-	proc, err := spawnCmd(append([]string{ep.Config().execPath}, ep.Config().args...), cx.envMap)
+	ep := cx.epc.Procs[0]
+	proc, err := e2e.SpawnCmd(append([]string{ep.Config().ExecPath}, ep.Config().Args...), cx.envMap)
 	if err != nil {
 		cx.t.Fatal(err)
 	}
@@ -95,7 +96,7 @@ func corruptTest(cx ctlCtx) {
 
 	cx.t.Log("waiting for etcd[0] failure...")
 	// restarting corrupted member should fail
-	waitReadyExpectProc(proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)})
+	e2e.WaitReadyExpectProc(proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)})
}
 
 func corruptHash(fpath string) error {
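The corruption test works by stopping a member, damaging its bbolt backend file (located via datadir.ToBackendFileName) on disk, and asserting the restarted member fails its peer consistency check. A sketch of an offline corruption helper in that spirit; the byte-flipping detail is illustrative and not the test's actual corruptFunc (requires the "os" import):

// flipOneByte damages the backend file in place so a restarted member
// fails the data-inconsistency check against its peers.
func flipOneByte(fpath string) error {
	f, err := os.OpenFile(fpath, os.O_RDWR, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	buf := []byte{0}
	// Read one byte past the bbolt page header and invert it.
	if _, err := f.ReadAt(buf, 512); err != nil {
		return err
	}
	buf[0] ^= 0xff
	_, err = f.WriteAt(buf, 512)
	return err
}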
@@ -1,178 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
-	"fmt"
-	"net/url"
-	"os"
-
-	"go.etcd.io/etcd/client/pkg/v3/fileutil"
-	"go.etcd.io/etcd/pkg/v3/expect"
-	"go.uber.org/zap"
-)
-
-var (
-	etcdServerReadyLines = []string{"ready to serve client requests"}
-	binPath              string
-	ctlBinPath           string
-	utlBinPath           string
-)
-
-// etcdProcess is a process that serves etcd requests.
-type etcdProcess interface {
-	EndpointsV2() []string
-	EndpointsV3() []string
-	EndpointsMetrics() []string
-
-	Start() error
-	Restart() error
-	Stop() error
-	Close() error
-	WithStopSignal(sig os.Signal) os.Signal
-	Config() *etcdServerProcessConfig
-	Logs() logsExpect
-}
-
-type logsExpect interface {
-	Expect(string) (string, error)
-}
-
-type etcdServerProcess struct {
-	cfg   *etcdServerProcessConfig
-	proc  *expect.ExpectProcess
-	donec chan struct{} // closed when Interact() terminates
-}
-
-type etcdServerProcessConfig struct {
-	lg       *zap.Logger
-	execPath string
-	args     []string
-	tlsArgs  []string
-	envVars  map[string]string
-
-	dataDirPath string
-	keepDataDir bool
-
-	name string
-
-	purl url.URL
-
-	acurl string
-	murl  string
-
-	initialToken   string
-	initialCluster string
-}
-
-func newEtcdServerProcess(cfg *etcdServerProcessConfig) (*etcdServerProcess, error) {
-	if !fileutil.Exist(cfg.execPath) {
-		return nil, fmt.Errorf("could not find etcd binary: %s", cfg.execPath)
-	}
-	if !cfg.keepDataDir {
-		if err := os.RemoveAll(cfg.dataDirPath); err != nil {
-			return nil, err
-		}
-	}
-	return &etcdServerProcess{cfg: cfg, donec: make(chan struct{})}, nil
-}
-
-func (ep *etcdServerProcess) EndpointsV2() []string      { return []string{ep.cfg.acurl} }
-func (ep *etcdServerProcess) EndpointsV3() []string      { return ep.EndpointsV2() }
-func (ep *etcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.murl} }
-
-func (ep *etcdServerProcess) Start() error {
-	if ep.proc != nil {
-		panic("already started")
-	}
-	ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.name))
-	proc, err := spawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.execPath}, ep.cfg.args...), ep.cfg.envVars)
-	if err != nil {
-		return err
-	}
-	ep.proc = proc
-	err = ep.waitReady()
-	if err == nil {
-		ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.name))
-	}
-	return err
-}
-
-func (ep *etcdServerProcess) Restart() error {
-	ep.cfg.lg.Info("restaring server...", zap.String("name", ep.cfg.name))
-	if err := ep.Stop(); err != nil {
-		return err
-	}
-	ep.donec = make(chan struct{})
-	err := ep.Start()
-	if err == nil {
-		ep.cfg.lg.Info("restared server", zap.String("name", ep.cfg.name))
-	}
-	return err
-}
-
-func (ep *etcdServerProcess) Stop() (err error) {
-	ep.cfg.lg.Info("stoping server...", zap.String("name", ep.cfg.name))
-	if ep == nil || ep.proc == nil {
-		return nil
-	}
-	err = ep.proc.Stop()
-	if err != nil {
-		return err
-	}
-	ep.proc = nil
-	<-ep.donec
-	ep.donec = make(chan struct{})
-	if ep.cfg.purl.Scheme == "unix" || ep.cfg.purl.Scheme == "unixs" {
-		err = os.Remove(ep.cfg.purl.Host + ep.cfg.purl.Path)
-		if err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-	ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.name))
-	return nil
-}
-
-func (ep *etcdServerProcess) Close() error {
-	ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.name))
-	if err := ep.Stop(); err != nil {
-		return err
-	}
-	if !ep.cfg.keepDataDir {
-		ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.dataDirPath))
-		return os.RemoveAll(ep.cfg.dataDirPath)
-	}
-	return nil
-}
-
-func (ep *etcdServerProcess) WithStopSignal(sig os.Signal) os.Signal {
-	ret := ep.proc.StopSignal
-	ep.proc.StopSignal = sig
-	return ret
-}
-
-func (ep *etcdServerProcess) waitReady() error {
-	defer close(ep.donec)
-	return waitReadyExpectProc(ep.proc, etcdServerReadyLines)
-}
-
-func (ep *etcdServerProcess) Config() *etcdServerProcessConfig { return ep.cfg }
-
-func (ep *etcdServerProcess) Logs() logsExpect {
-	if ep.proc == nil {
-		ep.cfg.lg.Panic("Please grap logs before process is stopped")
-	}
-	return ep.proc
-}
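The file deleted above reappears in tests/v3/framework/e2e with its identifiers exported. The call sites in this diff (cx.epc.Procs, ep.Config().ExecPath, Config().Args, Config().DataDirPath, KeepDataDir) imply roughly the following exported shape; treat it as a sketch, since the framework file itself is not part of this excerpt:

// Presumed exported counterparts in go.etcd.io/etcd/tests/v3/framework/e2e.
type EtcdProcess interface {
	EndpointsV2() []string
	EndpointsV3() []string
	EndpointsMetrics() []string

	Start() error
	Restart() error
	Stop() error
	Close() error
	Config() *EtcdServerProcessConfig
}

type EtcdServerProcessConfig struct {
	ExecPath    string
	Args        []string
	DataDirPath string
	KeepDataDir bool
	// Remaining fields presumably mirror the unexported originals above
	// (TlsArgs, EnvVars, Name, InitialToken, InitialCluster, ...).
}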
@@ -23,24 +23,25 @@ import (
 
 	"go.etcd.io/etcd/api/v3/version"
 	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 // TestReleaseUpgrade ensures that changes to master branch does not affect
 // upgrade from latest etcd releases.
 func TestReleaseUpgrade(t *testing.T) {
-	lastReleaseBinary := binDir + "/etcd-last-release"
+	lastReleaseBinary := e2e.BinDir + "/etcd-last-release"
 	if !fileutil.Exist(lastReleaseBinary) {
 		t.Skipf("%q does not exist", lastReleaseBinary)
 	}
 
-	BeforeTest(t)
+	e2e.BeforeTest(t)
 
-	copiedCfg := newConfigNoTLS()
-	copiedCfg.execPath = lastReleaseBinary
-	copiedCfg.snapshotCount = 3
-	copiedCfg.baseScheme = "unix" // to avoid port conflict
+	copiedCfg := e2e.NewConfigNoTLS()
+	copiedCfg.ExecPath = lastReleaseBinary
+	copiedCfg.SnapshotCount = 3
+	copiedCfg.BaseScheme = "unix" // to avoid port conflict
 
-	epc, err := newEtcdProcessCluster(t, copiedCfg)
+	epc, err := e2e.NewEtcdProcessCluster(t, copiedCfg)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
@@ -54,7 +55,7 @@ func TestReleaseUpgrade(t *testing.T) {
 	defer os.Unsetenv("ETCDCTL_API")
 	cx := ctlCtx{
 		t:           t,
-		cfg:         *newConfigNoTLS(),
+		cfg:         *e2e.NewConfigNoTLS(),
 		dialTimeout: 7 * time.Second,
 		quorum:      true,
 		epc:         epc,
@@ -71,17 +72,17 @@ func TestReleaseUpgrade(t *testing.T) {
 
 	t.Log("Cluster of etcd in old version running")
 
-	for i := range epc.procs {
+	for i := range epc.Procs {
 		t.Logf("Stopping node: %v", i)
-		if err := epc.procs[i].Stop(); err != nil {
+		if err := epc.Procs[i].Stop(); err != nil {
 			t.Fatalf("#%d: error closing etcd process (%v)", i, err)
 		}
 		t.Logf("Stopped node: %v", i)
-		epc.procs[i].Config().execPath = binDir + "/etcd"
-		epc.procs[i].Config().keepDataDir = true
+		epc.Procs[i].Config().ExecPath = e2e.BinDir + "/etcd"
+		epc.Procs[i].Config().KeepDataDir = true
 
 		t.Logf("Restarting node in the new version: %v", i)
-		if err := epc.procs[i].Restart(); err != nil {
+		if err := epc.Procs[i].Restart(); err != nil {
 			t.Fatalf("error restarting etcd process (%v)", err)
 		}
 
@@ -100,7 +101,7 @@ func TestReleaseUpgrade(t *testing.T) {
 	// new cluster version needs more time to upgrade
 	ver := version.Cluster(version.Version)
 	for i := 0; i < 7; i++ {
-		if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"` + ver}); err != nil {
+		if err = e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/version", Expected: `"etcdcluster":"` + ver}); err != nil {
 			t.Logf("#%d: %v is not ready yet (%v)", i, ver, err)
 			time.Sleep(time.Second)
 			continue
@@ -114,19 +115,19 @@ func TestReleaseUpgrade(t *testing.T) {
 }
 
 func TestReleaseUpgradeWithRestart(t *testing.T) {
-	lastReleaseBinary := binDir + "/etcd-last-release"
+	lastReleaseBinary := e2e.BinDir + "/etcd-last-release"
 	if !fileutil.Exist(lastReleaseBinary) {
 		t.Skipf("%q does not exist", lastReleaseBinary)
 	}
 
-	BeforeTest(t)
+	e2e.BeforeTest(t)
 
-	copiedCfg := newConfigNoTLS()
-	copiedCfg.execPath = lastReleaseBinary
-	copiedCfg.snapshotCount = 10
-	copiedCfg.baseScheme = "unix"
+	copiedCfg := e2e.NewConfigNoTLS()
+	copiedCfg.ExecPath = lastReleaseBinary
+	copiedCfg.SnapshotCount = 10
+	copiedCfg.BaseScheme = "unix"
 
-	epc, err := newEtcdProcessCluster(t, copiedCfg)
+	epc, err := e2e.NewEtcdProcessCluster(t, copiedCfg)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
@@ -140,7 +141,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
 	defer os.Unsetenv("ETCDCTL_API")
 	cx := ctlCtx{
 		t:           t,
-		cfg:         *newConfigNoTLS(),
+		cfg:         *e2e.NewConfigNoTLS(),
 		dialTimeout: 7 * time.Second,
 		quorum:      true,
 		epc:         epc,
@@ -155,19 +156,19 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
 		}
 	}
 
-	for i := range epc.procs {
-		if err := epc.procs[i].Stop(); err != nil {
+	for i := range epc.Procs {
+		if err := epc.Procs[i].Stop(); err != nil {
 			t.Fatalf("#%d: error closing etcd process (%v)", i, err)
 		}
 	}
 
 	var wg sync.WaitGroup
-	wg.Add(len(epc.procs))
-	for i := range epc.procs {
+	wg.Add(len(epc.Procs))
+	for i := range epc.Procs {
 		go func(i int) {
-			epc.procs[i].Config().execPath = binDir + "/etcd"
-			epc.procs[i].Config().keepDataDir = true
-			if err := epc.procs[i].Restart(); err != nil {
+			epc.Procs[i].Config().ExecPath = e2e.BinDir + "/etcd"
+			epc.Procs[i].Config().KeepDataDir = true
+			if err := epc.Procs[i].Restart(); err != nil {
 				t.Errorf("error restarting etcd process (%v)", err)
 			}
 			wg.Done()
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/pkg/v3/expect"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 var (
@@ -27,7 +28,7 @@ var (
 )
 
 func TestGateway(t *testing.T) {
-	ec, err := newEtcdProcessCluster(t, newConfigNoTLS())
+	ec, err := e2e.NewEtcdProcessCluster(t, e2e.NewConfigNoTLS())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -41,14 +42,14 @@ func TestGateway(t *testing.T) {
 	os.Setenv("ETCDCTL_API", "3")
 	defer os.Unsetenv("ETCDCTL_API")
 
-	err = spawnWithExpect([]string{ctlBinPath, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, "OK\r\n")
+	err = e2e.SpawnWithExpect([]string{e2e.CtlBinPath, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, "OK\r\n")
 	if err != nil {
 		t.Errorf("failed to finish put request through gateway: %v", err)
 	}
 }
 
 func startGateway(t *testing.T, endpoints string) *expect.ExpectProcess {
-	p, err := expect.NewExpect(binPath, "gateway", "--endpoints="+endpoints, "start")
+	p, err := expect.NewExpect(e2e.BinPath, "gateway", "--endpoints="+endpoints, "start")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -5,61 +5,15 @@
 package e2e
 
 import (
-	"flag"
 	"os"
 	"runtime"
 	"testing"
 
 	"go.etcd.io/etcd/client/pkg/v3/testutil"
-	"go.etcd.io/etcd/tests/v3/integration"
-)
-
-var (
-	binDir  string
-	certDir string
-
-	certPath       string
-	privateKeyPath string
-	caPath         string
-
-	certPath2       string
-	privateKeyPath2 string
-
-	certPath3       string
-	privateKeyPath3 string
-
-	crlPath               string
-	revokedCertPath       string
-	revokedPrivateKeyPath string
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestMain(m *testing.M) {
 	os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH)
 	os.Unsetenv("ETCDCTL_API")
 
-	binDirDef := integration.MustAbsPath("../../bin")
-	certDirDef := fixturesDir
-
-	flag.StringVar(&binDir, "bin-dir", binDirDef, "The directory for store etcd and etcdctl binaries.")
-	flag.StringVar(&certDir, "cert-dir", certDirDef, "The directory for store certificate files.")
-	flag.Parse()
-
-	binPath = binDir + "/etcd"
-	ctlBinPath = binDir + "/etcdctl"
-	utlBinPath = binDir + "/etcdutl"
-	certPath = certDir + "/server.crt"
-	privateKeyPath = certDir + "/server.key.insecure"
-	caPath = certDir + "/ca.crt"
-	revokedCertPath = certDir + "/server-revoked.crt"
-	revokedPrivateKeyPath = certDir + "/server-revoked.key.insecure"
-	crlPath = certDir + "/revoke.crl"
-
-	certPath2 = certDir + "/server2.crt"
-	privateKeyPath2 = certDir + "/server2.key.insecure"
-
-	certPath3 = certDir + "/server3.crt"
-	privateKeyPath3 = certDir + "/server3.key.insecure"
-
+	e2e.InitFlags()
 	v := m.Run()
 	if v == 0 && testutil.CheckLeakedGoroutine() {
 		os.Exit(1)
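TestMain shrinks because flag registration and the derived binary/certificate paths move behind e2e.InitFlags. Given the exported names used throughout this diff (e2e.BinDir, e2e.BinPath, e2e.CtlBinPath, e2e.UtlBinPath, e2e.CertPath, ...), InitFlags plausibly looks like the sketch below; the default-path helpers and a CertDir variable are assumptions, not confirmed by this excerpt:

// Sketch of what framework/e2e.InitFlags presumably does.
func InitFlags() {
	flag.StringVar(&BinDir, "bin-dir", defaultBinDir, "directory containing the etcd and etcdctl binaries")
	flag.StringVar(&CertDir, "cert-dir", defaultCertDir, "directory containing the test certificate files")
	flag.Parse()

	BinPath = BinDir + "/etcd"
	CtlBinPath = BinDir + "/etcdctl"
	UtlBinPath = BinDir + "/etcdutl"

	CertPath = CertDir + "/server.crt"
	PrivateKeyPath = CertDir + "/server.key.insecure"
	CaPath = CertDir + "/ca.crt"
	CertPath2 = CertDir + "/server2.crt"
	PrivateKeyPath2 = CertDir + "/server2.key.insecure"
	CertPath3 = CertDir + "/server3.crt"
	PrivateKeyPath3 = CertDir + "/server3.key.insecure"
	// Revoked cert, key, and CRL paths would follow the same pattern.
}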
@@ -19,19 +19,20 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/api/v3/version"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestV3MetricsSecure(t *testing.T) {
-	cfg := newConfigTLS()
-	cfg.clusterSize = 1
-	cfg.metricsURLScheme = "https"
+	cfg := e2e.NewConfigTLS()
+	cfg.ClusterSize = 1
+	cfg.MetricsURLScheme = "https"
 	testCtl(t, metricsTest)
 }
 
 func TestV3MetricsInsecure(t *testing.T) {
-	cfg := newConfigTLS()
-	cfg.clusterSize = 1
-	cfg.metricsURLScheme = "http"
+	cfg := e2e.NewConfigTLS()
+	cfg.ClusterSize = 1
+	cfg.MetricsURLScheme = "http"
 	testCtl(t, metricsTest)
 }
 
@@ -62,7 +63,7 @@ func metricsTest(cx ctlCtx) {
 		if err := ctlV3Watch(cx, []string{"k", "--rev", "1"}, []kvExec{{key: "k", val: "v"}}...); err != nil {
 			cx.t.Fatal(err)
 		}
-		if err := cURLGet(cx.epc, cURLReq{endpoint: test.endpoint, expected: test.expected, metricsURLScheme: cx.cfg.metricsURLScheme}); err != nil {
+		if err := e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: test.endpoint, Expected: test.expected, MetricsURLScheme: cx.cfg.MetricsURLScheme}); err != nil {
 			cx.t.Fatalf("failed get with curl (%v)", err)
 		}
 	}
@@ -25,10 +25,11 @@ import (
 	"go.etcd.io/etcd/client/pkg/v3/fileutil"
 	"go.etcd.io/etcd/server/v3/storage/backend"
 	"go.etcd.io/etcd/server/v3/storage/schema"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestEtctlutlMigrate(t *testing.T) {
-	lastReleaseBinary := binDir + "/etcd-last-release"
+	lastReleaseBinary := e2e.BinDir + "/etcd-last-release"
 
 	tcs := []struct {
 		name string
@@ -103,20 +104,20 @@ func TestEtctlutlMigrate(t *testing.T) {
 	}
 	for _, tc := range tcs {
 		t.Run(tc.name, func(t *testing.T) {
-			BeforeTest(t)
+			e2e.BeforeTest(t)
 			if tc.binary != "" && !fileutil.Exist(tc.binary) {
 				t.Skipf("%q does not exist", lastReleaseBinary)
 			}
 			dataDirPath := t.TempDir()
 
-			epc, err := newEtcdProcessCluster(t, &etcdProcessClusterConfig{
-				execPath:     tc.binary,
-				dataDirPath:  dataDirPath,
-				clusterSize:  1,
-				initialToken: "new",
-				keepDataDir:  true,
-				// Set low snapshotCount to ensure wal snapshot is done
-				snapshotCount: 1,
+			epc, err := e2e.NewEtcdProcessCluster(t, &e2e.EtcdProcessClusterConfig{
+				ExecPath:     tc.binary,
+				DataDirPath:  dataDirPath,
+				ClusterSize:  1,
+				InitialToken: "new",
+				KeepDataDir:  true,
+				// Set low SnapshotCount to ensure wal snapshot is done
+				SnapshotCount: 1,
 			})
 			if err != nil {
 				t.Fatalf("could not start etcd process cluster (%v)", err)
@@ -128,26 +129,26 @@ func TestEtctlutlMigrate(t *testing.T) {
 			}()
 
 			dialTimeout := 10 * time.Second
-			prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}
+			prefixArgs := []string{e2e.CtlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}
 
 			t.Log("Write keys to ensure wal snapshot is created and all v3.5 fields are set...")
 			for i := 0; i < 10; i++ {
-				if err = spawnWithExpect(append(prefixArgs, "put", fmt.Sprintf("%d", i), "value"), "OK"); err != nil {
+				if err = e2e.SpawnWithExpect(append(prefixArgs, "put", fmt.Sprintf("%d", i), "value"), "OK"); err != nil {
					t.Fatal(err)
 				}
 			}
 
 			t.Log("Stopping the server...")
-			if err = epc.procs[0].Stop(); err != nil {
+			if err = epc.Procs[0].Stop(); err != nil {
 				t.Fatal(err)
 			}
 
 			t.Log("etcdutl migrate...")
-			args := []string{utlBinPath, "migrate", "--data-dir", dataDirPath, "--target-version", tc.targetVersion}
+			args := []string{e2e.UtlBinPath, "migrate", "--data-dir", dataDirPath, "--target-version", tc.targetVersion}
 			if tc.force {
 				args = append(args, "--force")
 			}
-			err = spawnWithExpect(args, tc.expectLogsSubString)
+			err = e2e.SpawnWithExpect(args, tc.expectLogsSubString)
 			if err != nil {
 				t.Fatal(err)
 			}
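Because EtcdProcessClusterConfig's fields are now exported, tests can build bespoke cluster configs inline instead of going through the constructors, as the migrate test above does. For instance (the test name and field values are illustrative):

// TestSingleNodeSnapshotConfig is a hypothetical example of an inline config.
func TestSingleNodeSnapshotConfig(t *testing.T) {
	e2e.BeforeTest(t)
	epc, err := e2e.NewEtcdProcessCluster(t, &e2e.EtcdProcessClusterConfig{
		ClusterSize:   1,
		InitialToken:  "new",
		KeepDataDir:   true,
		SnapshotCount: 1, // a low count forces an early WAL snapshot
	})
	if err != nil {
		t.Fatalf("could not start etcd process cluster (%v)", err)
	}
	defer epc.Close()
}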
@@ -15,27 +15,27 @@
 package e2e
 
 import (
 	"fmt"
 	"math/rand"
 	"strings"
 	"testing"
+
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
-func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, newConfigNoTLS()) }
-func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, newConfigAutoTLS()) }
-func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, newConfigTLS()) }
-func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, newConfigPeerTLS()) }
-func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, newConfigClientTLS()) }
-func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, newConfigClientBoth()) }
-func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
+func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigNoTLS()) }
+func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigAutoTLS()) }
+func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigTLS()) }
+func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigPeerTLS()) }
+func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, e2e.NewConfigClientTLS()) }
+func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, e2e.NewConfigClientBoth()) }
+func testCurlPutGet(t *testing.T, cfg *e2e.EtcdProcessClusterConfig) {
 	BeforeTestV2(t)
 
 	// test doesn't use quorum gets, so ensure there are no followers to avoid
 	// stale reads that will break the test
-	cfg = configStandalone(*cfg)
+	cfg = e2e.ConfigStandalone(*cfg)
 
-	cfg.enableV2 = true
-	epc, err := newEtcdProcessCluster(t, cfg)
+	cfg.EnableV2 = true
+	epc, err := e2e.NewEtcdProcessCluster(t, cfg)
 	if err != nil {
 		t.Fatalf("could not start etcd process cluster (%v)", err)
 	}
@@ -49,14 +49,14 @@ func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
 		expectPut = `{"action":"set","node":{"key":"/foo","value":"bar","`
 		expectGet = `{"action":"get","node":{"key":"/foo","value":"bar","`
 	)
-	if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil {
+	if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Value: "bar", Expected: expectPut}); err != nil {
 		t.Fatalf("failed put with curl (%v)", err)
 	}
-	if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet}); err != nil {
+	if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Expected: expectGet}); err != nil {
 		t.Fatalf("failed get with curl (%v)", err)
 	}
-	if cfg.clientTLS == clientTLSAndNonTLS {
-		if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet, isTLS: true}); err != nil {
+	if cfg.ClientTLS == e2e.ClientTLSAndNonTLS {
+		if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Expected: expectGet, IsTLS: true}); err != nil {
 			t.Fatalf("failed get with curl (%v)", err)
 		}
 	}
@@ -65,8 +65,8 @@ func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
 func TestV2CurlIssue5182(t *testing.T) {
 	BeforeTestV2(t)
 
-	copied := newConfigNoTLS()
-	copied.enableV2 = true
+	copied := e2e.NewConfigNoTLS()
+	copied.EnableV2 = true
 	epc := setupEtcdctlTest(t, copied, false)
 	defer func() {
 		if err := epc.Close(); err != nil {
@@ -75,20 +75,20 @@ func TestV2CurlIssue5182(t *testing.T) {
 	}()
 
 	expectPut := `{"action":"set","node":{"key":"/foo","value":"bar","`
-	if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil {
+	if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo", Value: "bar", Expected: expectPut}); err != nil {
 		t.Fatal(err)
 	}
 
 	expectUserAdd := `{"user":"foo","roles":null}`
-	if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/users/foo", value: `{"user":"foo", "password":"pass"}`, expected: expectUserAdd}); err != nil {
+	if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/auth/users/foo", Value: `{"user":"foo", "password":"pass"}`, Expected: expectUserAdd}); err != nil {
 		t.Fatal(err)
 	}
 	expectRoleAdd := `{"role":"foo","permissions":{"kv":{"read":["/foo/*"],"write":null}}`
-	if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/roles/foo", value: `{"role":"foo", "permissions": {"kv": {"read": ["/foo/*"]}}}`, expected: expectRoleAdd}); err != nil {
+	if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/auth/roles/foo", Value: `{"role":"foo", "permissions": {"kv": {"read": ["/foo/*"]}}}`, Expected: expectRoleAdd}); err != nil {
 		t.Fatal(err)
 	}
 	expectUserUpdate := `{"user":"foo","roles":["foo"]}`
-	if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/users/foo", value: `{"user": "foo", "grant": ["foo"]}`, expected: expectUserUpdate}); err != nil {
+	if err := e2e.CURLPut(epc, e2e.CURLReq{Endpoint: "/v2/auth/users/foo", Value: `{"user": "foo", "grant": ["foo"]}`, Expected: expectUserUpdate}); err != nil {
 		t.Fatal(err)
 	}
 
@@ -99,13 +99,13 @@ func TestV2CurlIssue5182(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "root", password: "a", expected: "bar"}); err != nil {
+	if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo/", Username: "root", Password: "a", Expected: "bar"}); err != nil {
 		t.Fatal(err)
 	}
-	if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "foo", password: "pass", expected: "bar"}); err != nil {
+	if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo/", Username: "foo", Password: "pass", Expected: "bar"}); err != nil {
 		t.Fatal(err)
 	}
-	if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "foo", password: "", expected: "bar"}); err != nil {
+	if err := e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/v2/keys/foo/", Username: "foo", Password: "", Expected: "bar"}); err != nil {
 		if !strings.Contains(err.Error(), `The request requires user authentication`) {
 			t.Fatalf("expected 'The request requires user authentication' error, got %v", err)
 		}
@@ -113,88 +113,3 @@ func TestV2CurlIssue5182(t *testing.T) {
 		t.Fatalf("expected 'The request requires user authentication' error")
 	}
 }
-
-type cURLReq struct {
-	username string
-	password string
-
-	isTLS   bool
-	timeout int
-
-	endpoint string
-
-	value    string
-	expected string
-	header   string
-
-	metricsURLScheme string
-
-	ciphers string
-}
-
-// cURLPrefixArgs builds the beginning of a curl command for a given key
-// addressed to a random URL in the given cluster.
-func cURLPrefixArgs(clus *etcdProcessCluster, method string, req cURLReq) []string {
-	var (
-		cmdArgs = []string{"curl"}
-		acurl   = clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl
-	)
-	if req.metricsURLScheme != "https" {
-		if req.isTLS {
-			if clus.cfg.clientTLS != clientTLSAndNonTLS {
-				panic("should not use cURLPrefixArgsUseTLS when serving only TLS or non-TLS")
-			}
-			cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath)
-			acurl = toTLS(clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl)
-		} else if clus.cfg.clientTLS == clientTLS {
-			if !clus.cfg.noCN {
-				cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath)
-			} else {
-				cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath3, "--key", privateKeyPath3)
-			}
-		}
-	}
-	if req.metricsURLScheme != "" {
-		acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].EndpointsMetrics()[0]
-	}
-	ep := acurl + req.endpoint
-
-	if req.username != "" || req.password != "" {
-		cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.username, req.password), ep)
-	} else {
-		cmdArgs = append(cmdArgs, "-L", ep)
-	}
-	if req.timeout != 0 {
-		cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.timeout))
-	}
-
-	if req.header != "" {
-		cmdArgs = append(cmdArgs, "-H", req.header)
-	}
-
-	if req.ciphers != "" {
-		cmdArgs = append(cmdArgs, "--ciphers", req.ciphers)
-	}
-
-	switch method {
-	case "POST", "PUT":
-		dt := req.value
-		if !strings.HasPrefix(dt, "{") { // for non-JSON value
-			dt = "value=" + dt
-		}
-		cmdArgs = append(cmdArgs, "-X", method, "-d", dt)
-	}
-	return cmdArgs
-}
-
-func cURLPost(clus *etcdProcessCluster, req cURLReq) error {
-	return spawnWithExpect(cURLPrefixArgs(clus, "POST", req), req.expected)
-}
-
-func cURLPut(clus *etcdProcessCluster, req cURLReq) error {
-	return spawnWithExpect(cURLPrefixArgs(clus, "PUT", req), req.expected)
-}
-
-func cURLGet(clus *etcdProcessCluster, req cURLReq) error {
-	return spawnWithExpect(cURLPrefixArgs(clus, "GET", req), req.expected)
-}
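With the cURL helpers deleted from this package, callers switch to the exported e2e.CURLPut/e2e.CURLGet and the exported e2e.CURLReq fields. For example, inside a test that has already started a cluster epc:

// Round-trip a key through the v2 HTTP API via curl.
if err := e2e.CURLPut(epc, e2e.CURLReq{
	Endpoint: "/v2/keys/foo",
	Value:    "bar",
	Expected: `{"action":"set"`,
}); err != nil {
	t.Fatalf("failed put with curl (%v)", err)
}
if err := e2e.CURLGet(epc, e2e.CURLReq{
	Endpoint: "/v2/keys/foo",
	Expected: `"value":"bar"`,
}); err != nil {
	t.Fatalf("failed get with curl (%v)", err)
}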
@@ -19,24 +19,25 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func createV2store(t testing.TB, dataDirPath string) {
 	t.Log("Creating not-yet v2-deprecated etcd")
 
-	cfg := configStandalone(etcdProcessClusterConfig{enableV2: true, dataDirPath: dataDirPath, snapshotCount: 5})
-	epc, err := newEtcdProcessCluster(t, cfg)
+	cfg := e2e.ConfigStandalone(e2e.EtcdProcessClusterConfig{EnableV2: true, DataDirPath: dataDirPath, SnapshotCount: 5})
+	epc, err := e2e.NewEtcdProcessCluster(t, cfg)
 	assert.NoError(t, err)
 
 	defer func() {
 		assert.NoError(t, epc.Stop())
 	}()
 
-	// We need to exceed 'snapshotCount' such that v2 snapshot is dumped.
+	// We need to exceed 'SnapshotCount' such that v2 snapshot is dumped.
 	for i := 0; i < 10; i++ {
-		if err := cURLPut(epc, cURLReq{
-			endpoint: "/v2/keys/foo", value: "bar" + fmt.Sprint(i),
-			expected: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)}); err != nil {
+		if err := e2e.CURLPut(epc, e2e.CURLReq{
+			Endpoint: "/v2/keys/foo", Value: "bar" + fmt.Sprint(i),
+			Expected: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)}); err != nil {
 			t.Fatalf("failed put with curl (%v)", err)
 		}
 	}
@@ -45,17 +46,17 @@ func createV2store(t testing.TB, dataDirPath string) {
 func assertVerifyCanStartV2deprecationNotYet(t testing.TB, dataDirPath string) {
 	t.Log("verify: possible to start etcd with --v2-deprecation=not-yet mode")
 
-	cfg := configStandalone(etcdProcessClusterConfig{enableV2: true, dataDirPath: dataDirPath, v2deprecation: "not-yet", keepDataDir: true})
-	epc, err := newEtcdProcessCluster(t, cfg)
+	cfg := e2e.ConfigStandalone(e2e.EtcdProcessClusterConfig{EnableV2: true, DataDirPath: dataDirPath, V2deprecation: "not-yet", KeepDataDir: true})
+	epc, err := e2e.NewEtcdProcessCluster(t, cfg)
 	assert.NoError(t, err)
 
 	defer func() {
 		assert.NoError(t, epc.Stop())
 	}()
 
-	if err := cURLGet(epc, cURLReq{
-		endpoint: "/v2/keys/foo",
-		expected: `{"action":"get","node":{"key":"/foo","value":"bar9","modifiedIndex":13,"createdIndex":13}}`}); err != nil {
+	if err := e2e.CURLGet(epc, e2e.CURLReq{
+		Endpoint: "/v2/keys/foo",
+		Expected: `{"action":"get","node":{"key":"/foo","value":"bar9","modifiedIndex":13,"createdIndex":13}}`}); err != nil {
 		t.Fatalf("failed get with curl (%v)", err)
 	}
 
@@ -63,7 +64,7 @@ func assertVerifyCanStartV2deprecationNotYet(t testing.TB, dataDirPath string) {
 
 func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath string) {
 	t.Log("Verify its infeasible to start etcd with --v2-deprecation=write-only mode")
-	proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath}, nil)
+	proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath}, nil)
 	assert.NoError(t, err)
 
 	_, err = proc.Expect("detected disallowed custom content in v2store for stage --v2-deprecation=write-only")
@@ -71,7 +72,7 @@ func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath str
 }
 
 func TestV2Deprecation(t *testing.T) {
-	BeforeTest(t)
+	e2e.BeforeTest(t)
 	dataDirPath := t.TempDir()
 
 	t.Run("create-storev2-data", func(t *testing.T) {
@@ -89,8 +90,8 @@ func TestV2Deprecation(t *testing.T) {
 }
 
 func TestV2DeprecationWriteOnlyNoV2Api(t *testing.T) {
-	BeforeTest(t)
-	proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"}, nil)
+	e2e.BeforeTest(t)
+	proc, err := e2e.SpawnCmd([]string{e2e.BinDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"}, nil)
 	assert.NoError(t, err)
 
 	_, err = proc.Expect("--enable-v2 and --v2-deprecation=write-only are mutually exclusive")
@@ -22,14 +22,15 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/api/v3/version"
+	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
 func TestV3CurlCipherSuitesValid(t *testing.T) { testV3CurlCipherSuites(t, true) }
 func TestV3CurlCipherSuitesMismatch(t *testing.T) { testV3CurlCipherSuites(t, false) }
 func testV3CurlCipherSuites(t *testing.T, valid bool) {
-	cc := newConfigClientTLS()
-	cc.clusterSize = 1
-	cc.cipherSuites = []string{
+	cc := e2e.NewConfigClientTLS()
+	cc.ClusterSize = 1
+	cc.CipherSuites = []string{
 		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
 		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
 		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
@@ -45,11 +46,11 @@ func testV3CurlCipherSuites(t *testing.T, valid bool) {
 }
 
 func cipherSuiteTestValid(cx ctlCtx) {
-	if err := cURLGet(cx.epc, cURLReq{
-		endpoint:         "/metrics",
-		expected:         fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version),
-		metricsURLScheme: cx.cfg.metricsURLScheme,
-		ciphers:          "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+	if err := e2e.CURLGet(cx.epc, e2e.CURLReq{
+		Endpoint:         "/metrics",
+		Expected:         fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version),
+		MetricsURLScheme: cx.cfg.MetricsURLScheme,
+		Ciphers:          "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
 	}); err != nil {
 		cx.t.Fatalf("failed get with curl (%v)", err)
 	}
@@ -58,11 +59,11 @@ func cipherSuiteTestValid(cx ctlCtx) {
 func cipherSuiteTestMismatch(cx ctlCtx) {
 	var err error
 	for _, exp := range []string{"alert handshake failure", "failed setting cipher list"} {
-		err = cURLGet(cx.epc, cURLReq{
-			endpoint:         "/metrics",
-			expected:         exp,
-			metricsURLScheme: cx.cfg.metricsURLScheme,
-			ciphers:          "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+		err = e2e.CURLGet(cx.epc, e2e.CURLReq{
+			Endpoint:         "/metrics",
+			Expected:         exp,
+			MetricsURLScheme: cx.cfg.MetricsURLScheme,
+			Ciphers:          "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
 		})
 		if err == nil {
 			break
@ -19,26 +19,27 @@ import (
	"testing"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/tests/v3/framework/e2e"
)

func TestV3CurlLeaseGrantNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlLeaseGrant, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlLeaseGrant, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}
func TestV3CurlLeaseRevokeNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlLeaseRevoke, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlLeaseRevoke, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}
func TestV3CurlLeaseLeasesNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlLeaseLeases, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlLeaseLeases, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}
func TestV3CurlLeaseKeepAliveNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlLeaseKeepAlive, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlLeaseKeepAlive, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}

@ -49,7 +50,7 @@ type v3cURLTest struct {
}

func testV3CurlLeaseGrant(cx ctlCtx) {
	leaseID := randomLeaseID()
	leaseID := e2e.RandomLeaseID()

	tests := []v3cURLTest{
		{
@ -73,13 +74,13 @@ func testV3CurlLeaseGrant(cx ctlCtx) {
			expected: `"grantedTTL"`,
		},
	}
	if err := cURLWithExpected(cx, tests); err != nil {
	if err := CURLWithExpected(cx, tests); err != nil {
		cx.t.Fatalf("testV3CurlLeaseGrant: %v", err)
	}
}

func testV3CurlLeaseRevoke(cx ctlCtx) {
	leaseID := randomLeaseID()
	leaseID := e2e.RandomLeaseID()

	tests := []v3cURLTest{
		{
@ -93,13 +94,13 @@ func testV3CurlLeaseRevoke(cx ctlCtx) {
			expected: `"revision":"`,
		},
	}
	if err := cURLWithExpected(cx, tests); err != nil {
	if err := CURLWithExpected(cx, tests); err != nil {
		cx.t.Fatalf("testV3CurlLeaseRevoke: %v", err)
	}
}

func testV3CurlLeaseLeases(cx ctlCtx) {
	leaseID := randomLeaseID()
	leaseID := e2e.RandomLeaseID()

	tests := []v3cURLTest{
		{
@ -113,13 +114,13 @@ func testV3CurlLeaseLeases(cx ctlCtx) {
			expected: gwLeaseIDExpected(leaseID),
		},
	}
	if err := cURLWithExpected(cx, tests); err != nil {
	if err := CURLWithExpected(cx, tests); err != nil {
		cx.t.Fatalf("testV3CurlLeaseGrant: %v", err)
	}
}

func testV3CurlLeaseKeepAlive(cx ctlCtx) {
	leaseID := randomLeaseID()
	leaseID := e2e.RandomLeaseID()

	tests := []v3cURLTest{
		{
@ -133,7 +134,7 @@ func testV3CurlLeaseKeepAlive(cx ctlCtx) {
			expected: gwLeaseIDExpected(leaseID),
		},
	}
	if err := cURLWithExpected(cx, tests); err != nil {
	if err := CURLWithExpected(cx, tests); err != nil {
		cx.t.Fatalf("testV3CurlLeaseGrant: %v", err)
	}
}
@ -144,7 +145,7 @@ func gwLeaseIDExpected(leaseID int64) string {

func gwLeaseTTLWithKeys(cx ctlCtx, leaseID int64) string {
	d := &pb.LeaseTimeToLiveRequest{ID: leaseID, Keys: true}
	s, err := dataMarshal(d)
	s, err := e2e.DataMarshal(d)
	if err != nil {
		cx.t.Fatalf("gwLeaseTTLWithKeys: error (%v)", err)
	}
@ -153,7 +154,7 @@ func gwLeaseTTLWithKeys(cx ctlCtx, leaseID int64) string {

func gwLeaseKeepAlive(cx ctlCtx, leaseID int64) string {
	d := &pb.LeaseKeepAliveRequest{ID: leaseID}
	s, err := dataMarshal(d)
	s, err := e2e.DataMarshal(d)
	if err != nil {
		cx.t.Fatalf("gwLeaseKeepAlive: Marshal error (%v)", err)
	}
@ -162,7 +163,7 @@ func gwLeaseKeepAlive(cx ctlCtx, leaseID int64) string {

func gwLeaseGrant(cx ctlCtx, leaseID int64, ttl int64) string {
	d := &pb.LeaseGrantRequest{ID: leaseID, TTL: ttl}
	s, err := dataMarshal(d)
	s, err := e2e.DataMarshal(d)
	if err != nil {
		cx.t.Fatalf("gwLeaseGrant: Marshal error (%v)", err)
	}
@ -171,7 +172,7 @@ func gwLeaseGrant(cx ctlCtx, leaseID int64, ttl int64) string {

func gwLeaseRevoke(cx ctlCtx, leaseID int64) string {
	d := &pb.LeaseRevokeRequest{ID: leaseID}
	s, err := dataMarshal(d)
	s, err := e2e.DataMarshal(d)
	if err != nil {
		cx.t.Fatalf("gwLeaseRevoke: Marshal error (%v)", err)
	}
@ -180,7 +181,7 @@ func gwLeaseRevoke(cx ctlCtx, leaseID int64) string {

func gwKVPutLease(cx ctlCtx, k string, v string, leaseID int64) string {
	d := pb.PutRequest{Key: []byte(k), Value: []byte(v), Lease: leaseID}
	s, err := dataMarshal(d)
	s, err := e2e.DataMarshal(d)
	if err != nil {
		cx.t.Fatalf("gwKVPutLease: Marshal error (%v)", err)
	}
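Every gw* helper above follows the same shape: build a protobuf request, JSON-encode it with the now-exported e2e.DataMarshal, and let the test table POST it through the gRPC gateway. A minimal sketch under the same imports (the lease ID and TTL are illustrative):

	d := &pb.LeaseGrantRequest{ID: 1234, TTL: 5}
	s, err := e2e.DataMarshal(d) // plain encoding/json marshalling of the request
	if err != nil {
		cx.t.Fatalf("marshal error (%v)", err)
	}
	_ = s // used as the value of a v3cURLTest entry against "/lease/grant"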
@ -27,6 +27,7 @@ import (
	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	"go.etcd.io/etcd/client/pkg/v3/testutil"
	epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
	"go.etcd.io/etcd/tests/v3/framework/e2e"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)
@ -35,27 +36,27 @@ var apiPrefix = []string{"/v3"}

func TestV3CurlPutGetNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}
func TestV3CurlPutGetAutoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigAutoTLS()))
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigAutoTLS()))
	}
}
func TestV3CurlPutGetAllTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigTLS()))
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigTLS()))
	}
}
func TestV3CurlPutGetPeerTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigPeerTLS()))
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigPeerTLS()))
	}
}
func TestV3CurlPutGetClientTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigClientTLS()))
		testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigClientTLS()))
	}
}
func TestV3CurlWatch(t *testing.T) {
@ -75,7 +76,7 @@ func TestV3CurlAuth(t *testing.T) {
}
func TestV3CurlAuthClientTLSCertAuth(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlAuth, withApiPrefix(p), withCfg(*newConfigClientTLSCertAuthWithNoCN()))
		testCtl(t, testV3CurlAuth, withApiPrefix(p), withCfg(*e2e.NewConfigClientTLSCertAuthWithNoCN()))
	}
}

@ -103,14 +104,14 @@ func testV3CurlPutGet(cx ctlCtx) {

	p := cx.apiPrefix

	if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putData), expected: expectPut}); err != nil {
	if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putData), Expected: expectPut}); err != nil {
		cx.t.Fatalf("failed testV3CurlPutGet put with curl using prefix (%s) (%v)", p, err)
	}
	if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/range"), value: string(rangeData), expected: expectGet}); err != nil {
	if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/range"), Value: string(rangeData), Expected: expectGet}); err != nil {
		cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err)
	}
	if cx.cfg.clientTLS == clientTLSAndNonTLS {
		if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/range"), value: string(rangeData), expected: expectGet, isTLS: true}); err != nil {
	if cx.cfg.ClientTLS == e2e.ClientTLSAndNonTLS {
		if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/range"), Value: string(rangeData), Expected: expectGet, IsTLS: true}); err != nil {
			cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err)
		}
	}
@ -134,11 +135,11 @@ func testV3CurlWatch(cx ctlCtx) {
	wstr := `{"create_request" : ` + string(wreq) + "}"
	p := cx.apiPrefix

	if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "revision"}); err != nil {
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Expected: "revision"}); err != nil {
		cx.t.Fatalf("failed testV3CurlWatch put with curl using prefix (%s) (%v)", p, err)
	}
	// expects "bar", timeout after 2 seconds since stream waits forever
	if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/watch"), value: wstr, expected: `"YmFy"`, timeout: 2}); err != nil {
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/watch"), Value: wstr, Expected: `"YmFy"`, Timeout: 2}); err != nil {
		cx.t.Fatalf("failed testV3CurlWatch watch with curl using prefix (%s) (%v)", p, err)
	}
}
@ -171,13 +172,13 @@ func testV3CurlTxn(cx ctlCtx) {
	}
	expected := `"succeeded":true,"responses":[{"response_put":{"header":{"revision":"2"}}}]`
	p := cx.apiPrefix
	if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/txn"), value: string(jsonDat), expected: expected}); err != nil {
	if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/txn"), Value: string(jsonDat), Expected: expected}); err != nil {
		cx.t.Fatalf("failed testV3CurlTxn txn with curl using prefix (%s) (%v)", p, err)
	}

	// was crashing etcd server
	malformed := `{"compare":[{"result":0,"target":1,"key":"Zm9v","TargetUnion":null}],"success":[{"Request":{"RequestPut":{"key":"Zm9v","value":"YmFy"}}}]}`
	if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/txn"), value: malformed, expected: "error"}); err != nil {
	if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/txn"), Value: malformed, Expected: "error"}); err != nil {
		cx.t.Fatalf("failed testV3CurlTxn put with curl using prefix (%s) (%v)", p, err)
	}

@ -194,7 +195,7 @@ func testV3CurlAuth(cx ctlCtx) {
		user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: pwds[i], Options: options[i]})
		testutil.AssertNil(cx.t, err)

		if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/add"), value: string(user), expected: "revision"}); err != nil {
		if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/user/add"), Value: string(user), Expected: "revision"}); err != nil {
			cx.t.Fatalf("failed testV3CurlAuth add user %v with curl (%v)", usernames[i], err)
		}
	}
@ -203,7 +204,7 @@ func testV3CurlAuth(cx ctlCtx) {
	rolereq, err := json.Marshal(&pb.AuthRoleAddRequest{Name: string("root")})
	testutil.AssertNil(cx.t, err)

	if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/role/add"), value: string(rolereq), expected: "revision"}); err != nil {
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/role/add"), Value: string(rolereq), Expected: "revision"}); err != nil {
		cx.t.Fatalf("failed testV3CurlAuth create role with curl using prefix (%s) (%v)", p, err)
	}

@ -212,13 +213,13 @@ func testV3CurlAuth(cx ctlCtx) {
		grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"})
		testutil.AssertNil(cx.t, err)

		if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/grant"), value: string(grantroleroot), expected: "revision"}); err != nil {
		if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/user/grant"), Value: string(grantroleroot), Expected: "revision"}); err != nil {
			cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err)
		}
	}

	// enable auth
	if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/enable"), value: string("{}"), expected: "revision"}); err != nil {
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/enable"), Value: string("{}"), Expected: "revision"}); err != nil {
		cx.t.Fatalf("failed testV3CurlAuth enable auth with curl using prefix (%s) (%v)", p, err)
	}

@ -228,7 +229,7 @@ func testV3CurlAuth(cx ctlCtx) {
	testutil.AssertNil(cx.t, err)

	// fail put no auth
	if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "error"}); err != nil {
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Expected: "error"}); err != nil {
		cx.t.Fatalf("failed testV3CurlAuth no auth put with curl using prefix (%s) (%v)", p, err)
	}

@ -242,8 +243,8 @@ func testV3CurlAuth(cx ctlCtx) {
		lineFunc = func(txt string) bool { return true }
	)

	cmdArgs = cURLPrefixArgs(cx.epc, "POST", cURLReq{endpoint: path.Join(p, "/auth/authenticate"), value: string(authreq)})
	proc, err := spawnCmd(cmdArgs, cx.envMap)
	cmdArgs = e2e.CURLPrefixArgs(cx.epc, "POST", e2e.CURLReq{Endpoint: path.Join(p, "/auth/authenticate"), Value: string(authreq)})
	proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
	testutil.AssertNil(cx.t, err)
	defer proc.Close()

@ -261,7 +262,7 @@ func testV3CurlAuth(cx ctlCtx) {
	authHeader = "Authorization: " + token

	// put with auth
	if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), header: authHeader, expected: "revision"}); err != nil {
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Header: authHeader, Expected: "revision"}); err != nil {
		cx.t.Fatalf("failed testV3CurlAuth auth put with curl using prefix (%s) and user (%v) (%v)", p, usernames[i], err)
	}
}
@ -269,7 +270,7 @@ func testV3CurlAuth(cx ctlCtx) {

func TestV3CurlCampaignNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlCampaign, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlCampaign, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}

@ -281,11 +282,11 @@ func testV3CurlCampaign(cx ctlCtx) {
	if err != nil {
		cx.t.Fatal(err)
	}
	cargs := cURLPrefixArgs(cx.epc, "POST", cURLReq{
		endpoint: path.Join(cx.apiPrefix, "/election/campaign"),
		value: string(cdata),
	cargs := e2e.CURLPrefixArgs(cx.epc, "POST", e2e.CURLReq{
		Endpoint: path.Join(cx.apiPrefix, "/election/campaign"),
		Value: string(cdata),
	})
	lines, err := spawnWithExpectLines(cargs, cx.envMap, `"leader":{"name":"`)
	lines, err := e2e.SpawnWithExpectLines(cargs, cx.envMap, `"leader":{"name":"`)
	if err != nil {
		cx.t.Fatalf("failed post campaign request (%s) (%v)", cx.apiPrefix, err)
	}
@ -320,10 +321,10 @@ func testV3CurlCampaign(cx ctlCtx) {
	if err != nil {
		cx.t.Fatal(err)
	}
	if err = cURLPost(cx.epc, cURLReq{
		endpoint: path.Join(cx.apiPrefix, "/election/proclaim"),
		value: string(pdata),
		expected: `"revision":`,
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
		Endpoint: path.Join(cx.apiPrefix, "/election/proclaim"),
		Value: string(pdata),
		Expected: `"revision":`,
	}); err != nil {
		cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err)
	}
@ -331,7 +332,7 @@ func testV3CurlCampaign(cx ctlCtx) {

func TestV3CurlProclaimMissiongLeaderKeyNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlProclaimMissiongLeaderKey, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlProclaimMissiongLeaderKey, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}

@ -340,10 +341,10 @@ func testV3CurlProclaimMissiongLeaderKey(cx ctlCtx) {
	if err != nil {
		cx.t.Fatal(err)
	}
	if err = cURLPost(cx.epc, cURLReq{
		endpoint: path.Join(cx.apiPrefix, "/election/proclaim"),
		value: string(pdata),
		expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`,
	if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
		Endpoint: path.Join(cx.apiPrefix, "/election/proclaim"),
		Value: string(pdata),
		Expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`,
	}); err != nil {
		cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err)
	}
@ -351,15 +352,15 @@ func testV3CurlProclaimMissiongLeaderKey(cx ctlCtx) {

func TestV3CurlResignMissiongLeaderKeyNoTLS(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlResignMissiongLeaderKey, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlResignMissiongLeaderKey, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}

func testV3CurlResignMissiongLeaderKey(cx ctlCtx) {
	if err := cURLPost(cx.epc, cURLReq{
		endpoint: path.Join(cx.apiPrefix, "/election/resign"),
		value: `{}`,
		expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`,
	if err := e2e.CURLPost(cx.epc, e2e.CURLReq{
		Endpoint: path.Join(cx.apiPrefix, "/election/resign"),
		Value: `{}`,
		Expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`,
	}); err != nil {
		cx.t.Fatalf("failed post resign request (%s) (%v)", cx.apiPrefix, err)
	}
@ -367,14 +368,14 @@ func testV3CurlResignMissiongLeaderKey(cx ctlCtx) {

func TestV3CurlMaintenanceAlarmMissiongAlarm(t *testing.T) {
	for _, p := range apiPrefix {
		testCtl(t, testV3CurlMaintenanceAlarmMissiongAlarm, withApiPrefix(p), withCfg(*newConfigNoTLS()))
		testCtl(t, testV3CurlMaintenanceAlarmMissiongAlarm, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS()))
	}
}

func testV3CurlMaintenanceAlarmMissiongAlarm(cx ctlCtx) {
	if err := cURLPost(cx.epc, cURLReq{
		endpoint: path.Join(cx.apiPrefix, "/maintenance/alarm"),
		value: `{"action": "ACTIVATE"}`,
	if err := e2e.CURLPost(cx.epc, e2e.CURLReq{
		Endpoint: path.Join(cx.apiPrefix, "/maintenance/alarm"),
		Value: `{"action": "ACTIVATE"}`,
	}); err != nil {
		cx.t.Fatalf("failed post maintenance alarm (%s) (%v)", cx.apiPrefix, err)
	}
@ -391,11 +392,11 @@ type campaignResponse struct {
	} `json:"leader,omitempty"`
}

func cURLWithExpected(cx ctlCtx, tests []v3cURLTest) error {
func CURLWithExpected(cx ctlCtx, tests []v3cURLTest) error {
	p := cx.apiPrefix
	for _, t := range tests {
		value := fmt.Sprintf("%v", t.value)
		if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, t.endpoint), value: value, expected: t.expected}); err != nil {
		if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, t.endpoint), Value: value, Expected: t.expected}); err != nil {
			return fmt.Errorf("prefix (%s) endpoint (%s): error (%v), wanted %v", p, t.endpoint, err, t.expected)
		}
	}
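The whole file is the same mechanical migration: package-private cURL helpers and their lowercase struct fields become exported identifiers in the new framework package. Before and after in one line, taken from the hunks above:

	// before: cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putData), expected: expectPut})
	// after:  e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putData), Expected: expectPut})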
475
tests/framework/e2e/cluster.go
Normal file
@ -0,0 +1,475 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
	"fmt"
	"net/url"
	"os"
	"path"
	"strings"
	"testing"
	"time"

	"go.etcd.io/etcd/server/v3/etcdserver"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
)

const EtcdProcessBasePort = 20000

type ClientConnType int

const (
	ClientNonTLS ClientConnType = iota
	ClientTLS
	ClientTLSAndNonTLS
)

func NewConfigNoTLS() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{ClusterSize: 3,
		InitialToken: "new",
	}
}

func NewConfigAutoTLS() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 3,
		IsPeerTLS: true,
		IsPeerAutoTLS: true,
		InitialToken: "new",
	}
}

func NewConfigTLS() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 3,
		ClientTLS: ClientTLS,
		IsPeerTLS: true,
		InitialToken: "new",
	}
}

func NewConfigClientTLS() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 3,
		ClientTLS: ClientTLS,
		InitialToken: "new",
	}
}

func NewConfigClientBoth() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 1,
		ClientTLS: ClientTLSAndNonTLS,
		InitialToken: "new",
	}
}

func NewConfigClientAutoTLS() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 1,
		IsClientAutoTLS: true,
		ClientTLS: ClientTLS,
		InitialToken: "new",
	}
}

func NewConfigPeerTLS() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 3,
		IsPeerTLS: true,
		InitialToken: "new",
	}
}

func NewConfigClientTLSCertAuth() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 1,
		ClientTLS: ClientTLS,
		InitialToken: "new",
		ClientCertAuthEnabled: true,
	}
}

func NewConfigClientTLSCertAuthWithNoCN() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 1,
		ClientTLS: ClientTLS,
		InitialToken: "new",
		ClientCertAuthEnabled: true,
		NoCN: true,
	}
}

func NewConfigJWT() *EtcdProcessClusterConfig {
	return &EtcdProcessClusterConfig{
		ClusterSize: 1,
		InitialToken: "new",
		AuthTokenOpts: "jwt,pub-key=" + path.Join(FixturesDir, "server.crt") +
			",priv-key=" + path.Join(FixturesDir, "server.key.insecure") + ",sign-method=RS256,ttl=1s",
	}
}

func ConfigStandalone(cfg EtcdProcessClusterConfig) *EtcdProcessClusterConfig {
	ret := cfg
	ret.ClusterSize = 1
	return &ret
}

type EtcdProcessCluster struct {
	lg *zap.Logger
	Cfg *EtcdProcessClusterConfig
	Procs []EtcdProcess
}

type EtcdProcessClusterConfig struct {
	ExecPath string
	DataDirPath string
	KeepDataDir bool
	EnvVars map[string]string

	ClusterSize int

	BaseScheme string
	BasePort int

	MetricsURLScheme string

	SnapshotCount int // default is 10000

	ClientTLS ClientConnType
	ClientCertAuthEnabled bool
	IsPeerTLS bool
	IsPeerAutoTLS bool
	IsClientAutoTLS bool
	IsClientCRL bool
	NoCN bool

	CipherSuites []string

	ForceNewCluster bool
	InitialToken string
	QuotaBackendBytes int64
	NoStrictReconfig bool
	EnableV2 bool
	InitialCorruptCheck bool
	AuthTokenOpts string
	V2deprecation string

	RollingStart bool
}

// NewEtcdProcessCluster launches a new cluster from etcd processes, returning
// a new EtcdProcessCluster once all nodes are ready to accept client requests.
func NewEtcdProcessCluster(t testing.TB, cfg *EtcdProcessClusterConfig) (*EtcdProcessCluster, error) {
	SkipInShortMode(t)

	etcdCfgs := cfg.EtcdServerProcessConfigs(t)
	epc := &EtcdProcessCluster{
		Cfg: cfg,
		lg: zaptest.NewLogger(t),
		Procs: make([]EtcdProcess, cfg.ClusterSize),
	}

	// launch etcd processes
	for i := range etcdCfgs {
		proc, err := NewEtcdProcess(etcdCfgs[i])
		if err != nil {
			epc.Close()
			return nil, fmt.Errorf("Cannot configure: %v", err)
		}
		epc.Procs[i] = proc
	}

	if cfg.RollingStart {
		if err := epc.RollingStart(); err != nil {
			return nil, fmt.Errorf("Cannot rolling-start: %v", err)
		}
	} else {
		if err := epc.Start(); err != nil {
			return nil, fmt.Errorf("Cannot start: %v", err)
		}
	}
	return epc, nil
}

func (cfg *EtcdProcessClusterConfig) ClientScheme() string {
	if cfg.ClientTLS == ClientTLS {
		return "https"
	}
	return "http"
}

func (cfg *EtcdProcessClusterConfig) PeerScheme() string {
	peerScheme := cfg.BaseScheme
	if peerScheme == "" {
		peerScheme = "http"
	}
	if cfg.IsPeerTLS {
		peerScheme += "s"
	}
	return peerScheme
}

func (cfg *EtcdProcessClusterConfig) EtcdServerProcessConfigs(tb testing.TB) []*EtcdServerProcessConfig {
	lg := zaptest.NewLogger(tb)

	if cfg.BasePort == 0 {
		cfg.BasePort = EtcdProcessBasePort
	}
	if cfg.ExecPath == "" {
		cfg.ExecPath = BinPath
	}
	if cfg.SnapshotCount == 0 {
		cfg.SnapshotCount = etcdserver.DefaultSnapshotCount
	}

	etcdCfgs := make([]*EtcdServerProcessConfig, cfg.ClusterSize)
	initialCluster := make([]string, cfg.ClusterSize)
	for i := 0; i < cfg.ClusterSize; i++ {
		var curls []string
		var curl, curltls string
		port := cfg.BasePort + 5*i
		curlHost := fmt.Sprintf("localhost:%d", port)

		switch cfg.ClientTLS {
		case ClientNonTLS, ClientTLS:
			curl = (&url.URL{Scheme: cfg.ClientScheme(), Host: curlHost}).String()
			curls = []string{curl}
		case ClientTLSAndNonTLS:
			curl = (&url.URL{Scheme: "http", Host: curlHost}).String()
			curltls = (&url.URL{Scheme: "https", Host: curlHost}).String()
			curls = []string{curl, curltls}
		}

		purl := url.URL{Scheme: cfg.PeerScheme(), Host: fmt.Sprintf("localhost:%d", port+1)}
		name := fmt.Sprintf("test-%d", i)
		dataDirPath := cfg.DataDirPath
		if cfg.DataDirPath == "" {
			dataDirPath = tb.TempDir()
		}
		initialCluster[i] = fmt.Sprintf("%s=%s", name, purl.String())

		args := []string{
			"--name", name,
			"--listen-client-urls", strings.Join(curls, ","),
			"--advertise-client-urls", strings.Join(curls, ","),
			"--listen-peer-urls", purl.String(),
			"--initial-advertise-peer-urls", purl.String(),
			"--initial-cluster-token", cfg.InitialToken,
			"--data-dir", dataDirPath,
			"--snapshot-count", fmt.Sprintf("%d", cfg.SnapshotCount),
		}
		args = AddV2Args(args)
		if cfg.ForceNewCluster {
			args = append(args, "--force-new-cluster")
		}
		if cfg.QuotaBackendBytes > 0 {
			args = append(args,
				"--quota-backend-bytes", fmt.Sprintf("%d", cfg.QuotaBackendBytes),
			)
		}
		if cfg.NoStrictReconfig {
			args = append(args, "--strict-reconfig-check=false")
		}
		if cfg.EnableV2 {
			args = append(args, "--enable-v2")
		}
		if cfg.InitialCorruptCheck {
			args = append(args, "--experimental-initial-corrupt-check")
		}
		var murl string
		if cfg.MetricsURLScheme != "" {
			murl = (&url.URL{
				Scheme: cfg.MetricsURLScheme,
				Host: fmt.Sprintf("localhost:%d", port+2),
			}).String()
			args = append(args, "--listen-metrics-urls", murl)
		}

		args = append(args, cfg.TlsArgs()...)

		if cfg.AuthTokenOpts != "" {
			args = append(args, "--auth-token", cfg.AuthTokenOpts)
		}

		if cfg.V2deprecation != "" {
			args = append(args, "--v2-deprecation", cfg.V2deprecation)
		}

		etcdCfgs[i] = &EtcdServerProcessConfig{
			lg: lg,
			ExecPath: cfg.ExecPath,
			Args: args,
			EnvVars: cfg.EnvVars,
			TlsArgs: cfg.TlsArgs(),
			DataDirPath: dataDirPath,
			KeepDataDir: cfg.KeepDataDir,
			Name: name,
			Purl: purl,
			Acurl: curl,
			Murl: murl,
			InitialToken: cfg.InitialToken,
		}
	}

	initialClusterArgs := []string{"--initial-cluster", strings.Join(initialCluster, ",")}
	for i := range etcdCfgs {
		etcdCfgs[i].InitialCluster = strings.Join(initialCluster, ",")
		etcdCfgs[i].Args = append(etcdCfgs[i].Args, initialClusterArgs...)
	}

	return etcdCfgs
}

func (cfg *EtcdProcessClusterConfig) TlsArgs() (args []string) {
	if cfg.ClientTLS != ClientNonTLS {
		if cfg.IsClientAutoTLS {
			args = append(args, "--auto-tls")
		} else {
			tlsClientArgs := []string{
				"--cert-file", CertPath,
				"--key-file", PrivateKeyPath,
				"--trusted-ca-file", CaPath,
			}
			args = append(args, tlsClientArgs...)

			if cfg.ClientCertAuthEnabled {
				args = append(args, "--client-cert-auth")
			}
		}
	}

	if cfg.IsPeerTLS {
		if cfg.IsPeerAutoTLS {
			args = append(args, "--peer-auto-tls")
		} else {
			tlsPeerArgs := []string{
				"--peer-cert-file", CertPath,
				"--peer-key-file", PrivateKeyPath,
				"--peer-trusted-ca-file", CaPath,
			}
			args = append(args, tlsPeerArgs...)
		}
	}

	if cfg.IsClientCRL {
		args = append(args, "--client-crl-file", CrlPath, "--client-cert-auth")
	}

	if len(cfg.CipherSuites) > 0 {
		args = append(args, "--cipher-suites", strings.Join(cfg.CipherSuites, ","))
	}

	return args
}

func (epc *EtcdProcessCluster) EndpointsV2() []string {
	return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV2() })
}

func (epc *EtcdProcessCluster) EndpointsV3() []string {
	return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV3() })
}

func (epc *EtcdProcessCluster) Endpoints(f func(ep EtcdProcess) []string) (ret []string) {
	for _, p := range epc.Procs {
		ret = append(ret, f(p)...)
	}
	return ret
}

func (epc *EtcdProcessCluster) Start() error {
	return epc.start(func(ep EtcdProcess) error { return ep.Start() })
}

func (epc *EtcdProcessCluster) RollingStart() error {
	return epc.rollingStart(func(ep EtcdProcess) error { return ep.Start() })
}

func (epc *EtcdProcessCluster) Restart() error {
	return epc.start(func(ep EtcdProcess) error { return ep.Restart() })
}

func (epc *EtcdProcessCluster) start(f func(ep EtcdProcess) error) error {
	readyC := make(chan error, len(epc.Procs))
	for i := range epc.Procs {
		go func(n int) { readyC <- f(epc.Procs[n]) }(i)
	}
	for range epc.Procs {
		if err := <-readyC; err != nil {
			epc.Close()
			return err
		}
	}
	return nil
}

func (epc *EtcdProcessCluster) rollingStart(f func(ep EtcdProcess) error) error {
	readyC := make(chan error, len(epc.Procs))
	for i := range epc.Procs {
		go func(n int) { readyC <- f(epc.Procs[n]) }(i)
		// make sure the servers do not start at the same time
		time.Sleep(time.Second)
	}
	for range epc.Procs {
		if err := <-readyC; err != nil {
			epc.Close()
			return err
		}
	}
	return nil
}

func (epc *EtcdProcessCluster) Stop() (err error) {
	for _, p := range epc.Procs {
		if p == nil {
			continue
		}
		if curErr := p.Stop(); curErr != nil {
			if err != nil {
				err = fmt.Errorf("%v; %v", err, curErr)
			} else {
				err = curErr
			}
		}
	}
	return err
}

func (epc *EtcdProcessCluster) Close() error {
	epc.lg.Info("closing test cluster...")
	err := epc.Stop()
	for _, p := range epc.Procs {
		// p is nil when NewEtcdProcess fails in the middle
		// Close still gets called to clean up test data
		if p == nil {
			continue
		}
		if cerr := p.Close(); cerr != nil {
			err = cerr
		}
	}
	epc.lg.Info("closed test cluster.")
	return err
}

func (epc *EtcdProcessCluster) WithStopSignal(sig os.Signal) (ret os.Signal) {
	for _, p := range epc.Procs {
		ret = p.WithStopSignal(sig)
	}
	return ret
}
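For orientation, a minimal sketch of how a test outside the package can now drive this exported API (the test name is illustrative; assumes the standard testing import and the new framework/e2e package):

	func TestClusterSmoke(t *testing.T) {
		// spin up a three-member cluster with the plain-HTTP defaults
		epc, err := e2e.NewEtcdProcessCluster(t, e2e.NewConfigNoTLS())
		if err != nil {
			t.Fatal(err)
		}
		defer epc.Close()

		endpoints := epc.EndpointsV3() // one advertised client URL per member
		_ = endpoints
	}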
|
@ -17,6 +17,6 @@
|
||||
|
||||
package e2e
|
||||
|
||||
func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) {
|
||||
return newEtcdServerProcess(cfg)
|
||||
func NewEtcdProcess(cfg *EtcdServerProcessConfig) (EtcdProcess, error) {
|
||||
return NewEtcdServerProcess(cfg)
|
||||
}
|
@ -31,17 +31,17 @@ import (
)

type proxyEtcdProcess struct {
	etcdProc etcdProcess
	etcdProc EtcdProcess
	proxyV2 *proxyV2Proc
	proxyV3 *proxyV3Proc
}

func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) {
	return newProxyEtcdProcess(cfg)
func NewEtcdProcess(cfg *EtcdServerProcessConfig) (EtcdProcess, error) {
	return NewProxyEtcdProcess(cfg)
}

func newProxyEtcdProcess(cfg *etcdServerProcessConfig) (*proxyEtcdProcess, error) {
	ep, err := newEtcdServerProcess(cfg)
func NewProxyEtcdProcess(cfg *EtcdServerProcessConfig) (*proxyEtcdProcess, error) {
	ep, err := NewEtcdServerProcess(cfg)
	if err != nil {
		return nil, err
	}
@ -53,7 +53,7 @@ func newProxyEtcdProcess(cfg *etcdServerProcessConfig) (*proxyEtcdProcess, error
	return pep, nil
}

func (p *proxyEtcdProcess) Config() *etcdServerProcessConfig { return p.etcdProc.Config() }
func (p *proxyEtcdProcess) Config() *EtcdServerProcessConfig { return p.etcdProc.Config() }

func (p *proxyEtcdProcess) EndpointsV2() []string { return p.proxyV2.endpoints() }
func (p *proxyEtcdProcess) EndpointsV3() []string { return p.proxyV3.endpoints() }
@ -115,7 +115,7 @@ func (p *proxyEtcdProcess) WithStopSignal(sig os.Signal) os.Signal {
	return p.etcdProc.WithStopSignal(sig)
}

func (p *proxyEtcdProcess) Logs() logsExpect {
func (p *proxyEtcdProcess) Logs() LogsExpect {
	return p.etcdProc.Logs()
}

@ -136,7 +136,7 @@ func (pp *proxyProc) start() error {
	if pp.proc != nil {
		panic("already started")
	}
	proc, err := spawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil)
	proc, err := SpawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil)
	if err != nil {
		return err
	}
@ -146,7 +146,7 @@ func (pp *proxyProc) start() error {

func (pp *proxyProc) waitReady(readyStr string) error {
	defer close(pp.donec)
	return waitReadyExpectProc(pp.proc, []string{readyStr})
	return WaitReadyExpectProc(pp.proc, []string{readyStr})
}

func (pp *proxyProc) Stop() error {
@ -176,8 +176,8 @@ type proxyV2Proc struct {
	dataDir string
}

func proxyListenURL(cfg *etcdServerProcessConfig, portOffset int) string {
	u, err := url.Parse(cfg.acurl)
func proxyListenURL(cfg *EtcdServerProcessConfig, portOffset int) string {
	u, err := url.Parse(cfg.Acurl)
	if err != nil {
		panic(err)
	}
@ -187,22 +187,22 @@ func proxyListenURL(cfg *etcdServerProcessConfig, portOffset int) string {
	return u.String()
}

func newProxyV2Proc(cfg *etcdServerProcessConfig) *proxyV2Proc {
func newProxyV2Proc(cfg *EtcdServerProcessConfig) *proxyV2Proc {
	listenAddr := proxyListenURL(cfg, 2)
	name := fmt.Sprintf("testname-proxy-%p", cfg)
	dataDir := path.Join(cfg.dataDirPath, name+".etcd")
	dataDir := path.Join(cfg.DataDirPath, name+".etcd")
	args := []string{
		"--name", name,
		"--proxy", "on",
		"--listen-client-urls", listenAddr,
		"--initial-cluster", cfg.name + "=" + cfg.purl.String(),
		"--initial-cluster", cfg.Name + "=" + cfg.Purl.String(),
		"--data-dir", dataDir,
	}
	return &proxyV2Proc{
		proxyProc: proxyProc{
			lg: cfg.lg,
			execPath: cfg.execPath,
			args: append(args, cfg.tlsArgs...),
			execPath: cfg.ExecPath,
			args: append(args, cfg.TlsArgs...),
			ep: listenAddr,
			donec: make(chan struct{}),
		},
@ -239,33 +239,33 @@ type proxyV3Proc struct {
	proxyProc
}

func newProxyV3Proc(cfg *etcdServerProcessConfig) *proxyV3Proc {
func newProxyV3Proc(cfg *EtcdServerProcessConfig) *proxyV3Proc {
	listenAddr := proxyListenURL(cfg, 3)
	args := []string{
		"grpc-proxy",
		"start",
		"--listen-addr", strings.Split(listenAddr, "/")[2],
		"--endpoints", cfg.acurl,
		"--endpoints", cfg.Acurl,
		// pass-through member RPCs
		"--advertise-client-url", "",
		"--data-dir", cfg.dataDirPath,
		"--data-dir", cfg.DataDirPath,
	}
	murl := ""
	if cfg.murl != "" {
	if cfg.Murl != "" {
		murl = proxyListenURL(cfg, 4)
		args = append(args, "--metrics-addr", murl)
	}
	tlsArgs := []string{}
	for i := 0; i < len(cfg.tlsArgs); i++ {
		switch cfg.tlsArgs[i] {
	for i := 0; i < len(cfg.TlsArgs); i++ {
		switch cfg.TlsArgs[i] {
		case "--cert-file":
			tlsArgs = append(tlsArgs, "--cert-file", cfg.tlsArgs[i+1])
			tlsArgs = append(tlsArgs, "--cert-file", cfg.TlsArgs[i+1])
			i++
		case "--key-file":
			tlsArgs = append(tlsArgs, "--key-file", cfg.tlsArgs[i+1])
			tlsArgs = append(tlsArgs, "--key-file", cfg.TlsArgs[i+1])
			i++
		case "--trusted-ca-file":
			tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.tlsArgs[i+1])
			tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.TlsArgs[i+1])
			i++
		case "--auto-tls":
			tlsArgs = append(tlsArgs, "--auto-tls", "--insecure-skip-tls-verify")
@ -273,21 +273,21 @@ func newProxyV3Proc(cfg *etcdServerProcessConfig) *proxyV3Proc {
			i++ // skip arg
		case "--client-cert-auth", "--peer-auto-tls":
		default:
			tlsArgs = append(tlsArgs, cfg.tlsArgs[i])
			tlsArgs = append(tlsArgs, cfg.TlsArgs[i])
		}

		// Configure certificates for connection proxy ---> server.
		// This certificate must NOT have CN set.
		tlsArgs = append(tlsArgs,
			"--cert", path.Join(fixturesDir, "client-nocn.crt"),
			"--key", path.Join(fixturesDir, "client-nocn.key.insecure"),
			"--cacert", path.Join(fixturesDir, "ca.crt"),
			"--client-crl-file", path.Join(fixturesDir, "revoke.crl"))
			"--cert", path.Join(FixturesDir, "client-nocn.crt"),
			"--key", path.Join(FixturesDir, "client-nocn.key.insecure"),
			"--cacert", path.Join(FixturesDir, "ca.crt"),
			"--client-crl-file", path.Join(FixturesDir, "revoke.crl"))
	}
	return &proxyV3Proc{
		proxyProc{
			lg: cfg.lg,
			execPath: cfg.execPath,
			execPath: cfg.ExecPath,
			args: append(args, tlsArgs...),
			ep: listenAddr,
			murl: murl,
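Note that this file and the plain variant above both define NewEtcdProcess, so only one of them can be compiled into a given test binary; in etcd they are selected with mutually exclusive build constraints (the cluster_proxy tag), along the lines of:

	// in the proxy variant:
	// +build cluster_proxy
	// in the direct variant:
	// +build !cluster_proxy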
106
tests/framework/e2e/curl.go
Normal file
@ -0,0 +1,106 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
	"fmt"
	"math/rand"
	"strings"
)

type CURLReq struct {
	Username string
	Password string

	IsTLS bool
	Timeout int

	Endpoint string

	Value string
	Expected string
	Header string

	MetricsURLScheme string

	Ciphers string
}

// CURLPrefixArgs builds the beginning of a curl command for a given key
// addressed to a random URL in the given cluster.
func CURLPrefixArgs(clus *EtcdProcessCluster, method string, req CURLReq) []string {
	var (
		cmdArgs = []string{"curl"}
		acurl = clus.Procs[rand.Intn(clus.Cfg.ClusterSize)].Config().Acurl
	)
	if req.MetricsURLScheme != "https" {
		if req.IsTLS {
			if clus.Cfg.ClientTLS != ClientTLSAndNonTLS {
				panic("should not set IsTLS when serving only TLS or only non-TLS")
			}
			cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath)
			acurl = ToTLS(clus.Procs[rand.Intn(clus.Cfg.ClusterSize)].Config().Acurl)
		} else if clus.Cfg.ClientTLS == ClientTLS {
			if !clus.Cfg.NoCN {
				cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath)
			} else {
				cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath3, "--key", PrivateKeyPath3)
			}
		}
	}
	if req.MetricsURLScheme != "" {
		acurl = clus.Procs[rand.Intn(clus.Cfg.ClusterSize)].EndpointsMetrics()[0]
	}
	ep := acurl + req.Endpoint

	if req.Username != "" || req.Password != "" {
		cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.Username, req.Password), ep)
	} else {
		cmdArgs = append(cmdArgs, "-L", ep)
	}
	if req.Timeout != 0 {
		cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.Timeout))
	}

	if req.Header != "" {
		cmdArgs = append(cmdArgs, "-H", req.Header)
	}

	if req.Ciphers != "" {
		cmdArgs = append(cmdArgs, "--ciphers", req.Ciphers)
	}

	switch method {
	case "POST", "PUT":
		dt := req.Value
		if !strings.HasPrefix(dt, "{") { // for non-JSON value
			dt = "value=" + dt
		}
		cmdArgs = append(cmdArgs, "-X", method, "-d", dt)
	}
	return cmdArgs
}

func CURLPost(clus *EtcdProcessCluster, req CURLReq) error {
	return SpawnWithExpect(CURLPrefixArgs(clus, "POST", req), req.Expected)
}

func CURLPut(clus *EtcdProcessCluster, req CURLReq) error {
	return SpawnWithExpect(CURLPrefixArgs(clus, "PUT", req), req.Expected)
}

func CURLGet(clus *EtcdProcessCluster, req CURLReq) error {
	return SpawnWithExpect(CURLPrefixArgs(clus, "GET", req), req.Expected)
}
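CURLPrefixArgs only builds an argv slice; the CURL* wrappers then hand it to SpawnWithExpect. A rough example of the produced command for a plain-HTTP POST (the endpoint and value are illustrative, and the URL depends on the cluster under test):

	args := e2e.CURLPrefixArgs(epc, "POST", e2e.CURLReq{Endpoint: "/v3/kv/put", Value: `{"key":"Zm9v"}`})
	// args is roughly: ["curl", "-L", "http://localhost:20000/v3/kv/put", "-X", "POST", "-d", `{"key":"Zm9v"}`]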
178
tests/framework/e2e/etcd_process.go
Normal file
@ -0,0 +1,178 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
	"fmt"
	"net/url"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
	"go.etcd.io/etcd/pkg/v3/expect"
	"go.uber.org/zap"
)

var (
	EtcdServerReadyLines = []string{"ready to serve client requests"}
	BinPath string
	CtlBinPath string
	UtlBinPath string
)

// EtcdProcess is a process that serves etcd requests.
type EtcdProcess interface {
	EndpointsV2() []string
	EndpointsV3() []string
	EndpointsMetrics() []string

	Start() error
	Restart() error
	Stop() error
	Close() error
	WithStopSignal(sig os.Signal) os.Signal
	Config() *EtcdServerProcessConfig
	Logs() LogsExpect
}

type LogsExpect interface {
	Expect(string) (string, error)
}

type EtcdServerProcess struct {
	cfg *EtcdServerProcessConfig
	proc *expect.ExpectProcess
	donec chan struct{} // closed when Interact() terminates
}

type EtcdServerProcessConfig struct {
	lg *zap.Logger
	ExecPath string
	Args []string
	TlsArgs []string
	EnvVars map[string]string

	DataDirPath string
	KeepDataDir bool

	Name string

	Purl url.URL

	Acurl string
	Murl string

	InitialToken string
	InitialCluster string
}

func NewEtcdServerProcess(cfg *EtcdServerProcessConfig) (*EtcdServerProcess, error) {
	if !fileutil.Exist(cfg.ExecPath) {
		return nil, fmt.Errorf("could not find etcd binary: %s", cfg.ExecPath)
	}
	if !cfg.KeepDataDir {
		if err := os.RemoveAll(cfg.DataDirPath); err != nil {
			return nil, err
		}
	}
	return &EtcdServerProcess{cfg: cfg, donec: make(chan struct{})}, nil
}

func (ep *EtcdServerProcess) EndpointsV2() []string { return []string{ep.cfg.Acurl} }
func (ep *EtcdServerProcess) EndpointsV3() []string { return ep.EndpointsV2() }
func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.Murl} }

func (ep *EtcdServerProcess) Start() error {
	if ep.proc != nil {
		panic("already started")
	}
	ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.Name))
	proc, err := SpawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.ExecPath}, ep.cfg.Args...), ep.cfg.EnvVars)
	if err != nil {
		return err
	}
	ep.proc = proc
	err = ep.waitReady()
	if err == nil {
		ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.Name))
	}
	return err
}

func (ep *EtcdServerProcess) Restart() error {
	ep.cfg.lg.Info("restarting server...", zap.String("name", ep.cfg.Name))
	if err := ep.Stop(); err != nil {
		return err
	}
	ep.donec = make(chan struct{})
	err := ep.Start()
	if err == nil {
		ep.cfg.lg.Info("restarted server", zap.String("name", ep.cfg.Name))
	}
	return err
}

func (ep *EtcdServerProcess) Stop() (err error) {
	if ep == nil || ep.proc == nil {
		return nil
	}
	ep.cfg.lg.Info("stopping server...", zap.String("name", ep.cfg.Name))
	err = ep.proc.Stop()
	if err != nil {
		return err
	}
	ep.proc = nil
	<-ep.donec
	ep.donec = make(chan struct{})
	if ep.cfg.Purl.Scheme == "unix" || ep.cfg.Purl.Scheme == "unixs" {
		err = os.Remove(ep.cfg.Purl.Host + ep.cfg.Purl.Path)
		if err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.Name))
	return nil
}

func (ep *EtcdServerProcess) Close() error {
	ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.Name))
	if err := ep.Stop(); err != nil {
		return err
	}
	if !ep.cfg.KeepDataDir {
		ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.DataDirPath))
		return os.RemoveAll(ep.cfg.DataDirPath)
	}
	return nil
}

func (ep *EtcdServerProcess) WithStopSignal(sig os.Signal) os.Signal {
	ret := ep.proc.StopSignal
	ep.proc.StopSignal = sig
	return ret
}

func (ep *EtcdServerProcess) waitReady() error {
	defer close(ep.donec)
	return WaitReadyExpectProc(ep.proc, EtcdServerReadyLines)
}

func (ep *EtcdServerProcess) Config() *EtcdServerProcessConfig { return ep.cfg }

func (ep *EtcdServerProcess) Logs() LogsExpect {
	if ep.proc == nil {
		ep.cfg.lg.Panic("Please grab logs before the process is stopped")
	}
	return ep.proc
}
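A compact sketch of the lifecycle contract these methods implement (config construction elided; see EtcdServerProcessConfigs in cluster.go above):

	ep, err := e2e.NewEtcdServerProcess(cfg) // checks the binary exists, clears the data dir
	if err != nil {
		t.Fatal(err)
	}
	if err := ep.Start(); err != nil { // spawns etcd and blocks until the ready log line
		t.Fatal(err)
	}
	defer ep.Close() // Stop plus data-dir removal, unless KeepDataDir is set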
@ -26,7 +26,7 @@ import (

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
	"go.etcd.io/etcd/pkg/v3/expect"
	"go.etcd.io/etcd/tests/v3/integration"
	"go.etcd.io/etcd/tests/v3/framework/integration"
	"go.uber.org/zap"
)

@ -36,11 +36,11 @@ var (
	coverDir = integration.MustAbsPath(os.Getenv("COVERDIR"))
)

func spawnCmd(args []string) (*expect.ExpectProcess, error) {
	return spawnCmdWithLogger(zap.NewNop(), args)
func SpawnCmd(args []string) (*expect.ExpectProcess, error) {
	return SpawnCmdWithLogger(zap.NewNop(), args)
}

func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
func SpawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
	cmd := args[0]
	env := make([]string, 0)
	switch {
@ -51,7 +51,7 @@ func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, e
	case strings.HasSuffix(cmd, "/etcdutl"):
		cmd = cmd + "_test"
	case strings.HasSuffix(cmd, "/etcdctl3"):
		cmd = ctlBinPath + "_test"
		cmd = CtlBinPath + "_test"
		env = append(env, "ETCDCTL_API=3")
	}
@ -28,11 +28,11 @@ import (

const noOutputLineCount = 0 // regular binaries emit no extra lines

func spawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
	return spawnCmdWithLogger(zap.NewNop(), args, envVars)
func SpawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
	return SpawnCmdWithLogger(zap.NewNop(), args, envVars)
}

func spawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
func SpawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
	wd, err := os.Getwd()
	if err != nil {
		return nil, err
@ -41,7 +41,7 @@ func spawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string
	if strings.HasSuffix(args[0], "/etcdctl3") {
		env = append(env, "ETCDCTL_API=3")
		lg.Info("spawning process with ETCDCTL_API=3", zap.Strings("args", args), zap.String("working-dir", wd), zap.Strings("environment-variables", env))
		return expect.NewExpectWithEnv(ctlBinPath, args[1:], env)
		return expect.NewExpectWithEnv(CtlBinPath, args[1:], env)
	}
	lg.Info("spawning process", zap.Strings("args", args), zap.String("working-dir", wd), zap.Strings("environment-variables", env))
	return expect.NewExpectWithEnv(args[0], args[1:], env)
72
tests/framework/e2e/flags.go
Normal file
@ -0,0 +1,72 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
	"flag"
	"os"
	"runtime"

	"go.etcd.io/etcd/tests/v3/framework/integration"
)

var (
	BinDir string
	CertDir string

	CertPath string
	PrivateKeyPath string
	CaPath string

	CertPath2 string
	PrivateKeyPath2 string

	CertPath3 string
	PrivateKeyPath3 string

	CrlPath string
	RevokedCertPath string
	RevokedPrivateKeyPath string

	FixturesDir = integration.MustAbsPath("../fixtures")
)

func InitFlags() {
	os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH)
	os.Unsetenv("ETCDCTL_API")

	binDirDef := integration.MustAbsPath("../../bin")
	certDirDef := FixturesDir

	flag.StringVar(&BinDir, "bin-dir", binDirDef, "The directory storing the etcd and etcdctl binaries.")
	flag.StringVar(&CertDir, "cert-dir", certDirDef, "The directory storing the certificate files.")
	flag.Parse()

	BinPath = BinDir + "/etcd"
	CtlBinPath = BinDir + "/etcdctl"
	UtlBinPath = BinDir + "/etcdutl"
	CertPath = CertDir + "/server.crt"
	PrivateKeyPath = CertDir + "/server.key.insecure"
	CaPath = CertDir + "/ca.crt"
	RevokedCertPath = CertDir + "/server-revoked.crt"
	RevokedPrivateKeyPath = CertDir + "/server-revoked.key.insecure"
	CrlPath = CertDir + "/revoke.crl"

	CertPath2 = CertDir + "/server2.crt"
	PrivateKeyPath2 = CertDir + "/server2.key.insecure"

	CertPath3 = CertDir + "/server3.crt"
	PrivateKeyPath3 = CertDir + "/server3.key.insecure"
}
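InitFlags is meant to run once per test binary, before any test executes, so that BinPath and the certificate paths are populated; a typical wiring is a TestMain in the consuming package (a sketch, assuming os and testing are imported):

	func TestMain(m *testing.M) {
		e2e.InitFlags()
		os.Exit(m.Run())
	}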
@ -24,7 +24,7 @@ import (
)

func BeforeTest(t testing.TB) {
	skipInShortMode(t)
	SkipInShortMode(t)
	testutil.RegisterLeakDetection(t)
	os.Setenv(verify.ENV_VERIFY, verify.ENV_VERIFY_ALL_VALUE)
@@ -26,7 +26,7 @@ import (
 	"go.etcd.io/etcd/pkg/v3/expect"
 )
 
-func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
+func WaitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
 	matchSet := func(l string) bool {
 		for _, s := range readyStrs {
 			if strings.Contains(l, s) {
@@ -39,21 +39,21 @@ func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error
 	return err
 }
 
-func spawnWithExpect(args []string, expected string) error {
-	return spawnWithExpects(args, nil, []string{expected}...)
+func SpawnWithExpect(args []string, expected string) error {
+	return SpawnWithExpects(args, nil, []string{expected}...)
 }
 
-func spawnWithExpectWithEnv(args []string, envVars map[string]string, expected string) error {
-	return spawnWithExpects(args, envVars, []string{expected}...)
+func SpawnWithExpectWithEnv(args []string, envVars map[string]string, expected string) error {
+	return SpawnWithExpects(args, envVars, []string{expected}...)
 }
 
-func spawnWithExpects(args []string, envVars map[string]string, xs ...string) error {
-	_, err := spawnWithExpectLines(args, envVars, xs...)
+func SpawnWithExpects(args []string, envVars map[string]string, xs ...string) error {
+	_, err := SpawnWithExpectLines(args, envVars, xs...)
 	return err
 }
 
-func spawnWithExpectLines(args []string, envVars map[string]string, xs ...string) ([]string, error) {
-	proc, err := spawnCmd(args, envVars)
+func SpawnWithExpectLines(args []string, envVars map[string]string, xs ...string) ([]string, error) {
+	proc, err := SpawnCmd(args, envVars)
 	if err != nil {
 		return nil, err
 	}
@@ -84,11 +84,11 @@ func spawnWithExpectLines(args []string, envVars map[string]string, xs ...string
 	return lines, perr
 }
 
-func randomLeaseID() int64 {
+func RandomLeaseID() int64 {
 	return rand.New(rand.NewSource(time.Now().UnixNano())).Int63()
 }
 
-func dataMarshal(data interface{}) (d string, e error) {
+func DataMarshal(data interface{}) (d string, e error) {
 	m, err := json.Marshal(data)
 	if err != nil {
 		return "", err
@@ -96,7 +96,7 @@ func dataMarshal(data interface{}) (d string, e error) {
 	return string(m), nil
}
 
-func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error {
+func CloseWithTimeout(p *expect.ExpectProcess, d time.Duration) error {
 	errc := make(chan error, 1)
 	go func() { errc <- p.Close() }()
 	select {
@@ -105,15 +105,15 @@ func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error {
 	case <-time.After(d):
 		p.Stop()
 		// retry close after stopping to collect SIGQUIT data, if any
-		closeWithTimeout(p, time.Second)
+		CloseWithTimeout(p, time.Second)
 	}
 	return fmt.Errorf("took longer than %v to Close process %+v", d, p)
 }
 
-func toTLS(s string) string {
+func ToTLS(s string) string {
 	return strings.Replace(s, "http://", "https://", 1)
 }
 
-func skipInShortMode(t testing.TB) {
+func SkipInShortMode(t testing.TB) {
 	testutil.SkipTestIfShortMode(t, "e2e tests are not running in --short mode")
 }
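These spawn helpers become the package's public process-driving surface. A hedged example of the intended call shape; the endpoint flag is illustrative, e2e.CtlBinPath is populated by InitFlags, and "OK" is what etcdctl prints on a successful put.

func TestCtlPutSketch(t *testing.T) {
	e2e.BeforeTest(t)
	// SpawnWithExpect fails if the expected string never appears in the
	// spawned process's output.
	args := []string{e2e.CtlBinPath, "--endpoints=http://127.0.0.1:2379", "put", "foo", "bar"}
	if err := e2e.SpawnWithExpect(args, "OK"); err != nil {
		t.Fatal(err)
	}
}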
@@ -17,4 +17,4 @@
 
 package e2e
 
-func addV2Args(args []string) []string { return args }
+func AddV2Args(args []string) []string { return args }
@@ -17,6 +17,6 @@
 
 package e2e
 
-func addV2Args(args []string) []string {
+func AddV2Args(args []string) []string {
 	return append(args, "--experimental-enable-v2v3", "v2/")
 }
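The two AddV2Args bodies above are sibling files selected by mutually exclusive build constraints: a pass-through for the default build and a variant that enables the v2v3 shim. A sketch of the guard; the tag name cluster_proxy is an assumption about how these files are gated.

//go:build cluster_proxy
// +build cluster_proxy

package e2e

// Compiled only with -tags cluster_proxy; the default build picks the
// sibling file whose AddV2Args returns args unchanged.
func AddV2Args(args []string) []string {
	return append(args, "--experimental-enable-v2v3", "v2/")
}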
File diff suppressed because it is too large
@@ -26,8 +26,8 @@ import (
 
 const ThroughProxy = false
 
-func toGRPC(c *clientv3.Client) grpcAPI {
-	return grpcAPI{
+func ToGRPC(c *clientv3.Client) GrpcAPI {
+	return GrpcAPI{
 		pb.NewClusterClient(c.ActiveConnection()),
 		pb.NewKVClient(c.ActiveConnection()),
 		pb.NewLeaseClient(c.ActiveConnection()),
@@ -39,13 +39,13 @@ const proxyNamespace = "proxy-namespace"
 type grpcClientProxy struct {
 	ctx       context.Context
 	ctxCancel func()
-	grpc      grpcAPI
+	grpc      GrpcAPI
 	wdonec    <-chan struct{}
 	kvdonec   <-chan struct{}
 	lpdonec   <-chan struct{}
 }
 
-func toGRPC(c *clientv3.Client) grpcAPI {
+func ToGRPC(c *clientv3.Client) GrpcAPI {
 	pmu.Lock()
 	defer pmu.Unlock()
 
@@ -74,7 +74,7 @@ func toGRPC(c *clientv3.Client) grpcAPI {
 	lockp := grpcproxy.NewLockProxy(c)
 	electp := grpcproxy.NewElectionProxy(c)
 
-	grpc := grpcAPI{
+	grpc := GrpcAPI{
 		adapter.ClusterServerToClusterClient(clp),
 		adapter.KvServerToKvClient(kvp),
 		adapter.LeaseServerToLeaseClient(lp),
@@ -112,7 +112,7 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
 	if err != nil {
 		return nil, err
 	}
-	rpc := toGRPC(c)
+	rpc := ToGRPC(c)
 	c.KV = clientv3.NewKVFromKVClient(rpc.KV, c)
 	pmu.Lock()
 	lc := c.Lease
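ToGRPC hands tests raw gRPC stubs built on a clientv3 connection; newClientV3 above shows the KV field being rewrapped. A hedged sketch of direct stub use, assuming ToGRPC ends up exported from the framework integration package alongside the cluster helpers; the test name is invented.

package integration_test

import (
	"context"
	"testing"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestRawKVRangeSketch(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// Bypass the clientv3 KV wrapper and issue a raw RangeRequest.
	kv := integration2.ToGRPC(clus.Client(0)).KV
	if _, err := kv.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatal(err)
	}
}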
@@ -25,14 +25,14 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/client/v2"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 // TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
 func TestV2NoRetryEOF(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 	// generate an EOF response; specify address so appears first in sorted ep list
-	lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
+	lEOF := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	defer lEOF.Close()
 	tries := uint32(0)
 	go func() {
@@ -45,8 +45,8 @@ func TestV2NoRetryEOF(t *testing.T) {
 			conn.Close()
 		}
 	}()
-	eofURL := integration.URLScheme + "://" + lEOF.Addr().String()
-	cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
+	eofURL := integration2.URLScheme + "://" + lEOF.Addr().String()
+	cli := integration2.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
 	kapi := client.NewKeysAPI(cli)
 	for i, f := range noRetryList(kapi) {
 		startTries := atomic.LoadUint32(&tries)
@@ -62,17 +62,17 @@ func TestV2NoRetryEOF(t *testing.T) {
 
 // TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
 func TestV2NoRetryNoLeader(t *testing.T) {
-	integration.BeforeTest(t)
-	lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
+	integration2.BeforeTest(t)
+	lHTTP := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	eh := &errHandler{errCode: http.StatusServiceUnavailable}
 	srv := httptest.NewUnstartedServer(eh)
 	defer lHTTP.Close()
 	defer srv.Close()
 	srv.Listener = lHTTP
 	go srv.Start()
-	lHTTPURL := integration.URLScheme + "://" + lHTTP.Addr().String()
+	lHTTPURL := integration2.URLScheme + "://" + lHTTP.Addr().String()
 
-	cli := integration.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil)
+	cli := integration2.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil)
 	kapi := client.NewKeysAPI(cli)
 	// test error code
 	for i, f := range noRetryList(kapi) {
@@ -88,12 +88,12 @@ func TestV2NoRetryNoLeader(t *testing.T) {
 
 // TestV2RetryRefuse tests destructive api calls will retry if a connection is refused.
 func TestV2RetryRefuse(t *testing.T) {
-	integration.BeforeTest(t)
-	cl := integration.NewCluster(t, 1)
+	integration2.BeforeTest(t)
+	cl := integration2.NewCluster(t, 1)
 	cl.Launch(t)
 	defer cl.Terminate(t)
 	// test connection refused; expect no error failover
-	cli := integration.MustNewHTTPClient(t, []string{integration.URLScheme + "://refuseconn:123", cl.URL(0)}, nil)
+	cli := integration2.MustNewHTTPClient(t, []string{integration2.URLScheme + "://refuseconn:123", cl.URL(0)}, nil)
 	kapi := client.NewKeysAPI(cli)
 	if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil {
 		t.Fatal(err)
@@ -23,13 +23,13 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/client/pkg/v3/types"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestMemberList(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	capi := clus.RandClient()
@@ -45,9 +45,9 @@ func TestMemberList(t *testing.T) {
 }
 
 func TestMemberAdd(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	capi := clus.RandClient()
@@ -64,9 +64,9 @@ func TestMemberAdd(t *testing.T) {
 }
 
 func TestMemberAddWithExistingURLs(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	capi := clus.RandClient()
@@ -88,9 +88,9 @@ func TestMemberAddWithExistingURLs(t *testing.T) {
 }
 
 func TestMemberRemove(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	capi := clus.Client(1)
@@ -126,9 +126,9 @@ func TestMemberRemove(t *testing.T) {
 }
 
 func TestMemberUpdate(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	capi := clus.RandClient()
@@ -154,9 +154,9 @@ func TestMemberUpdate(t *testing.T) {
 }
 
 func TestMemberAddUpdateWrongURLs(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	capi := clus.RandClient()
@@ -187,9 +187,9 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) {
 }
 
 func TestMemberAddForLearner(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	capi := clus.RandClient()
@@ -216,9 +216,9 @@ func TestMemberAddForLearner(t *testing.T) {
 }
 
 func TestMemberPromote(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// member promote request can be sent to any server in cluster,
@@ -293,9 +293,9 @@ func TestMemberPromote(t *testing.T) {
 
 // TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails.
 func TestMemberPromoteMemberNotLearner(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// member promote request can be sent to any server in cluster,
@@ -329,9 +329,9 @@ func TestMemberPromoteMemberNotLearner(t *testing.T) {
 
 // TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails.
 func TestMemberPromoteMemberNotExist(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// member promote request can be sent to any server in cluster,
@@ -378,10 +378,10 @@ func TestMemberPromoteMemberNotExist(t *testing.T) {
 
 // TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is 1
 func TestMaxLearnerInCluster(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	// 1. start with a cluster with 3 voting members and 0 learner members
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// 2. adding a learner member should succeed
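For reference, the clientv3 surface these member tests exercise, condensed into a standalone sketch; the endpoints and peer URL are placeholders.

package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The learner flow TestMemberAddForLearner/TestMemberPromote walk through:
	// add the member as a learner first, then promote it once it catches up.
	resp, err := cli.MemberAddAsLearner(ctx, []string{"http://127.0.0.1:12380"})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.MemberPromote(ctx, resp.Member.ID); err != nil {
		// Promotion is rejected until the learner is in sync with the leader,
		// so a polling loop like the tests use is expected here.
		log.Printf("promote: %v", err)
	}
}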
@@ -23,13 +23,13 @@ import (
 
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestResumeElection(t *testing.T) {
 	const prefix = "/resume-election/"
 
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
 	if err != nil {
 		log.Fatal(err)
 	}
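The election recipe TestResumeElection drives reduces to roughly the following, using the clientv3 and concurrency imports already in the file above; the prefix and value are illustrative, not the test body.

func electionSketch(t *testing.T, cli *clientv3.Client) {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/resume-election/")
	ctx := context.TODO()
	if err := e.Campaign(ctx, "candidate-1"); err != nil { // blocks until elected
		t.Fatal(err)
	}
	defer e.Resign(ctx)
}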
@@ -20,11 +20,11 @@ import (
 
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestMutexLockSessionExpired(t *testing.T) {
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -24,7 +24,7 @@ import (
 
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration/clientv3"
 	"google.golang.org/grpc"
 )
@@ -33,9 +33,9 @@ import (
 // blackholed endpoint, client balancer switches to healthy one.
 // TODO: test server-to-client keepalive ping
 func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:                 2,
 		GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
 		UseBridge:            true,
@@ -58,9 +58,9 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
 	// TODO: only send healthy endpoint to gRPC so gRPC wont waste time to
 	// dial for unhealthy endpoint.
 	// then we can reduce 3s to 1s.
-	timeout := pingInterval + integration.RequestWaitTimeout
+	timeout := pingInterval + integration2.RequestWaitTimeout
 
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -166,9 +166,9 @@ func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
 // testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint
 // fails due to context timeout, but succeeds on next try, with endpoint switch.
 func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               2,
 		SkipCreatingClient: true,
 		UseBridge:          true,
@@ -182,7 +182,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
 		DialTimeout: 1 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -24,31 +24,31 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
 	"google.golang.org/grpc"
 )
 
 var (
 	testTLSInfo = transport.TLSInfo{
-		KeyFile:        integration.MustAbsPath("../../../fixtures/server.key.insecure"),
-		CertFile:       integration.MustAbsPath("../../../fixtures/server.crt"),
-		TrustedCAFile:  integration.MustAbsPath("../../../fixtures/ca.crt"),
+		KeyFile:        integration2.MustAbsPath("../../../fixtures/server.key.insecure"),
+		CertFile:       integration2.MustAbsPath("../../../fixtures/server.crt"),
+		TrustedCAFile:  integration2.MustAbsPath("../../../fixtures/ca.crt"),
 		ClientCertAuth: true,
 	}
 
 	testTLSInfoExpired = transport.TLSInfo{
-		KeyFile:        integration.MustAbsPath("../../fixtures-expired/server.key.insecure"),
-		CertFile:       integration.MustAbsPath("../../fixtures-expired/server.crt"),
-		TrustedCAFile:  integration.MustAbsPath("../../fixtures-expired/ca.crt"),
+		KeyFile:        integration2.MustAbsPath("../../fixtures-expired/server.key.insecure"),
+		CertFile:       integration2.MustAbsPath("../../fixtures-expired/server.crt"),
+		TrustedCAFile:  integration2.MustAbsPath("../../fixtures-expired/ca.crt"),
 		ClientCertAuth: true,
 	}
 )
 
 // TestDialTLSExpired tests client with expired certs fails to dial.
 func TestDialTLSExpired(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
 	defer clus.Terminate(t)
 
 	tls, err := testTLSInfoExpired.ClientConfig()
@@ -56,7 +56,7 @@ func TestDialTLSExpired(t *testing.T) {
 		t.Fatal(err)
 	}
 	// expect remote errors "tls: bad certificate"
-	_, err = integration.NewClient(t, clientv3.Config{
+	_, err = integration2.NewClient(t, clientv3.Config{
 		Endpoints:   []string{clus.Members[0].GRPCURL()},
 		DialTimeout: 3 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
@@ -70,11 +70,11 @@ func TestDialTLSExpired(t *testing.T) {
 // TestDialTLSNoConfig ensures the client fails to dial / times out
 // when TLS endpoints (https, unixs) are given but no tls config.
 func TestDialTLSNoConfig(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
 	defer clus.Terminate(t)
 	// expect "signed by unknown authority"
-	c, err := integration.NewClient(t, clientv3.Config{
+	c, err := integration2.NewClient(t, clientv3.Config{
 		Endpoints:   []string{clus.Members[0].GRPCURL()},
 		DialTimeout: time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
@@ -101,8 +101,8 @@ func TestDialSetEndpointsAfterFail(t *testing.T) {
 
 // testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
 func testDialSetEndpoints(t *testing.T, setBefore bool) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, SkipCreatingClient: true})
 	defer clus.Terminate(t)
 
 	// get endpoint list
@@ -117,7 +117,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 		DialTimeout: 1 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -134,7 +134,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
 	}
 	time.Sleep(time.Second * 2)
-	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout)
 	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
 		t.Fatal(err)
 	}
@@ -144,8 +144,8 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 // TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
 // with a new one that doesn't include original endpoint.
 func TestSwitchSetEndpoints(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// get non partitioned members endpoints
@@ -164,9 +164,9 @@ func TestSwitchSetEndpoints(t *testing.T) {
 }
 
 func TestRejectOldCluster(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 	// 2 endpoints to test multi-endpoint Status
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, SkipCreatingClient: true})
 	defer clus.Terminate(t)
 
 	cfg := clientv3.Config{
@@ -175,7 +175,7 @@ func TestRejectOldCluster(t *testing.T) {
 		DialOptions:      []grpc.DialOption{grpc.WithBlock()},
 		RejectOldCluster: true,
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -185,8 +185,8 @@ func TestRejectOldCluster(t *testing.T) {
 // TestDialForeignEndpoint checks an endpoint that is not registered
 // with the balancer can be dialed.
 func TestDialForeignEndpoint(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)
 
 	conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0])
@@ -208,8 +208,8 @@ func TestDialForeignEndpoint(t *testing.T) {
 // TestSetEndpointAndPut checks that a Put following a SetEndpoints
 // to a working endpoint will always succeed.
 func TestSetEndpointAndPut(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)
 
 	clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL())
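The SetEndpoints contract these dial tests pin down, as a small sketch; the endpoint is a placeholder and cli is any connected clientv3 client.

func switchEndpointSketch(cli *clientv3.Client) error {
	// Swap the client over to a known-good member.
	cli.SetEndpoints("127.0.0.1:22379")
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// A serializable read avoids waiting on quorum right after the switch.
	_, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
	return err
}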
@@ -26,7 +26,7 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration/clientv3"
 	"google.golang.org/grpc"
 )
@@ -103,9 +103,9 @@ func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) {
 }
 
 func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 	})
@@ -119,7 +119,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 		DialTimeout: 3 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -159,9 +159,9 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 // switches endpoint when leader fails and linearizable get requests returns
 // "etcdserver: request timed out".
 func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 	})
@@ -172,7 +172,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T
 
 	timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()
 
-	cli, err := integration.NewClient(t, clientv3.Config{
+	cli, err := integration2.NewClient(t, clientv3.Config{
 		Endpoints:   []string{eps[(lead+1)%2]},
 		DialTimeout: 2 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
@@ -214,9 +214,9 @@ func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
 // testBalancerUnderNetworkPartitionWatch ensures watch stream
 // to a partitioned node be closed when context requires leader.
 func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 	})
@@ -230,7 +230,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	}
 
 	// pin eps[target]
-	watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
+	watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -248,7 +248,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 	}
 
@@ -268,15 +268,15 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 		if err = ev.Err(); err != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
 		}
-	case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost
+	case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost
 		t.Fatal("took too long to detect leader lost")
 	}
 }
 
 func TestDropReadUnderNetworkPartition(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 	})
@@ -289,7 +289,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
 		DialTimeout: 10 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -23,16 +23,16 @@ import (
 
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration/clientv3"
 )
 
 // TestBalancerUnderServerShutdownWatch expects that watch client
 // switch its endpoints when the member of the pinned endpoint fails.
 func TestBalancerUnderServerShutdownWatch(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 		UseBridge:          true,
@@ -44,7 +44,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	lead := clus.WaitLeader(t)
 
 	// pin eps[lead]
-	watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}})
+	watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -61,7 +61,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 	}
 
@@ -90,7 +90,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	clus.Members[lead].Terminate(t)
 
 	// writes to eps[lead+1]
-	putCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
+	putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -143,9 +143,9 @@ func TestBalancerUnderServerShutdownTxn(t *testing.T) {
 // the pinned endpoint is shut down, the balancer switches its endpoints
 // and all subsequent put/delete/txn requests succeed with new endpoints.
 func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 	})
@@ -154,7 +154,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie
 	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
 
 	// pin eps[0]
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -201,9 +201,9 @@ func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
 // the pinned endpoint is shut down, the balancer switches its endpoints
 // and all subsequent range requests succeed with new endpoints.
 func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size:               3,
 		SkipCreatingClient: true,
 	})
@@ -212,7 +212,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl
 	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
 
 	// pin eps[0]
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
 	if err != nil {
 		t.Errorf("failed to create client: %v", err)
 	}
@@ -274,9 +274,9 @@ type pinTestOpt struct {
 // testBalancerUnderServerStopInflightRangeOnRestart expects
 // inflight range request reconnects on server restart.
 func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	cfg := &integration.ClusterConfig{
+	cfg := &integration2.ClusterConfig{
 		Size:               2,
 		SkipCreatingClient: true,
 		UseBridge:          true,
@@ -285,7 +285,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 		cfg.Size = 3
 	}
 
-	clus := integration.NewClusterV3(t, cfg)
+	clus := integration2.NewClusterV3(t, cfg)
 	defer clus.Terminate(t)
 	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
 	if linearizable {
@@ -300,7 +300,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	}
 
 	// pin eps[target]
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
 	if err != nil {
 		t.Errorf("failed to create client: %v", err)
 	}
@@ -361,7 +361,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	clus.Members[target].Restart(t)
 
 	select {
-	case <-time.After(clientTimeout + integration.RequestWaitTimeout):
+	case <-time.After(clientTimeout + integration2.RequestWaitTimeout):
 		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
 	case <-donec:
 	}
@@ -20,6 +20,7 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/client/pkg/v3/testutil"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration"
 )
 
@@ -29,7 +30,7 @@ const (
 )
 
 var lazyCluster = integration.NewLazyClusterWithConfig(
-	integration.ClusterConfig{
+	integration2.ClusterConfig{
		Size:                        3,
 		WatchProgressNotifyInterval: 200 * time.Millisecond})
@@ -20,19 +20,19 @@ import (
 
 	"go.etcd.io/etcd/client/v3"
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestBarrierSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }
 
 func TestBarrierMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
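The recipe under test, in isolation: one goroutine holds the barrier and releases it, unblocking every waiter. A sketch; the key name is invented.

func barrierSketch(cli *clientv3.Client) error {
	b := recipe.NewBarrier(cli, "/sketch-barrier")
	if err := b.Hold(); err != nil { // creates the barrier key
		return err
	}
	go func() {
		// Release deletes the key, which unblocks all waiters.
		b.Release()
	}()
	return b.Wait() // blocks while the barrier key exists
}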
@@ -20,13 +20,13 @@ import (
 
 	"go.etcd.io/etcd/client/v3/concurrency"
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestDoubleBarrier(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	waiters := 10
@@ -98,9 +98,9 @@ func TestDoubleBarrier(t *testing.T) {
 }
 
 func TestDoubleBarrierFailover(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	waiters := 10
@@ -24,29 +24,29 @@ import (
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestMutexLockSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }
 
 func TestMutexLockMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }
 
 func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
@@ -93,27 +93,27 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
 }
 
 func TestMutexTryLockSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }
 
 func TestMutexTryLockMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }
 
 func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	lockedC := make(chan *concurrency.Mutex)
 	notlockedC := make(chan *concurrency.Mutex)
@@ -163,9 +163,9 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C
 // TestMutexSessionRelock ensures that acquiring the same lock with the same
 // session will not result in deadlock.
 func TestMutexSessionRelock(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	session, err := concurrency.NewSession(clus.RandClient())
 	if err != nil {
@@ -187,9 +187,9 @@ func TestMutexSessionRelock(t *testing.T) {
 // waiters older than the new owner are gone by testing the case where
 // the waiter prior to the acquirer expires before the current holder.
 func TestMutexWaitsOnCurrentHolder(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cctx := context.Background()
@@ -295,9 +295,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
 }
 
 func BenchmarkMutex4Waiters(b *testing.B) {
-	integration.BeforeTest(b)
+	integration2.BeforeTest(b)
 	// XXX switch tests to use TB interface
-	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
@@ -305,15 +305,15 @@ func BenchmarkMutex4Waiters(b *testing.B) {
 }
 
 func TestRWMutexSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }
 
 func TestRWMutexMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
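Outside the harness, the mutex recipe these tests exercise reduces to a few lines; the lock prefix is invented and error handling is trimmed to the essentials.

func mutexSketch(cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli) // lease-backed session
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/sketch-lock/")
	ctx := context.TODO()
	if err := m.Lock(ctx); err != nil { // blocks until acquired
		return err
	}
	defer m.Unlock(ctx)
	// ... critical section; the lock is released automatically if the
	// session lease expires, which is what TestMutexLockSessionExpired probes ...
	return nil
}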
@@ -21,7 +21,7 @@ import (
 	"testing"
 
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 const (
@@ -31,9 +31,9 @@ const (
 
 // TestQueueOneReaderOneWriter confirms the queue is FIFO
 func TestQueueOneReaderOneWriter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	done := make(chan struct{})
@@ -78,10 +78,10 @@ func TestQueueManyReaderManyWriter(t *testing.T) {
 
 // BenchmarkQueue benchmarks Queues using many/many readers/writers
 func BenchmarkQueue(b *testing.B) {
-	integration.BeforeTest(b)
+	integration2.BeforeTest(b)
 
 	// XXX switch tests to use TB interface
-	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients)
@@ -90,9 +90,9 @@ func BenchmarkQueue(b *testing.B) {
 
 // TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities.
 func TestPrQueueOneReaderOneWriter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	// write out five items with random priority
@@ -124,9 +124,9 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) {
 }
 
 func TestPrQueueManyReaderManyWriter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	rqs := newPriorityQueues(clus, manyQueueClients)
 	wqs := newPriorityQueues(clus, manyQueueClients)
@@ -135,10 +135,10 @@ func TestPrQueueManyReaderManyWriter(t *testing.T) {
 
 // BenchmarkQueue benchmarks Queues using n/n readers/writers
 func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
-	integration.BeforeTest(b)
+	integration2.BeforeTest(b)
 
 	// XXX switch tests to use TB interface
-	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	rqs := newPriorityQueues(clus, 1)
 	wqs := newPriorityQueues(clus, 1)
@@ -148,13 +148,13 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
 }
 
 func testQueueNReaderMWriter(t *testing.T, n int, m int) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testReadersWriters(t, newQueues(clus, n), newQueues(clus, m))
 }
 
-func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
+func newQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		qs = append(qs, recipe.NewQueue(etcdc, "q"))
@@ -162,7 +162,7 @@ func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
 	return qs
 }
 
-func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
+func newPriorityQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")}
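And the queue recipe itself, for reference: Enqueue appends and Dequeue blocks until an item exists, consuming in FIFO order. A sketch with an invented prefix.

func queueSketch(cli *clientv3.Client) (string, error) {
	q := recipe.NewQueue(cli, "/sketch-queue")
	if err := q.Enqueue("job-1"); err != nil {
		return "", err
	}
	return q.Dequeue() // blocks until an item is available
}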
@@ -29,20 +29,20 @@ import (
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/api/v3/version"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
 
 func TestKVPutError(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	var (
 		maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
 		quota       = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486.
 	)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -72,9 +72,9 @@ func TestKVPutError(t *testing.T) {
 }
 
 func TestKVPut(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	lapi := clus.RandClient()
@@ -117,9 +117,9 @@ func TestKVPut(t *testing.T) {
 
 // TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
 func TestKVPutWithIgnoreValue(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -150,9 +150,9 @@ func TestKVPutWithIgnoreValue(t *testing.T) {
 
 // TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
 func TestKVPutWithIgnoreLease(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -189,9 +189,9 @@ func TestKVPutWithIgnoreLease(t *testing.T) {
 }
 
 func TestKVPutWithRequireLeader(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	clus.Members[1].Stop(t)
@@ -235,9 +235,9 @@ func TestKVPutWithRequireLeader(t *testing.T) {
 }
 
 func TestKVRange(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -464,9 +464,9 @@ func TestKVRange(t *testing.T) {
 }
 
 func TestKVGetErrConnClosed(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -486,16 +486,16 @@ func TestKVGetErrConnClosed(t *testing.T) {
 	}()
 
 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("kv.Get took too long")
 	case <-donec:
 	}
 }
 
 func TestKVNewAfterClose(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -513,16 +513,16 @@ func TestKVNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("kv.Get took too long")
 	case <-donec:
 	}
 }
 
 func TestKVDeleteRange(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -592,9 +592,9 @@ func TestKVDeleteRange(t *testing.T) {
 }
 
 func TestKVDelete(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -624,9 +624,9 @@ func TestKVDelete(t *testing.T) {
 }
 
 func TestKVCompactError(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -654,9 +654,9 @@ func TestKVCompactError(t *testing.T) {
 }
 
 func TestKVCompact(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -709,10 +709,10 @@ func TestKVCompact(t *testing.T) {
 
 // TestKVGetRetry ensures get will retry on disconnect.
 func TestKVGetRetry(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	clusterSize := 3
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// because killing leader and following election
@@ -763,9 +763,9 @@ func TestKVGetRetry(t *testing.T) {
 
 // TestKVPutFailGetRetry ensures a get will retry following a failed put.
 func TestKVPutFailGetRetry(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -803,9 +803,9 @@ func TestKVPutFailGetRetry(t *testing.T) {
 
 // TestKVGetCancel tests that a context cancel on a Get terminates as expected.
 func TestKVGetCancel(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	oldconn := clus.Client(0).ActiveConnection()
@@ -826,9 +826,9 @@ func TestKVGetCancel(t *testing.T) {
 
 // TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
 func TestKVGetStoppedServerAndClose(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -844,9 +844,9 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {
 
 // TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
 func TestKVPutStoppedServerAndClose(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -875,8 +875,8 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
 // TestKVPutAtMostOnce ensures that a Put will only occur at most once
 // in the presence of network errors.
 func TestKVPutAtMostOnce(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
@@ -911,7 +911,7 @@ func TestKVPutAtMostOnce(t *testing.T) {
 
 // TestKVLargeRequests tests various client/server side request limits.
 func TestKVLargeRequests(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 	tests := []struct {
 		// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
 		maxRequestBytesServer uint
@@ -970,8 +970,8 @@ func TestKVLargeRequests(t *testing.T) {
 		},
 	}
 	for i, test := range tests {
-		clus := integration.NewClusterV3(t,
-			&integration.ClusterConfig{
+		clus := integration2.NewClusterV3(t,
+			&integration2.ClusterConfig{
 				Size:                     1,
 				MaxRequestBytes:          test.maxRequestBytesServer,
 				ClientMaxCallSendMsgSize: test.maxCallSendBytesClient,
@@ -1003,9 +1003,9 @@ func TestKVLargeRequests(t *testing.T) {
 
 // TestKVForLearner ensures learner member only accepts serializable read request.
 func TestKVForLearner(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// we have to add and launch learner member after initial cluster was created, because
@@ -1034,7 +1034,7 @@ func TestKVForLearner(t *testing.T) {
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
 	// this client only has endpoint of the learner member
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatalf("failed to create clientv3: %v", err)
 	}
@@ -1082,9 +1082,9 @@ func TestKVForLearner(t *testing.T) {
 
 // TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member
 func TestBalancerSupportLearner(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// we have to add and launch learner member after initial cluster was created, because
|
||||
@ -1106,7 +1106,7 @@ func TestBalancerSupportLearner(t *testing.T) {
|
||||
DialTimeout: 5 * time.Second,
|
||||
DialOptions: []grpc.DialOption{grpc.WithBlock()},
|
||||
}
|
||||
cli, err := integration.NewClient(t, cfg)
|
||||
cli, err := integration2.NewClient(t, cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create clientv3: %v", err)
|
||||
}
|
||||
|
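Every hunk in this change applies the same mechanical substitution: the old "go.etcd.io/etcd/tests/v3/integration" import becomes the relocated framework package, aliased as integration2, so call sites only swap their package qualifier. For reference, a minimal Go sketch of a test written directly against the new package; the package and test names are illustrative, and it assumes the framework package keeps the BeforeTest, NewClusterV3, ClusterConfig, and Terminate helpers that every hunk in this diff calls:

package clientv3test

import (
	"testing"

	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestExample(t *testing.T) {
	// BeforeTest registers the standard per-test setup, as in the hunks above.
	integration2.BeforeTest(t)

	// Start a 3-member cluster and tear it down when the test ends.
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// RandClient returns a client connected to a random member.
	kv := clus.RandClient()
	_ = kv
}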
@@ -26,13 +26,13 @@ import (
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestLeaseNotFoundError(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -44,9 +44,9 @@ func TestLeaseNotFoundError(t *testing.T) {
 }
 
 func TestLeaseGrant(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	lapi := clus.RandClient()
@@ -70,9 +70,9 @@ func TestLeaseGrant(t *testing.T) {
 }
 
 func TestLeaseRevoke(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	lapi := clus.RandClient()
@@ -96,9 +96,9 @@ func TestLeaseRevoke(t *testing.T) {
 }
 
 func TestLeaseKeepAliveOnce(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	lapi := clus.RandClient()
@@ -120,9 +120,9 @@ func TestLeaseKeepAliveOnce(t *testing.T) {
 }
 
 func TestLeaseKeepAlive(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	lapi := clus.Client(0)
@@ -160,9 +160,9 @@ func TestLeaseKeepAlive(t *testing.T) {
 }
 
 func TestLeaseKeepAliveOneSecond(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -188,9 +188,9 @@ func TestLeaseKeepAliveOneSecond(t *testing.T) {
 func TestLeaseKeepAliveHandleFailure(t *testing.T) {
 	t.Skip("test it when we have a cluster client")
 
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// TODO: change this line to get a cluster client
@@ -243,9 +243,9 @@ type leaseCh struct {
 
 // TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases.
 func TestLeaseKeepAliveNotFound(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
@@ -276,9 +276,9 @@ func TestLeaseKeepAliveNotFound(t *testing.T) {
 }
 
 func TestLeaseGrantErrConnClosed(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -298,7 +298,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
 	}()
 
 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("le.Grant took too long")
 	case <-donec:
 	}
@@ -308,9 +308,9 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
 // queue is full thus dropping keepalive response sends,
 // keepalive request is sent with the same rate of TTL / 3.
 func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lapi := clus.Client(0)
@@ -348,9 +348,9 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
 }
 
 func TestLeaseGrantNewAfterClose(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -368,16 +368,16 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("le.Grant took too long")
 	case <-donec:
 	}
 }
 
 func TestLeaseRevokeNewAfterClose(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -402,7 +402,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
 		}
 	}()
 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("le.Revoke took too long")
 	case errMsg := <-errMsgCh:
 		if errMsg != "" {
@@ -414,9 +414,9 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
 // TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
 // following a disconnection, lease revoke, then reconnect.
 func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -460,9 +460,9 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
 // TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
 // the initial keep alive request never gets a response.
 func TestLeaseKeepAliveInitTimeout(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -493,9 +493,9 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
 // TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
 // a keep alive request after the first never gets a response.
 func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -528,9 +528,9 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
 }
 
 func TestLeaseTimeToLive(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	c := clus.RandClient()
@@ -586,9 +586,9 @@ func TestLeaseTimeToLive(t *testing.T) {
 }
 
 func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
@@ -621,9 +621,9 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
 }
 
 func TestLeaseLeases(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
@@ -654,9 +654,9 @@ func TestLeaseLeases(t *testing.T) {
 // TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
 // for a while.
 func TestLeaseRenewLostQuorum(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -702,9 +702,9 @@ func TestLeaseRenewLostQuorum(t *testing.T) {
 }
 
 func TestLeaseKeepAliveLoopExit(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	ctx := context.Background()
@@ -727,8 +727,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
 // before, during, and after quorum loss to confirm Grant/KeepAlive tolerates
 // transient cluster failure.
 func TestV3LeaseFailureOverlap(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 
 	numReqs := 5
@@ -780,9 +780,9 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
 
 // TestLeaseWithRequireLeader checks keep-alive channel close when no leader.
 func TestLeaseWithRequireLeader(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 
 	c := clus.Client(0)
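The next file's hunks repeat one call shape besides the import swap: leasing.NewKV wraps a cluster client with a leased-key cache under a prefix and returns the wrapped KV, a close function, and an error. A hedged sketch of that usage, with the key, value, and test name chosen for illustration:

package clientv3test

import (
	"context"
	"testing"

	"go.etcd.io/etcd/client/v3/leasing"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestLeasingExample(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// NewKV returns (KV, close func, error), matching the call sites below.
	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
	if err != nil {
		t.Fatal(err)
	}
	defer closeLKV()

	// Reads and writes go through the leasing cache transparently.
	if _, err := lkv.Put(context.TODO(), "k", "v"); err != nil {
		t.Fatal(err)
	}
}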
@@ -28,13 +28,13 @@ import (
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
 	"go.etcd.io/etcd/client/v3/leasing"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestLeasingPutGet(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	lKV1, closeLKV1, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -91,8 +91,8 @@ func TestLeasingPutGet(t *testing.T) {
 
 // TestLeasingInterval checks the leasing KV fetches key intervals.
 func TestLeasingInterval(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -130,8 +130,8 @@ func TestLeasingInterval(t *testing.T) {
 
 // TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key.
 func TestLeasingPutInvalidateNew(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -160,8 +160,8 @@ func TestLeasingPutInvalidateNew(t *testing.T) {
 
 // TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key.
 func TestLeasingPutInvalidateExisting(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -194,8 +194,8 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
 
 // TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased.
 func TestLeasingGetNoLeaseTTL(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -223,8 +223,8 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) {
 // TestLeasingGetSerializable checks the leasing KV can make serialized requests
 // when the etcd cluster is partitioned.
 func TestLeasingGetSerializable(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -263,8 +263,8 @@ func TestLeasingGetSerializable(t *testing.T) {
 
 // TestLeasingPrevKey checks the cache respects WithPrevKV on puts.
 func TestLeasingPrevKey(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -289,8 +289,8 @@ func TestLeasingPrevKey(t *testing.T) {
 
 // TestLeasingRevGet checks the cache respects Get by Revision.
 func TestLeasingRevGet(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -325,8 +325,8 @@ func TestLeasingRevGet(t *testing.T) {
 
 // TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server.
 func TestLeasingGetWithOpts(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -370,8 +370,8 @@ func TestLeasingGetWithOpts(t *testing.T) {
 // TestLeasingConcurrentPut ensures that a get after concurrent puts returns
 // the recently put data.
 func TestLeasingConcurrentPut(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -417,8 +417,8 @@ func TestLeasingConcurrentPut(t *testing.T) {
 }
 
 func TestLeasingDisconnectedGet(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -446,8 +446,8 @@ func TestLeasingDisconnectedGet(t *testing.T) {
 }
 
 func TestLeasingDeleteOwner(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -480,8 +480,8 @@ func TestLeasingDeleteOwner(t *testing.T) {
 }
 
 func TestLeasingDeleteNonOwner(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -515,8 +515,8 @@ func TestLeasingDeleteNonOwner(t *testing.T) {
 }
 
 func TestLeasingOverwriteResponse(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -549,8 +549,8 @@ func TestLeasingOverwriteResponse(t *testing.T) {
 }
 
 func TestLeasingOwnerPutResponse(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -587,8 +587,8 @@ func TestLeasingOwnerPutResponse(t *testing.T) {
 }
 
 func TestLeasingTxnOwnerGetRange(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -616,8 +616,8 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
 }
 
 func TestLeasingTxnOwnerGet(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	client := clus.Client(0)
@@ -702,8 +702,8 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
 }
 
 func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -741,8 +741,8 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
 }
 
 func TestLeasingTxnOwnerDelete(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -772,8 +772,8 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
 }
 
 func TestLeasingTxnOwnerIf(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -866,8 +866,8 @@ func TestLeasingTxnOwnerIf(t *testing.T) {
 }
 
 func TestLeasingTxnCancel(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -900,8 +900,8 @@ func TestLeasingTxnCancel(t *testing.T) {
 }
 
 func TestLeasingTxnNonOwnerPut(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -978,8 +978,8 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
 // TestLeasingTxnRandIfThenOrElse randomly leases keys two separate clients, then
 // issues a random If/{Then,Else} transaction on those keys to one client.
 func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1084,8 +1084,8 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
 }
 
 func TestLeasingOwnerPutError(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1105,8 +1105,8 @@ func TestLeasingOwnerPutError(t *testing.T) {
 }
 
 func TestLeasingOwnerDeleteError(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1126,8 +1126,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
 }
 
 func TestLeasingNonOwnerPutError(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1151,8 +1151,8 @@ func TestLeasingOwnerDeleteFrom(t *testing.T) {
 }
 
 func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "0/")
@@ -1200,8 +1200,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
 }
 
 func TestLeasingDeleteRangeBounds(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
@@ -1258,8 +1258,8 @@ func TestLeaseDeleteRangeContendDel(t *testing.T) {
 }
 
 func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
@@ -1316,8 +1316,8 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
 }
 
 func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkvs := make([]clientv3.KV, 16)
@@ -1375,8 +1375,8 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
 // TestLeasingReconnectOwnerRevoke checks that revocation works if
 // disconnected when trying to submit revoke txn.
 func TestLeasingReconnectOwnerRevoke(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@@ -1436,8 +1436,8 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
 // TestLeasingReconnectOwnerRevokeCompact checks that revocation works if
 // disconnected and the watch is compacted.
 func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@@ -1489,8 +1489,8 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
 // TestLeasingReconnectOwnerConsistency checks a write error on an owner will
 // not cause inconsistency between the server and the client.
 func TestLeasingReconnectOwnerConsistency(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1563,8 +1563,8 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) {
 }
 
 func TestLeasingTxnAtomicCache(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1649,8 +1649,8 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
 
 // TestLeasingReconnectTxn checks that Txn is resilient to disconnects.
 func TestLeasingReconnectTxn(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1685,8 +1685,8 @@ func TestLeasingReconnectTxn(t *testing.T) {
 // TestLeasingReconnectNonOwnerGet checks a get error on an owner will
 // not cause inconsistency between the server and the client.
 func TestLeasingReconnectNonOwnerGet(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1736,8 +1736,8 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
 }
 
 func TestLeasingTxnRangeCmp(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1771,8 +1771,8 @@ func TestLeasingTxnRangeCmp(t *testing.T) {
 }
 
 func TestLeasingDo(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1813,8 +1813,8 @@ func TestLeasingDo(t *testing.T) {
 }
 
 func TestLeasingTxnOwnerPutBranch(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1907,8 +1907,8 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
 }
 
 func TestLeasingSessionExpire(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
@@ -1931,7 +1931,7 @@ func TestLeasingSessionExpire(t *testing.T) {
 	}
 	waitForExpireAck(t, lkv)
 	clus.Members[0].Restart(t)
-	integration.WaitClientV3(t, lkv2)
+	integration2.WaitClientV3(t, lkv2)
 	if _, err = lkv2.Put(context.TODO(), "abc", "def"); err != nil {
 		t.Fatal(err)
 	}
@@ -1983,8 +1983,8 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
 	}
 	for i := range tests {
 		t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
-			integration.BeforeTest(t)
-			clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+			integration2.BeforeTest(t)
+			clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
			defer clus.Terminate(t)
 
			lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
@@ -25,6 +25,7 @@ import (
 	"testing"
 	"time"
 
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.uber.org/zap/zaptest"
 	"google.golang.org/grpc"
 
@@ -34,13 +35,12 @@ import (
 	"go.etcd.io/etcd/server/v3/lease"
 	"go.etcd.io/etcd/server/v3/storage/backend"
 	"go.etcd.io/etcd/server/v3/storage/mvcc"
-	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestMaintenanceHashKV(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	for i := 0; i < 3; i++ {
@@ -71,9 +71,9 @@ func TestMaintenanceHashKV(t *testing.T) {
 }
 
 func TestMaintenanceMoveLeader(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	oldLeadIdx := clus.WaitLeader(t)
@@ -102,9 +102,9 @@ func TestMaintenanceMoveLeader(t *testing.T) {
 // TestMaintenanceSnapshotCancel ensures that context cancel
 // before snapshot reading returns corresponding context errors.
 func TestMaintenanceSnapshotCancel(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	// reading snapshot with canceled context should error out
@@ -145,9 +145,9 @@ func TestMaintenanceSnapshotTimeout(t *testing.T) {
 // testMaintenanceSnapshotTimeout given snapshot function ensures that it
 // returns corresponding context errors when context timeout happened before snapshot reading
 func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	// reading snapshot with deadline exceeded should error out
@@ -190,9 +190,9 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
 // testMaintenanceSnapshotErrorInflight given snapshot function ensures that ReaderCloser returned by it
 // will fail to read with corresponding context errors on inflight context cancel timeout.
 func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// take about 1-second to read snapshot
@@ -247,10 +247,10 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co
 
 // TestMaintenanceSnapshotWithVersionVersion ensures that SnapshotWithVersion returns correct version value.
 func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	// Set SnapshotCount to 1 to force raft snapshot to ensure that storage version is set
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SnapshotCount: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SnapshotCount: 1})
 	defer clus.Terminate(t)
 
 	// Put some keys to ensure that wal snapshot is triggered
@@ -270,9 +270,9 @@ func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) {
 }
 
 func TestMaintenanceStatus(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	clus.WaitLeader(t)
@@ -282,7 +282,7 @@ func TestMaintenanceStatus(t *testing.T) {
 		eps[i] = clus.Members[i].GRPCURL()
 	}
 
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
 	if err != nil {
 		t.Fatal(err)
 	}
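Several hunks above build a client explicitly with integration2.NewClient and a plain clientv3.Config rather than using a cluster-provided client. A hedged sketch of that pattern, following TestMaintenanceStatus; the endpoint loop and the NewClient call mirror the surrounding test, while the test name and the Status check are illustrative:

package clientv3test

import (
	"context"
	"testing"

	clientv3 "go.etcd.io/etcd/client/v3"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
	"google.golang.org/grpc"
)

func TestStatusExample(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Collect each member's gRPC endpoint, as TestMaintenanceStatus does.
	eps := make([]string, 3)
	for i := 0; i < 3; i++ {
		eps[i] = clus.Members[i].GRPCURL()
	}

	// NewClient builds a clientv3 client from a plain clientv3.Config.
	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// Query one member's status through the maintenance API.
	if _, err := cli.Status(context.TODO(), eps[0]); err != nil {
		t.Fatal(err)
	}
}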
@@ -25,17 +25,16 @@ import (
 	"testing"
 	"time"
 
-	"go.etcd.io/etcd/client/pkg/v3/transport"
-	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
-
 	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.etcd.io/etcd/client/pkg/v3/transport"
+	"go.etcd.io/etcd/client/v3"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 )
 
 func TestV3ClientMetrics(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	var (
 		addr = "localhost:27989"
@@ -71,7 +70,7 @@ func TestV3ClientMetrics(t *testing.T) {
 
 	url := "unix://" + addr + "/metrics"
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SkipCreatingClient: true})
 	defer clus.Terminate(t)
 
 	cfg := clientv3.Config{
@@ -81,7 +80,7 @@ func TestV3ClientMetrics(t *testing.T) {
 			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
 		},
 	}
-	cli, cerr := integration.NewClient(t, cfg)
+	cli, cerr := integration2.NewClient(t, cfg)
 	if cerr != nil {
 		t.Fatal(cerr)
 	}
@@ -24,13 +24,13 @@ import (
 
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/client/v3/mirror"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestMirrorSync(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	c := clus.Client(0)
@@ -72,9 +72,9 @@ func TestMirrorSync(t *testing.T) {
 }
 
 func TestMirrorSyncBase(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)
 
 	cli := cluster.Client(0)
@@ -22,13 +22,13 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/namespace"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestNamespacePutGet(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	c := clus.Client(0)
@@ -55,9 +55,9 @@ func TestNamespacePutGet(t *testing.T) {
 }
 
 func TestNamespaceWatch(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	c := clus.Client(0)
@@ -21,14 +21,13 @@ import (
 
 	etcd "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/naming/endpoints"
-
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestEndpointManager(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	em, err := endpoints.NewManager(clus.RandClient(), "foo")
@@ -88,9 +87,9 @@ func TestEndpointManager(t *testing.T) {
 // correctly with multiple hosts and correctly receive multiple
 // updates in a single revision.
 func TestEndpointManagerAtomicity(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	c := clus.RandClient()
@@ -130,9 +129,9 @@ func TestEndpointManagerAtomicity(t *testing.T) {
 }
 
 func TestEndpointManagerCRUD(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	em, err := endpoints.NewManager(clus.RandClient(), "foo")
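The naming tests above all start from endpoints.NewManager(client, prefix). A hedged sketch of registering an endpoint with the manager; the key and address are invented for illustration, and it assumes the Manager interface's AddEndpoint method from client/v3/naming/endpoints:

package naming_test

import (
	"context"
	"testing"

	"go.etcd.io/etcd/client/v3/naming/endpoints"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestEndpointManagerExample(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// "foo" is the naming prefix used by the tests above.
	em, err := endpoints.NewManager(clus.RandClient(), "foo")
	if err != nil {
		t.Fatal(err)
	}

	// Register one endpoint under the prefix (assumed API; key/address hypothetical).
	if err := em.AddEndpoint(context.TODO(), "foo/e1", endpoints.Endpoint{Addr: "1.2.3.4"}); err != nil {
		t.Fatal(err)
	}
}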
@@ -23,8 +23,7 @@ import (
 	"go.etcd.io/etcd/client/v3/naming/endpoints"
 	"go.etcd.io/etcd/client/v3/naming/resolver"
 	"go.etcd.io/etcd/pkg/v3/grpc_testing"
-	"go.etcd.io/etcd/tests/v3/integration"
-
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 	testpb "google.golang.org/grpc/test/grpc_testing"
 )
@@ -32,7 +31,7 @@ import (
 // This test mimics scenario described in grpc_naming.md doc.
 
 func TestEtcdGrpcResolver(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
 	s1PayloadBody := []byte{'1'}
 	s1 := grpc_testing.NewDummyStubServer(s1PayloadBody)
@@ -48,7 +47,7 @@ func TestEtcdGrpcResolver(t *testing.T) {
 	}
 	defer s2.Stop()
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	em, err := endpoints.NewManager(clus.Client(0), "foo")
@@ -23,14 +23,14 @@ import (
 	"github.com/stretchr/testify/assert"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/ordering"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestDetectKvOrderViolation(t *testing.T) {
 	var errOrderViolation = errors.New("DetectedOrderViolation")
 
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cfg := clientv3.Config{
@@ -40,7 +40,7 @@ func TestDetectKvOrderViolation(t *testing.T) {
 			clus.Members[2].GRPCURL(),
 		},
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -96,8 +96,8 @@ func TestDetectKvOrderViolation(t *testing.T) {
 func TestDetectTxnOrderViolation(t *testing.T) {
 	var errOrderViolation = errors.New("DetectedOrderViolation")
 
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cfg := clientv3.Config{
@@ -107,7 +107,7 @@ func TestDetectTxnOrderViolation(t *testing.T) {
 			clus.Members[2].GRPCURL(),
 		},
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -21,12 +21,12 @@ import (
 
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/ordering"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestEndpointSwitchResolvesViolation(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	eps := []string{
 		clus.Members[0].GRPCURL(),
@@ -34,7 +34,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 		clus.Members[2].GRPCURL(),
 	}
 	cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -79,8 +79,8 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 }
 
 func TestUnresolvableOrderViolation(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true})
 	defer clus.Terminate(t)
 	cfg := clientv3.Config{
 		Endpoints: []string{
@@ -91,7 +91,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
 			clus.Members[4].GRPCURL(),
 		},
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -19,13 +19,13 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestRoleError(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	authapi := clus.RandClient()
@@ -29,7 +29,7 @@ import (
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/snapshot"
 	"go.etcd.io/etcd/server/v3/embed"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.uber.org/zap/zaptest"
 )
 
@@ -78,7 +78,7 @@ func newEmbedConfig(t *testing.T) *embed.Config {
 	clusterN := 1
 	urls := newEmbedURLs(clusterN * 2)
 	cURLs, pURLs := urls[:clusterN], urls[clusterN:]
-	cfg := integration.NewEmbedConfig(t, "default")
+	cfg := integration2.NewEmbedConfig(t, "default")
 	cfg.ClusterState = "new"
 	cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
 	cfg.LPUrls, cfg.APUrls = pURLs, pURLs
@@ -105,7 +105,7 @@ func createSnapshotFile(t *testing.T, cfg *embed.Config, kvs []kv) (version stri
 	}
 
 	ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
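The snapshot hunks above switch newEmbedConfig over to integration2.NewEmbedConfig. A hedged sketch of standing up an embedded server from such a config; "default" is the member name the hunk itself passes, while the rest assumes the standard embed API (StartEtcd, ReadyNotify, Close) and a test name chosen for illustration:

package snapshot_test

import (
	"testing"

	"go.etcd.io/etcd/server/v3/embed"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestEmbedExample(t *testing.T) {
	integration2.BeforeTest(t)

	// NewEmbedConfig returns an embed.Config pre-wired for tests.
	cfg := integration2.NewEmbedConfig(t, "default")
	cfg.ClusterState = "new"

	srv, err := embed.StartEtcd(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	// Block until the server is ready before dialing cfg.ACUrls with a client.
	<-srv.Server.ReadyNotify()
}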
@@ -23,13 +23,13 @@ import (
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/embed"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
 func TestTxnError(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	kv := clus.RandClient()
@@ -51,9 +51,9 @@ func TestTxnError(t *testing.T) {
 }
 
 func TestTxnWriteFail(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -101,9 +101,9 @@ func TestTxnWriteFail(t *testing.T) {
 func TestTxnReadRetry(t *testing.T) {
 	t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request")
 
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -140,9 +140,9 @@ func TestTxnReadRetry(t *testing.T) {
 }
 
 func TestTxnSuccess(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -163,9 +163,9 @@ func TestTxnSuccess(t *testing.T) {
 }
 
 func TestTxnCompareRange(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -190,9 +190,9 @@ func TestTxnCompareRange(t *testing.T) {
 }
 
 func TestTxnNested(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -21,14 +21,14 @@ import (

 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 )

 func TestUserError(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	authapi := clus.RandClient()
@@ -55,9 +55,9 @@ func TestUserError(t *testing.T) {
 }

 func TestUserErrorAuth(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	authapi := clus.RandClient()
@@ -75,16 +75,16 @@ func TestUserErrorAuth(t *testing.T) {
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
 	cfg.Username, cfg.Password = "wrong-id", "123"
-	if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
+	if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
 		t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
 	}
 	cfg.Username, cfg.Password = "root", "wrong-pass"
-	if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
+	if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
 		t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
 	}

 	cfg.Username, cfg.Password = "root", "123"
-	authed, err := integration.NewClient(t, cfg)
+	authed, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -112,9 +112,9 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {

 // Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode.
 func TestGetTokenWithoutAuth(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)

 	authapi := clus.RandClient()
@@ -135,7 +135,7 @@ func TestGetTokenWithoutAuth(t *testing.T) {
 		Password: "123",
 	}

-	client, err = integration.NewClient(t, cfg)
+	client, err = integration2.NewClient(t, cfg)
 	if err == nil {
 		defer client.Close()
 	}

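The auth hunks keep the sentinel-error comparison against `rpctypes.ErrAuthFailed` and only swap the client constructor. Extracted as a helper, the recurring assertion looks like the sketch below (the `mustFailAuth` name is hypothetical; `integration2.NewClient` and the sentinel comparison are taken from the hunks above):

import (
	"testing"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// mustFailAuth dials with the given credentials and asserts the
// server rejects them with ErrAuthFailed.
func mustFailAuth(t *testing.T, cfg clientv3.Config, user, pass string) {
	cfg.Username, cfg.Password = user, pass
	if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
		t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
	}
}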
@@ -26,7 +26,7 @@ import (

 	"go.etcd.io/etcd/client/pkg/v3/testutil"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 // TestWatchFragmentDisable ensures that large watch
@@ -64,16 +64,16 @@ func TestWatchFragmentEnableWithGRPCLimit(t *testing.T) {
 // testWatchFragment triggers watch response that spans over multiple
 // revisions exceeding server request limits when combined.
 func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cfg := &integration.ClusterConfig{
+	cfg := &integration2.ClusterConfig{
 		Size:            1,
 		MaxRequestBytes: 1.5 * 1024 * 1024,
 	}
 	if exceedRecvLimit {
 		cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024
 	}
-	clus := integration.NewClusterV3(t, cfg)
+	clus := integration2.NewClusterV3(t, cfg)
 	defer clus.Terminate(t)

 	cli := clus.Client(0)

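The fragment test pins the server-side `MaxRequestBytes` and, when `exceedRecvLimit` is set, the client-side `ClientMaxCallRecvMsgSize` to the same 1.5 MiB, so an unfragmented multi-revision watch response is guaranteed to overflow a single gRPC message. Isolated into a helper under that assumption (the helper name is hypothetical; the field names come from the hunk above):

import (
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// fragmentingConfig returns a one-member cluster config whose server
// message cap, and optionally client receive cap, sit at ~1.5 MiB.
func fragmentingConfig(exceedRecvLimit bool) *integration2.ClusterConfig {
	cfg := &integration2.ClusterConfig{
		Size:            1,
		MaxRequestBytes: 1.5 * 1024 * 1024, // server-side cap on one message
	}
	if exceedRecvLimit {
		// Matching the cap on the client receive side means the test can
		// only pass when the server actually fragments the response.
		cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024
	}
	return cfg
}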
@@ -29,14 +29,14 @@ import (
 	"go.etcd.io/etcd/api/v3/version"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc/metadata"
 )

 type watcherTest func(*testing.T, *watchctx)

 type watchctx struct {
-	clus          *integration.ClusterV3
+	clus          *integration2.ClusterV3
 	w             clientv3.Watcher
 	kv            clientv3.KV
 	wclientMember int
@@ -45,9 +45,9 @@ type watchctx struct {
 }

 func runWatchTest(t *testing.T, f watcherTest) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)

 	wclientMember := rand.Intn(3)
@@ -299,7 +299,7 @@ func TestWatchCancelRunning(t *testing.T) {
 }

 func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

 	ctx, cancel := context.WithCancel(context.Background())
 	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
@@ -347,8 +347,8 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
 }

 func TestWatchResumeInitRev(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)

 	cli := clus.Client(0)
@@ -402,9 +402,9 @@ func TestWatchResumeInitRev(t *testing.T) {
 // either a compaction error or all keys by staying in sync before the compaction
 // is finally applied.
 func TestWatchResumeCompacted(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)

 	// create a waiting watcher at rev 1
@@ -489,9 +489,9 @@ func TestWatchResumeCompacted(t *testing.T) {
 // TestWatchCompactRevision ensures the CompactRevision error is given on a
 // compaction event ahead of a watcher.
 func TestWatchCompactRevision(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	// set some keys
@@ -531,7 +531,7 @@ func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNot
 func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) }

 func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

 	// accelerate report interval so test terminates quickly
 	oldpi := v3rpc.GetProgressReportInterval()
@@ -540,7 +540,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
 	pi := 3 * time.Second
 	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	wc := clus.RandClient()
@@ -585,11 +585,11 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
 }

 func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

 	progressInterval := 200 * time.Millisecond
-	clus := integration.NewClusterV3(t,
-		&integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t,
+		&integration2.ClusterConfig{
 			Size:                        3,
 			WatchProgressNotifyInterval: progressInterval,
 		})
@@ -611,7 +611,7 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
 }

 func TestWatchRequestProgress(t *testing.T) {
-	if integration.ThroughProxy {
+	if integration2.ThroughProxy {
 		t.Skipf("grpc-proxy does not support WatchProgress yet")
 	}
 	testCases := []struct {
@@ -625,11 +625,11 @@ func TestWatchRequestProgress(t *testing.T) {

 	for _, c := range testCases {
 		t.Run(c.name, func(t *testing.T) {
-			integration.BeforeTest(t)
+			integration2.BeforeTest(t)

 			watchTimeout := 3 * time.Second

-			clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+			clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 			defer clus.Terminate(t)

 			wc := clus.RandClient()
@@ -686,9 +686,9 @@ func TestWatchRequestProgress(t *testing.T) {
 }

 func TestWatchEventType(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)

 	client := cluster.RandClient()
@@ -760,9 +760,9 @@ func TestWatchEventType(t *testing.T) {
 }

 func TestWatchErrConnClosed(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	cli := clus.Client(0)
@@ -783,16 +783,16 @@ func TestWatchErrConnClosed(t *testing.T) {
 	clus.TakeClient(0)

 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("wc.Watch took too long")
 	case <-donec:
 	}
 }

 func TestWatchAfterClose(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	cli := clus.Client(0)
@@ -810,7 +810,7 @@ func TestWatchAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("wc.Watch took too long")
 	case <-donec:
 	}
@@ -818,9 +818,9 @@ func TestWatchAfterClose(t *testing.T) {

 // TestWatchWithRequireLeader checks the watch channel closes when no leader.
 func TestWatchWithRequireLeader(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	// Put a key for the non-require leader watch to read as an event.
@@ -856,7 +856,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 		if resp.Err() != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
 		}
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("watch without leader took too long to close")
 	}

@@ -865,7 +865,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 		if ok {
 			t.Fatalf("expected closed channel, got response %v", resp)
 		}
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("waited too long for channel to close")
 	}

@@ -892,9 +892,9 @@ func TestWatchWithRequireLeader(t *testing.T) {

 // TestWatchWithFilter checks that watch filtering works.
 func TestWatchWithFilter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)

 	client := cluster.RandClient()
@@ -931,9 +931,9 @@ func TestWatchWithFilter(t *testing.T) {
 // TestWatchWithCreatedNotification checks that WithCreatedNotify returns a
 // Created watch response.
 func TestWatchWithCreatedNotification(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)

 	client := cluster.RandClient()
@@ -953,9 +953,9 @@ func TestWatchWithCreatedNotification(t *testing.T) {
 // a watcher with created notify does not post duplicate
 // created events from disconnect.
 func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer cluster.Terminate(t)

 	client := cluster.RandClient()
@@ -982,9 +982,9 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {

 // TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
 func TestWatchCancelOnServer(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)

 	client := cluster.RandClient()
@@ -1050,20 +1050,20 @@ func TestWatchCancelOnServer(t *testing.T) {
 // 4. watcher client finishes tearing down stream on "ctx"
 // 5. w2 comes back canceled
 func TestWatchOverlapContextCancel(t *testing.T) {
-	f := func(clus *integration.ClusterV3) {}
+	f := func(clus *integration2.ClusterV3) {}
 	testWatchOverlapContextCancel(t, f)
 }

 func TestWatchOverlapDropConnContextCancel(t *testing.T) {
-	f := func(clus *integration.ClusterV3) {
+	f := func(clus *integration2.ClusterV3) {
 		clus.Members[0].Bridge().DropConnections()
 	}
 	testWatchOverlapContextCancel(t, f)
 }

-func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.ClusterV3)) {
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)

 	n := 100
@@ -1123,8 +1123,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
 // TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
 // closing the client does not return a client closing error.
 func TestWatchCancelAndCloseClient(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
 	ctx, cancel := context.WithCancel(context.Background())
@@ -1153,8 +1153,8 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
 // to put them in resuming mode, cancels them so some resumes by cancel fail,
 // then closes the watcher interface to ensure correct clean up.
 func TestWatchStressResumeClose(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)

@@ -1175,8 +1175,8 @@ func TestWatchStressResumeClose(t *testing.T) {
 // TestWatchCancelDisconnected ensures canceling a watcher works when
 // its grpc stream is disconnected / reconnecting.
 func TestWatchCancelDisconnected(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
 	ctx, cancel := context.WithCancel(context.Background())

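Many of the watch tests above bound their teardown with the exported `RequestWaitTimeout` constant. The recurring select shape, pulled out as a standalone helper (the `waitOrFatal` name is hypothetical; the timeout identifier is the one used in the hunks):

import (
	"testing"
	"time"

	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// waitOrFatal fails the test if done is not closed before the
// framework's request-wait timeout elapses.
func waitOrFatal(t *testing.T, done <-chan struct{}) {
	select {
	case <-time.After(integration2.RequestWaitTimeout):
		t.Fatal("operation took too long")
	case <-done:
	}
}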
@@ -27,6 +27,7 @@ import (

 	"go.etcd.io/etcd/client/v2"
 	"go.etcd.io/etcd/server/v3/etcdserver"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func init() {
@@ -34,7 +35,7 @@ func init() {
 	log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
 	if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
 		if i, err := strconv.ParseInt(t, 10, 64); err == nil {
-			electionTicks = int(i)
+			integration.ElectionTicks = int(i)
 		}
 	}
 }
@@ -43,16 +44,16 @@ func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
 func TestClusterOf3(t *testing.T) { testCluster(t, 3) }

 func testCluster(t *testing.T, size int) {
-	BeforeTest(t)
-	c := NewCluster(t, size)
+	integration.BeforeTest(t)
+	c := integration.NewCluster(t, size)
 	c.Launch(t)
 	defer c.Terminate(t)
 	clusterMustProgress(t, c.Members)
 }

 func TestTLSClusterOf3(t *testing.T) {
-	BeforeTest(t)
-	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
+	integration.BeforeTest(t)
+	c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo})
 	c.Launch(t)
 	defer c.Terminate(t)
 	clusterMustProgress(t, c.Members)
@@ -61,8 +62,8 @@ func TestTLSClusterOf3(t *testing.T) {
 // Test that a cluster can progress when using separate client and server certs when peering. This supports certificate
 // authorities that don't issue dual-usage certificates.
 func TestTLSClusterOf3WithSpecificUsage(t *testing.T) {
-	BeforeTest(t)
-	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfoWithSpecificUsage})
+	integration.BeforeTest(t)
+	c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage})
 	c.Launch(t)
 	defer c.Terminate(t)
 	clusterMustProgress(t, c.Members)
@@ -72,22 +73,22 @@ func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1
 func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }

 func testClusterUsingDiscovery(t *testing.T, size int) {
-	BeforeTest(t)
-	dc := NewCluster(t, 1)
+	integration.BeforeTest(t)
+	dc := integration.NewCluster(t, 1)
 	dc.Launch(t)
 	defer dc.Terminate(t)
 	// init discovery token space
-	dcc := MustNewHTTPClient(t, dc.URLs(), nil)
+	dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil)
 	dkapi := client.NewKeysAPI(dcc)
-	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
 		t.Fatal(err)
 	}
 	cancel()

-	c := NewClusterByConfig(
+	c := integration.NewClusterByConfig(
 		t,
-		&ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
+		&integration.ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
 	)
 	c.Launch(t)
 	defer c.Terminate(t)
@@ -95,23 +96,23 @@ func testClusterUsingDiscovery(t *testing.T, size int) {
 }

 func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
-	BeforeTest(t)
-	dc := NewCluster(t, 1)
+	integration.BeforeTest(t)
+	dc := integration.NewCluster(t, 1)
 	dc.Launch(t)
 	defer dc.Terminate(t)
 	// init discovery token space
-	dcc := MustNewHTTPClient(t, dc.URLs(), nil)
+	dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil)
 	dkapi := client.NewKeysAPI(dcc)
-	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
 		t.Fatal(err)
 	}
 	cancel()

-	c := NewClusterByConfig(t,
-		&ClusterConfig{
+	c := integration.NewClusterByConfig(t,
+		&integration.ClusterConfig{
 			Size:         3,
-			PeerTLS:      &testTLSInfo,
+			PeerTLS:      &integration.TestTLSInfo,
 			DiscoveryURL: dc.URL(0) + "/v2/keys"},
 	)
 	c.Launch(t)
@@ -123,8 +124,8 @@ func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
 func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }

 func testDoubleClusterSize(t *testing.T, size int) {
-	BeforeTest(t)
-	c := NewCluster(t, size)
+	integration.BeforeTest(t)
+	c := integration.NewCluster(t, size)
 	c.Launch(t)
 	defer c.Terminate(t)

@@ -135,8 +136,8 @@ func testDoubleClusterSize(t *testing.T, size int) {
 }

 func TestDoubleTLSClusterSizeOf3(t *testing.T) {
-	BeforeTest(t)
-	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
+	integration.BeforeTest(t)
+	c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo})
 	c.Launch(t)
 	defer c.Terminate(t)

@@ -150,16 +151,16 @@ func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
 func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }

 func testDecreaseClusterSize(t *testing.T, size int) {
-	BeforeTest(t)
-	c := NewCluster(t, size)
+	integration.BeforeTest(t)
+	c := integration.NewCluster(t, size)
 	c.Launch(t)
 	defer c.Terminate(t)

 	// TODO: remove the last but one member
 	for i := 0; i < size-1; i++ {
-		id := c.Members[len(c.Members)-1].s.ID()
+		id := c.Members[len(c.Members)-1].Server.ID()
 		// may hit second leader election on slow machines
-		if err := c.removeMember(t, uint64(id)); err != nil {
+		if err := c.RemoveMember(t, uint64(id)); err != nil {
 			if strings.Contains(err.Error(), "no leader") {
 				t.Logf("got leader error (%v)", err)
 				i--
@@ -167,24 +168,24 @@ func testDecreaseClusterSize(t *testing.T, size int) {
 			}
 			t.Fatal(err)
 		}
-		c.waitLeader(t, c.Members)
+		c.WaitMembersForLeader(t, c.Members)
 	}
 	clusterMustProgress(t, c.Members)
 }

 func TestForceNewCluster(t *testing.T) {
-	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	c.Launch(t)
-	cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
+	cc := integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
 	kapi := client.NewKeysAPI(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	resp, err := kapi.Create(ctx, "/foo", "bar")
 	if err != nil {
 		t.Fatalf("unexpected create error: %v", err)
 	}
 	cancel()
 	// ensure create has been applied in this machine
-	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
+	ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
 		t.Fatalf("unexpected watch error: %v", err)
 	}
@@ -199,13 +200,13 @@ func TestForceNewCluster(t *testing.T) {
 		t.Fatalf("unexpected ForceRestart error: %v", err)
 	}
 	defer c.Members[0].Terminate(t)
-	c.waitLeader(t, c.Members[:1])
+	c.WaitMembersForLeader(t, c.Members[:1])

 	// use new http client to init new connection
-	cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
+	cc = integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
 	kapi = client.NewKeysAPI(cc)
-	// ensure force restart keep the old data, and new cluster can make progress
-	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
+	// ensure force restart keep the old data, and new Cluster can make progress
+	ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
 		t.Fatalf("unexpected watch error: %v", err)
 	}
@@ -214,38 +215,38 @@ func TestForceNewCluster(t *testing.T) {
 }

 func TestAddMemberAfterClusterFullRotation(t *testing.T) {
-	BeforeTest(t)
-	c := NewCluster(t, 3)
+	integration.BeforeTest(t)
+	c := integration.NewCluster(t, 3)
 	c.Launch(t)
 	defer c.Terminate(t)

 	// remove all the previous three members and add in three new members.
 	for i := 0; i < 3; i++ {
-		c.RemoveMember(t, uint64(c.Members[0].s.ID()))
-		c.waitLeader(t, c.Members)
+		c.MustRemoveMember(t, uint64(c.Members[0].Server.ID()))
+		c.WaitMembersForLeader(t, c.Members)

 		c.AddMember(t)
-		c.waitLeader(t, c.Members)
+		c.WaitMembersForLeader(t, c.Members)
 	}

 	c.AddMember(t)
-	c.waitLeader(t, c.Members)
+	c.WaitMembersForLeader(t, c.Members)

 	clusterMustProgress(t, c.Members)
 }

 // Ensure we can remove a member then add a new one back immediately.
 func TestIssue2681(t *testing.T) {
-	BeforeTest(t)
-	c := NewCluster(t, 5)
+	integration.BeforeTest(t)
+	c := integration.NewCluster(t, 5)
 	c.Launch(t)
 	defer c.Terminate(t)

-	c.RemoveMember(t, uint64(c.Members[4].s.ID()))
-	c.waitLeader(t, c.Members)
+	c.MustRemoveMember(t, uint64(c.Members[4].Server.ID()))
+	c.WaitMembersForLeader(t, c.Members)

 	c.AddMember(t)
-	c.waitLeader(t, c.Members)
+	c.WaitMembersForLeader(t, c.Members)
 	clusterMustProgress(t, c.Members)
 }

@@ -256,8 +257,8 @@ func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
 func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }

 func testIssue2746(t *testing.T, members int) {
-	BeforeTest(t)
-	c := NewCluster(t, members)
+	integration.BeforeTest(t)
+	c := integration.NewCluster(t, members)

 	for _, m := range c.Members {
 		m.SnapshotCount = 10
@@ -271,32 +272,32 @@ func testIssue2746(t *testing.T, members int) {
 		clusterMustProgress(t, c.Members)
 	}

-	c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
-	c.waitLeader(t, c.Members)
+	c.MustRemoveMember(t, uint64(c.Members[members-1].Server.ID()))
+	c.WaitMembersForLeader(t, c.Members)

 	c.AddMember(t)
-	c.waitLeader(t, c.Members)
+	c.WaitMembersForLeader(t, c.Members)
 	clusterMustProgress(t, c.Members)
 }

 // Ensure etcd will not panic when removing a just started member.
 func TestIssue2904(t *testing.T) {
-	BeforeTest(t)
-	// start 1-member cluster to ensure member 0 is the leader of the cluster.
-	c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
+	integration.BeforeTest(t)
+	// start 1-member Cluster to ensure member 0 is the leader of the Cluster.
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	c.Launch(t)
 	defer c.Terminate(t)

 	c.AddMember(t)
 	c.Members[1].Stop(t)

-	// send remove member-1 request to the cluster.
-	cc := MustNewHTTPClient(t, c.URLs(), nil)
+	// send remove member-1 request to the Cluster.
+	cc := integration.MustNewHTTPClient(t, c.URLs(), nil)
 	ma := client.NewMembersAPI(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	// the proposal is not committed because member 1 is stopped, but the
-	// proposal is appended to leader's raft log.
-	ma.Remove(ctx, c.Members[1].s.ID().String())
+	// proposal is appended to leader'Server raft log.
+	ma.Remove(ctx, c.Members[1].Server.ID().String())
 	cancel()

 	// restart member, and expect it to send UpdateAttributes request.
@@ -305,21 +306,21 @@ func TestIssue2904(t *testing.T) {
 	c.Members[1].Restart(t)
 	// when the member comes back, it ack the proposal to remove itself,
 	// and apply it.
-	<-c.Members[1].s.StopNotify()
+	<-c.Members[1].Server.StopNotify()

 	// terminate removed member
 	c.Members[1].Terminate(t)
 	c.Members = c.Members[:1]
 	// wait member to be removed.
-	c.waitMembersMatch(t, c.HTTPMembers())
+	c.WaitMembersMatch(t, c.HTTPMembers())
 }

 // TestIssue3699 tests minority failure during cluster configuration; it was
 // deadlocking.
 func TestIssue3699(t *testing.T) {
-	// start a cluster of 3 nodes a, b, c
-	BeforeTest(t)
-	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
+	// start a Cluster of 3 nodes a, b, c
+	integration.BeforeTest(t)
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	c.Launch(t)
 	defer c.Terminate(t)

@@ -330,16 +331,16 @@ func TestIssue3699(t *testing.T) {
 	c.AddMember(t)

 	// electing node d as leader makes node a unable to participate
-	leaderID := c.waitLeader(t, c.Members)
+	leaderID := c.WaitMembersForLeader(t, c.Members)
 	for leaderID != 3 {
 		c.Members[leaderID].Stop(t)
-		<-c.Members[leaderID].s.StopNotify()
+		<-c.Members[leaderID].Server.StopNotify()
 		// do not restart the killed member immediately.
 		// the member will advance its election timeout after restart,
 		// so it will have a better chance to become the leader again.
-		time.Sleep(time.Duration(electionTicks * int(tickDuration)))
+		time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration)))
 		c.Members[leaderID].Restart(t)
-		leaderID = c.waitLeader(t, c.Members)
+		leaderID = c.WaitMembersForLeader(t, c.Members)
 	}

 	// bring back node a
@@ -351,17 +352,17 @@ func TestIssue3699(t *testing.T) {
 	// waiting for ReadyNotify can take several seconds
 	case <-time.After(10 * time.Second):
 		t.Fatalf("waited too long for ready notification")
-	case <-c.Members[0].s.StopNotify():
+	case <-c.Members[0].Server.StopNotify():
 		t.Fatalf("should not be stopped")
-	case <-c.Members[0].s.ReadyNotify():
+	case <-c.Members[0].Server.ReadyNotify():
 	}
-	// must waitLeader so goroutines don't leak on terminate
-	c.waitLeader(t, c.Members)
+	// must WaitMembersForLeader so goroutines don't leak on terminate
+	c.WaitMembersForLeader(t, c.Members)

-	// try to participate in cluster
-	cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
+	// try to participate in Cluster
+	cc := integration.MustNewHTTPClient(t, []string{c.URL(0)}, c.Cfg.ClientTLS)
 	kapi := client.NewKeysAPI(cc)
-	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 	if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
 		t.Fatalf("unexpected error on Set (%v)", err)
 	}
@@ -370,21 +371,21 @@ func TestIssue3699(t *testing.T) {

 // TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
 func TestRejectUnhealthyAdd(t *testing.T) {
-	BeforeTest(t)
-	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
+	integration.BeforeTest(t)
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	for _, m := range c.Members {
 		m.ServerConfig.StrictReconfigCheck = true
 	}
 	c.Launch(t)
 	defer c.Terminate(t)

-	// make cluster unhealthy and wait for downed peer
+	// make Cluster unhealthy and wait for downed peer
 	c.Members[0].Stop(t)
 	c.WaitLeader(t)

 	// all attempts to add member should fail
 	for i := 1; i < len(c.Members); i++ {
-		err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
+		err := c.AddMemberByURL(t, c.URL(i), "unix://foo:12345")
 		if err == nil {
 			t.Fatalf("should have failed adding peer")
 		}
@@ -399,23 +400,23 @@ func TestRejectUnhealthyAdd(t *testing.T) {
 	c.WaitLeader(t)
 	time.Sleep(2 * etcdserver.HealthInterval)

-	// add member should succeed now that it's healthy
+	// add member should succeed now that it'Server healthy
 	var err error
 	for i := 1; i < len(c.Members); i++ {
-		if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
+		if err = c.AddMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
 			break
 		}
 	}
 	if err != nil {
-		t.Fatalf("should have added peer to healthy cluster (%v)", err)
+		t.Fatalf("should have added peer to healthy Cluster (%v)", err)
 	}
 }

 // TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
 // if quorum will be lost.
 func TestRejectUnhealthyRemove(t *testing.T) {
-	BeforeTest(t)
-	c := newCluster(t, &ClusterConfig{Size: 5, UseBridge: true})
+	integration.BeforeTest(t)
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 5, UseBridge: true})
 	for _, m := range c.Members {
 		m.ServerConfig.StrictReconfigCheck = true
 	}
@@ -428,7 +429,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
 	c.WaitLeader(t)

 	// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
-	err := c.removeMember(t, uint64(c.Members[2].s.ID()))
+	err := c.RemoveMember(t, uint64(c.Members[2].Server.ID()))
 	if err == nil {
 		t.Fatalf("should reject quorum breaking remove")
 	}
@@ -438,10 +439,10 @@ func TestRejectUnhealthyRemove(t *testing.T) {
 	}

 	// member stopped after launch; wait for missing heartbeats
-	time.Sleep(time.Duration(electionTicks * int(tickDuration)))
+	time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration)))

 	// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
-	if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
+	if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil {
 		t.Fatalf("should accept removing down member")
 	}

@@ -452,7 +453,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
 	time.Sleep((3 * etcdserver.HealthInterval) / 2)

 	// accept remove member since (4,1)-(1,0) => (3,1) has quorum
-	if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
+	if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil {
 		t.Fatalf("expected to remove member, got error %v", err)
 	}
 }
@@ -461,10 +462,10 @@ func TestRejectUnhealthyRemove(t *testing.T) {
 // if 'initial-cluster-state' is set 'new' and old data directory still exists
 // (see https://github.com/etcd-io/etcd/issues/7512 for more).
 func TestRestartRemoved(t *testing.T) {
-	BeforeTest(t)
+	integration.BeforeTest(t)

-	// 1. start single-member cluster
-	c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
+	// 1. start single-member Cluster
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	for _, m := range c.Members {
 		m.ServerConfig.StrictReconfigCheck = true
 	}
@@ -476,10 +477,10 @@ func TestRestartRemoved(t *testing.T) {
 	c.WaitLeader(t)

 	oldm := c.Members[0]
-	oldm.keepDataDirTerminate = true
+	oldm.KeepDataDirTerminate = true

 	// 3. remove first member, shut down without deleting data
-	if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
+	if err := c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil {
 		t.Fatalf("expected to remove member, got error %v", err)
 	}
 	c.WaitLeader(t)
@@ -495,7 +496,7 @@ func TestRestartRemoved(t *testing.T) {
 		os.RemoveAll(oldm.ServerConfig.DataDir)
 	}()
 	select {
-	case <-oldm.s.StopNotify():
+	case <-oldm.Server.StopNotify():
 	case <-time.After(time.Minute):
 		t.Fatalf("removed member didn't exit within %v", time.Minute)
 	}
@@ -504,8 +505,8 @@ func TestRestartRemoved(t *testing.T) {
 // clusterMustProgress ensures that cluster can make progress. It creates
 // a random key first, and check the new key could be got from all client urls
 // of the cluster.
-func clusterMustProgress(t *testing.T, membs []*member) {
-	cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
+func clusterMustProgress(t *testing.T, membs []*integration.Member) {
+	cc := integration.MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
 	kapi := client.NewKeysAPI(cc)
 	key := fmt.Sprintf("foo%d", rand.Int())
 	var (
@@ -514,7 +515,7 @@ func clusterMustProgress(t *testing.T, membs []*member) {
 	)
 	// retry in case of leader loss induced by slow CI
 	for i := 0; i < 3; i++ {
-		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+		ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 		resp, err = kapi.Create(ctx, "/"+key, "bar")
 		cancel()
 		if err == nil {
@@ -528,9 +529,9 @@ func clusterMustProgress(t *testing.T, membs []*member) {

 	for i, m := range membs {
 		u := m.URL()
-		mcc := MustNewHTTPClient(t, []string{u}, nil)
+		mcc := integration.MustNewHTTPClient(t, []string{u}, nil)
 		mkapi := client.NewKeysAPI(mcc)
-		mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
+		mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 		if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
 			t.Fatalf("#%d: watch on %s error: %v", i, u, err)
 		}
@@ -539,8 +540,8 @@ func clusterMustProgress(t *testing.T, membs []*member) {
 }

 func TestSpeedyTerminate(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	// Stop/Restart so requests will time out on lost leaders
 	for i := 0; i < 3; i++ {
 		clus.Members[i].Stop(t)
@@ -553,7 +554,7 @@ func TestSpeedyTerminate(t *testing.T) {
 	}()
 	select {
 	case <-time.After(10 * time.Second):
-		t.Fatalf("cluster took too long to terminate")
+		t.Fatalf("Cluster took too long to terminate")
 	case <-donec:
 	}
 }

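Because the cluster helpers now live in a separate package, everything the tests touch had to be exported: `BeforeTest`, `NewCluster`, and `MustNewHTTPClient` gain the `integration.` qualifier, `waitLeader` becomes `WaitMembersForLeader`, `removeMember` becomes `RemoveMember` (with a `MustRemoveMember` variant), and the unexported `member.s` field becomes `Member.Server`. A condensed caller after the rename, using only identifiers that appear in the hunks above (the test name and body are illustrative):

import (
	"testing"

	"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestRemoveThenReadd(t *testing.T) {
	integration.BeforeTest(t)
	c := integration.NewCluster(t, 3)
	c.Launch(t)
	defer c.Terminate(t)

	// Server replaces the old unexported field s on each member.
	c.MustRemoveMember(t, uint64(c.Members[2].Server.ID()))
	c.WaitMembersForLeader(t, c.Members)

	c.AddMember(t)
	c.WaitMembersForLeader(t, c.Members)
}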
@@ -34,14 +34,14 @@ import (
 	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/embed"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 var (
 	testTLSInfo = transport.TLSInfo{
-		KeyFile:        integration.MustAbsPath("../../fixtures/server.key.insecure"),
-		CertFile:       integration.MustAbsPath("../../fixtures/server.crt"),
-		TrustedCAFile:  integration.MustAbsPath("../../fixtures/ca.crt"),
+		KeyFile:        integration2.MustAbsPath("../../fixtures/server.key.insecure"),
+		CertFile:       integration2.MustAbsPath("../../fixtures/server.crt"),
+		TrustedCAFile:  integration2.MustAbsPath("../../fixtures/ca.crt"),
 		ClientCertAuth: true,
 	}
 )
@@ -160,7 +160,7 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) {
 			t.Fatal(err)
 		}
 	}
-	cli, err := integration.NewClient(t, clientCfg)
+	cli, err := integration2.NewClient(t, clientCfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -23,6 +23,7 @@ import (
 	"time"

 	clientv3 "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 )

@@ -94,14 +95,14 @@ func TestAuthority(t *testing.T) {
 	for _, tc := range tcs {
 		for _, clusterSize := range []int{1, 3} {
 			t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
-				BeforeTest(t)
-				cfg := ClusterConfig{
+				integration.BeforeTest(t)
+				cfg := integration.ClusterConfig{
 					Size:   clusterSize,
 					UseTCP: tc.useTCP,
 					UseIP:  tc.useTCP,
 				}
 				cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg)
-				clus := NewClusterV3(t, &cfg)
+				clus := integration.NewClusterV3(t, &cfg)
 				defer clus.Terminate(t)

 				kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig)
@@ -118,11 +119,11 @@ func TestAuthority(t *testing.T) {
 	}
 }

-func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls.Config) {
+func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integration.ClusterConfig, *tls.Config) {
 	t.Helper()
 	if useTLS {
-		cfg.ClientTLS = &testTLSInfo
-		tlsConfig, err := testTLSInfo.ClientConfig()
+		cfg.ClientTLS = &integration.TestTLSInfo
+		tlsConfig, err := integration.TestTLSInfo.ClientConfig()
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -131,7 +132,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls
 	return cfg, nil
 }

-func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfig *tls.Config) *clientv3.Client {
+func setupClient(t *testing.T, endpointPattern string, clus *integration.ClusterV3, tlsConfig *tls.Config) *clientv3.Client {
 	t.Helper()
 	endpoints := templateEndpoints(t, endpointPattern, clus)
 	kv, err := clientv3.New(clientv3.Config{
@@ -146,13 +147,13 @@ func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfi
 	return kv
 }

-func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string {
+func templateEndpoints(t *testing.T, pattern string, clus *integration.ClusterV3) []string {
 	t.Helper()
 	endpoints := []string{}
 	for _, m := range clus.Members {
 		ent := pattern
 		if strings.Contains(ent, "%d") {
-			ent = fmt.Sprintf(ent, GrpcPortNumber(m.UniqNumber, m.MemberNumber))
+			ent = fmt.Sprintf(ent, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber))
 		}
 		if strings.Contains(ent, "%s") {
 			ent = fmt.Sprintf(ent, m.Name)
@@ -165,11 +166,11 @@ func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string {
 	return endpoints
 }

-func templateAuthority(t *testing.T, pattern string, m *member) string {
+func templateAuthority(t *testing.T, pattern string, m *integration.Member) string {
 	t.Helper()
 	authority := pattern
 	if strings.Contains(authority, "%d") {
-		authority = fmt.Sprintf(authority, GrpcPortNumber(m.UniqNumber, m.MemberNumber))
+		authority = fmt.Sprintf(authority, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber))
 	}
 	if strings.Contains(authority, "%s") {
 		authority = fmt.Sprintf(authority, m.Name)
@@ -180,7 +181,7 @@ func templateAuthority(t *testing.T, pattern string, m *member) string {
 	return authority
 }

-func assertAuthority(t *testing.T, expectedAuthority string, clus *ClusterV3) {
+func assertAuthority(t *testing.T, expectedAuthority string, clus *integration.ClusterV3) {
 	t.Helper()
 	requestsFound := 0
 	for _, m := range clus.Members {

@@ -22,6 +22,7 @@ import (

 	"go.etcd.io/etcd/client/pkg/v3/testutil"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 // Infrastructure to provision a single shared cluster for tests - only
@@ -42,7 +43,7 @@ type LazyCluster interface {
 	EndpointsV3() []string

 	// Cluster - calls to this method might initialize the cluster.
-	Cluster() *ClusterV3
+	Cluster() *integration.ClusterV3

 	// Transport - call to this method might initialize the cluster.
 	Transport() *http.Transport
@@ -53,8 +54,8 @@ type LazyCluster interface {
 }

 type lazyCluster struct {
-	cfg       ClusterConfig
-	cluster   *ClusterV3
+	cfg       integration.ClusterConfig
+	cluster   *integration.ClusterV3
 	transport *http.Transport
 	once      sync.Once
 	tb        testutil.TB
@@ -64,12 +65,12 @@ type lazyCluster struct {
 // NewLazyCluster returns a new test cluster handler that gets created on the
 // first call to GetEndpoints() or GetTransport()
 func NewLazyCluster() LazyCluster {
-	return NewLazyClusterWithConfig(ClusterConfig{Size: 1})
+	return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1})
 }

 // NewLazyClusterWithConfig returns a new test cluster handler that gets created
 // on the first call to GetEndpoints() or GetTransport()
-func NewLazyClusterWithConfig(cfg ClusterConfig) LazyCluster {
+func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster {
 	tb, closer := testutil.NewTestingTBProthesis("lazy_cluster")
 	return &lazyCluster{cfg: cfg, tb: tb, closer: closer}
 }
@@ -81,7 +82,7 @@ func (lc *lazyCluster) mustLazyInit() {
 		if err != nil {
 			log.Fatal(err)
 		}
-		lc.cluster = NewClusterV3(lc.tb, &lc.cfg)
+		lc.cluster = integration.NewClusterV3(lc.tb, &lc.cfg)
 	})
 }

@@ -105,7 +106,7 @@ func (lc *lazyCluster) EndpointsV3() []string {
 	return lc.Cluster().Client(0).Endpoints()
 }

-func (lc *lazyCluster) Cluster() *ClusterV3 {
+func (lc *lazyCluster) Cluster() *integration.ClusterV3 {
 	lc.mustLazyInit()
 	return lc.cluster
 }

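LazyCluster defers provisioning until a test first asks for endpoints or a transport, so suites that never touch it pay nothing. A usage sketch against the interface shown above, written as if from the same package (the package-level variable and test name are illustrative):

import (
	"testing"

	"go.etcd.io/etcd/tests/v3/framework/integration"
)

var lazy = NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1})

func TestUsesSharedCluster(t *testing.T) {
	// First call triggers mustLazyInit; later calls reuse the cluster.
	eps := lazy.EndpointsV3()
	if len(eps) == 0 {
		t.Fatal("expected at least one endpoint")
	}
}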
@@ -23,52 +23,53 @@ import (

 	"github.com/stretchr/testify/assert"
 	"go.etcd.io/etcd/client/v2"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestPauseMember(t *testing.T) {
-	BeforeTest(t)
+	integration.BeforeTest(t)

-	c := NewCluster(t, 5)
+	c := integration.NewCluster(t, 5)
 	c.Launch(t)
 	defer c.Terminate(t)

 	for i := 0; i < 5; i++ {
 		c.Members[i].Pause()
-		membs := append([]*member{}, c.Members[:i]...)
+		membs := append([]*integration.Member{}, c.Members[:i]...)
 		membs = append(membs, c.Members[i+1:]...)
-		c.waitLeader(t, membs)
+		c.WaitMembersForLeader(t, membs)
 		clusterMustProgress(t, membs)
 		c.Members[i].Resume()
 	}
-	c.waitLeader(t, c.Members)
+	c.WaitMembersForLeader(t, c.Members)
 	clusterMustProgress(t, c.Members)
 }

 func TestRestartMember(t *testing.T) {
-	BeforeTest(t)
-	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
+	integration.BeforeTest(t)
+	c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	c.Launch(t)
 	defer c.Terminate(t)

 	for i := 0; i < 3; i++ {
 		c.Members[i].Stop(t)
-		membs := append([]*member{}, c.Members[:i]...)
+		membs := append([]*integration.Member{}, c.Members[:i]...)
 		membs = append(membs, c.Members[i+1:]...)
-		c.waitLeader(t, membs)
+		c.WaitMembersForLeader(t, membs)
 		clusterMustProgress(t, membs)
 		err := c.Members[i].Restart(t)
 		if err != nil {
 			t.Fatal(err)
 		}
 	}
-	c.waitLeader(t, c.Members)
+	c.WaitMembersForLeader(t, c.Members)
 	clusterMustProgress(t, c.Members)
 }

 func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
-	BeforeTest(t)
+	integration.BeforeTest(t)
 	size := 3
-	c := NewCluster(t, size)
+	c := integration.NewCluster(t, size)
 	m := c.Members[0].Clone(t)
 	var err error
 	m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd")
@@ -87,8 +88,8 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
 }

 func TestSnapshotAndRestartMember(t *testing.T) {
-	BeforeTest(t)
-	m := mustNewMember(t, memberConfig{name: "snapAndRestartTest", useBridge: true})
+	integration.BeforeTest(t)
+	m := integration.MustNewMember(t, integration.MemberConfig{Name: "snapAndRestartTest", UseBridge: true})
 	m.SnapshotCount = 100
 	m.Launch()
 	defer m.Terminate(t)
@@ -97,9 +98,9 @@ func TestSnapshotAndRestartMember(t *testing.T) {
 	resps := make([]*client.Response, 120)
 	var err error
 	for i := 0; i < 120; i++ {
-		cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
+		cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil)
 		kapi := client.NewKeysAPI(cc)
-		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+		ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 		key := fmt.Sprintf("foo%d", i)
 		resps[i], err = kapi.Create(ctx, "/"+key, "bar")
 		if err != nil {
@@ -112,9 +113,9 @@ func TestSnapshotAndRestartMember(t *testing.T) {

 	m.WaitOK(t)
 	for i := 0; i < 120; i++ {
-		cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
+		cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil)
 		kapi := client.NewKeysAPI(cc)
-		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+		ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
 		key := fmt.Sprintf("foo%d", i)
 		resp, err := kapi.Get(ctx, "/"+key, nil)
 		if err != nil {

@@ -25,12 +25,13 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"go.etcd.io/etcd/server/v3/storage"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 // TestMetricDbSizeBoot checks that the db size metric is set on boot.
 func TestMetricDbSizeBoot(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes")
@@ -49,12 +50,12 @@ func TestMetricDbSizeDefrag(t *testing.T) {

 // testMetricDbSizeDefrag checks that the db size metric is set after defrag.
 func testMetricDbSizeDefrag(t *testing.T, name string) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

-	kvc := toGRPC(clus.Client(0)).KV
-	mc := toGRPC(clus.Client(0)).Maintenance
+	kvc := integration.ToGRPC(clus.Client(0)).KV
+	mc := integration.ToGRPC(clus.Client(0)).Maintenance

 	// expand the db size
 	numPuts := 25 // large enough to write more than 1 page
@@ -163,8 +164,8 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
 }

 func TestMetricQuotaBackendBytes(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes")
@@ -181,8 +182,8 @@ func TestMetricQuotaBackendBytes(t *testing.T) {
 }

 func TestMetricsHealth(t *testing.T) {
-	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	integration.BeforeTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second)

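`ToGRPC` (previously the unexported `toGRPC`) hands tests the raw gRPC stubs behind a clientv3 client, which the metrics test uses to drive `KV` and `Maintenance` directly. A sketch of issuing a raw range request the same way, assuming `ToGRPC` returns the generated service clients as the hunk uses them (the test name and request literal are illustrative):

import (
	"context"
	"testing"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestRawRange(t *testing.T) {
	integration.BeforeTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// Talk to the KV service without the clientv3 conveniences.
	kvc := integration.ToGRPC(clus.Client(0)).KV
	if _, err := kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatal(err)
	}
}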
@@ -18,12 +18,14 @@ import (
 "fmt"
 "testing"
 "time"
+
+"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
-BeforeTest(t)
+integration.BeforeTest(t)

-clus := NewClusterV3(t, &ClusterConfig{Size: 5})
+clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5})
 defer clus.Terminate(t)

 leadIndex := clus.WaitLeader(t)
@@ -32,20 +34,20 @@ func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
 minority := []int{leadIndex, (leadIndex + 1) % 5}
 majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5}

-minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
-majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
+minorityMembers := getMembersByIndexSlice(clus.Cluster, minority)
+majorityMembers := getMembersByIndexSlice(clus.Cluster, majority)

 // network partition (bi-directional)
 injectPartition(t, minorityMembers, majorityMembers)

 // minority leader must be lost
-clus.waitNoLeader(minorityMembers)
+clus.WaitMembersNoLeader(minorityMembers)

 // wait extra election timeout
 time.Sleep(2 * majorityMembers[0].ElectionTimeout())

 // new leader must be from majority
-clus.waitLeader(t, majorityMembers)
+clus.WaitMembersForLeader(t, majorityMembers)

 // recover network partition (bi-directional)
 recoverPartition(t, minorityMembers, majorityMembers)
@@ -69,9 +71,9 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) {
 }

 func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
-BeforeTest(t)
+integration.BeforeTest(t)

-clus := NewClusterV3(t, &ClusterConfig{Size: 5})
+clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5})
 defer clus.Terminate(t)

 leadIndex := clus.WaitLeader(t)
@@ -80,21 +82,21 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
 majority := []int{leadIndex, (leadIndex + 1) % 5, (leadIndex + 2) % 5}
 minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5}

-majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
-minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
+majorityMembers := getMembersByIndexSlice(clus.Cluster, majority)
+minorityMembers := getMembersByIndexSlice(clus.Cluster, minority)

 // network partition (bi-directional)
 injectPartition(t, majorityMembers, minorityMembers)

 // minority leader must be lost
-clus.waitNoLeader(minorityMembers)
+clus.WaitMembersNoLeader(minorityMembers)

 // wait extra election timeout
 time.Sleep(2 * majorityMembers[0].ElectionTimeout())

 // leader must be hold in majority
-leadIndex2 := clus.waitLeader(t, majorityMembers)
-leadID, leadID2 := clus.Members[leadIndex].s.ID(), majorityMembers[leadIndex2].s.ID()
+leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers)
+leadID, leadID2 := clus.Members[leadIndex].Server.ID(), majorityMembers[leadIndex2].Server.ID()
 if leadID != leadID2 {
 return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2)
 }
@@ -108,9 +110,9 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
 }

 func TestNetworkPartition4Members(t *testing.T) {
-BeforeTest(t)
+integration.BeforeTest(t)

-clus := NewClusterV3(t, &ClusterConfig{Size: 4})
+clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 4})
 defer clus.Terminate(t)

 leadIndex := clus.WaitLeader(t)
@@ -119,8 +121,8 @@ func TestNetworkPartition4Members(t *testing.T) {
 groupA := []int{leadIndex, (leadIndex + 1) % 4}
 groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4}

-leaderPartition := getMembersByIndexSlice(clus.cluster, groupA)
-followerPartition := getMembersByIndexSlice(clus.cluster, groupB)
+leaderPartition := getMembersByIndexSlice(clus.Cluster, groupA)
+followerPartition := getMembersByIndexSlice(clus.Cluster, groupB)

 // network partition (bi-directional)
 injectPartition(t, leaderPartition, followerPartition)
@@ -137,21 +139,21 @@ func TestNetworkPartition4Members(t *testing.T) {
 clusterMustProgress(t, clus.Members)
 }

-func getMembersByIndexSlice(clus *cluster, idxs []int) []*member {
-ms := make([]*member, len(idxs))
+func getMembersByIndexSlice(clus *integration.Cluster, idxs []int) []*integration.Member {
+ms := make([]*integration.Member, len(idxs))
 for i, idx := range idxs {
 ms[i] = clus.Members[idx]
 }
 return ms
 }

-func injectPartition(t *testing.T, src, others []*member) {
+func injectPartition(t *testing.T, src, others []*integration.Member) {
 for _, m := range src {
 m.InjectPartition(t, others...)
 }
 }

-func recoverPartition(t *testing.T, src, others []*member) {
+func recoverPartition(t *testing.T, src, others []*integration.Member) {
 for _, m := range src {
 m.RecoverPartition(t, others...)
 }

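Beyond the renames, note what stays: getMembersByIndexSlice, injectPartition, and recoverPartition keep their logic and merely switch from the unexported cluster/member types to the exported integration.Cluster and integration.Member. A sketch of driving them after the change, following the five-member test above (the function name is hypothetical, and *integration.ClusterV3 as the concrete type returned by NewClusterV3 is an assumption):

    // partitionAndRecover5 is a hypothetical condensation of the test above.
    func partitionAndRecover5(t *testing.T, clus *integration.ClusterV3) {
        leadIndex := clus.WaitLeader(t)
        minority := getMembersByIndexSlice(clus.Cluster, []int{leadIndex, (leadIndex + 1) % 5})
        majority := getMembersByIndexSlice(clus.Cluster, []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5})

        injectPartition(t, minority, majority)  // cut traffic in both directions
        clus.WaitMembersNoLeader(minority)      // the isolated minority loses its leader
        clus.WaitMembersForLeader(t, majority)  // the majority elects a new one
        recoverPartition(t, minority, majority) // heal the partition
    }
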
@@ -23,7 +23,7 @@ import (
 pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 "go.etcd.io/etcd/client/v3"
 "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
-"go.etcd.io/etcd/tests/v3/integration"
+integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 "go.uber.org/zap/zaptest"

 "go.uber.org/zap"
@@ -31,9 +31,9 @@
 )

 func TestClusterProxyMemberList(t *testing.T) {
-integration.BeforeTest(t)
+integration2.BeforeTest(t)

-clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 defer clus.Terminate(t)

 cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t)
@@ -43,7 +43,7 @@ func TestClusterProxyMemberList(t *testing.T) {
 Endpoints: []string{cts.caddr},
 DialTimeout: 5 * time.Second,
 }
-client, err := integration.NewClient(t, cfg)
+client, err := integration2.NewClient(t, cfg)
 if err != nil {
 t.Fatalf("err %v, want nil", err)
 }
@@ -95,7 +95,7 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *cl
 Endpoints: endpoints,
 DialTimeout: 5 * time.Second,
 }
-client, err := integration.NewClient(t, cfg)
+client, err := integration2.NewClient(t, cfg)
 if err != nil {
 t.Fatal(err)
 }

@@ -23,15 +23,14 @@ import (
 pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 "go.etcd.io/etcd/client/v3"
 "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
-"go.etcd.io/etcd/tests/v3/integration"
-
+integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 "google.golang.org/grpc"
 )

 func TestKVProxyRange(t *testing.T) {
-integration.BeforeTest(t)
+integration2.BeforeTest(t)

-clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 defer clus.Terminate(t)

 kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t)
@@ -42,7 +41,7 @@ func TestKVProxyRange(t *testing.T) {
 Endpoints: []string{kvts.l.Addr().String()},
 DialTimeout: 5 * time.Second,
 }
-client, err := integration.NewClient(t, cfg)
+client, err := integration2.NewClient(t, cfg)
 if err != nil {
 t.Fatalf("err = %v, want nil", err)
 }
@@ -71,7 +70,7 @@ func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer {
 Endpoints: endpoints,
 DialTimeout: 5 * time.Second,
 }
-client, err := integration.NewClient(t, cfg)
+client, err := integration2.NewClient(t, cfg)
 if err != nil {
 t.Fatal(err)
 }

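Both proxy files show the second import pattern used by this change: the old go.etcd.io/etcd/tests/v3/integration import is dropped, the framework package comes in under the explicit alias integration2, and every call site is requalified. A sketch of a proxy-style test after the swap (the test name is hypothetical; the calls mirror the hunks above):

    package grpcproxy_test

    import (
        "testing"
        "time"

        clientv3 "go.etcd.io/etcd/client/v3"
        integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
    )

    // TestExampleProxyClient is a hypothetical example of the requalified calls.
    func TestExampleProxyClient(t *testing.T) {
        integration2.BeforeTest(t)
        clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
        defer clus.Terminate(t)

        cfg := clientv3.Config{
            Endpoints:   []string{clus.Members[0].GRPCURL()},
            DialTimeout: 5 * time.Second,
        }
        client, err := integration2.NewClient(t, cfg)
        if err != nil {
            t.Fatalf("err = %v, want nil", err)
        }
        defer client.Close()
    }
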
@@ -21,14 +21,14 @@ import (
 clientv3 "go.etcd.io/etcd/client/v3"
 "go.etcd.io/etcd/client/v3/naming/endpoints"
 "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
-"go.etcd.io/etcd/tests/v3/integration"
+integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 "go.uber.org/zap/zaptest"
 )

 func TestRegister(t *testing.T) {
-integration.BeforeTest(t)
+integration2.BeforeTest(t)

-clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 defer clus.Terminate(t)
 cli := clus.Client(0)
 paddr := clus.Members[0].GRPCURL()

@@ -24,14 +24,14 @@ import (
 "go.etcd.io/etcd/client/v3"
 "go.etcd.io/etcd/server/v3/embed"
 "go.etcd.io/etcd/server/v3/etcdserver"
-"go.etcd.io/etcd/tests/v3/integration"
+integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 // TestSnapshotV3RestoreMultiMemberAdd ensures that multiple members
 // can boot into the same cluster after being restored from a same
 // snapshot file, and also be able to add another member to the cluster.
 func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
-integration.BeforeTest(t)
+integration2.BeforeTest(t)

 kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
 dbPath := createSnapshotFile(t, kvs)
@@ -48,7 +48,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
 // wait for health interval + leader election
 time.Sleep(etcdserver.HealthInterval + 2*time.Second)

-cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}})
+cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}})
 if err != nil {
 t.Fatal(err)
 }
@@ -63,7 +63,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
 // wait for membership reconfiguration apply
 time.Sleep(testutil.ApplyTimeout)

-cfg := integration.NewEmbedConfig(t, "3")
+cfg := integration2.NewEmbedConfig(t, "3")
 cfg.InitialClusterToken = testClusterTkn
 cfg.ClusterState = "existing"
 cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs
@@ -88,7 +88,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
 t.Fatalf("failed to start the newly added etcd member")
 }

-cli2, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}})
+cli2, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}})
 if err != nil {
 t.Fatal(err)
 }

@@ -29,7 +29,7 @@ import (
 "go.etcd.io/etcd/client/v3"
 "go.etcd.io/etcd/etcdutl/v3/snapshot"
 "go.etcd.io/etcd/server/v3/embed"
-"go.etcd.io/etcd/tests/v3/integration"
+integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 "go.uber.org/zap/zapcore"
 "go.uber.org/zap/zaptest"
 )
@@ -37,7 +37,7 @@ import (
 // TestSnapshotV3RestoreSingle tests single node cluster restoring
 // from a snapshot file.
 func TestSnapshotV3RestoreSingle(t *testing.T) {
-integration.BeforeTest(t)
+integration2.BeforeTest(t)
 kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
 dbPath := createSnapshotFile(t, kvs)

@@ -45,7 +45,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
 urls := newEmbedURLs(clusterN * 2)
 cURLs, pURLs := urls[:clusterN], urls[clusterN:]

-cfg := integration.NewEmbedConfig(t, "s1")
+cfg := integration2.NewEmbedConfig(t, "s1")
 cfg.InitialClusterToken = testClusterTkn
 cfg.ClusterState = "existing"
 cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
@@ -82,7 +82,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
 }

 var cli *clientv3.Client
-cli, err = integration.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
+cli, err = integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
 if err != nil {
 t.Fatal(err)
 }
@@ -103,7 +103,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
 // can boot into the same cluster after being restored from a same
 // snapshot file.
 func TestSnapshotV3RestoreMulti(t *testing.T) {
-integration.BeforeTest(t)
+integration2.BeforeTest(t)
 kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
 dbPath := createSnapshotFile(t, kvs)

@@ -119,7 +119,7 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {
 time.Sleep(time.Second)

 for i := 0; i < clusterN; i++ {
-cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}})
+cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}})
 if err != nil {
 t.Fatal(err)
 }
@@ -139,8 +139,8 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {

 // TestCorruptedBackupFileCheck tests if we can correctly identify a corrupted backup file.
 func TestCorruptedBackupFileCheck(t *testing.T) {
-dbPath := integration.MustAbsPath("testdata/corrupted_backup.db")
-integration.BeforeTest(t)
+dbPath := integration2.MustAbsPath("testdata/corrupted_backup.db")
+integration2.BeforeTest(t)
 if _, err := os.Stat(dbPath); err != nil {
 t.Fatalf("test file [%s] does not exist: %v", dbPath, err)
 }
@@ -175,7 +175,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
 urls := newEmbedURLs(clusterN * 2)
 cURLs, pURLs := urls[:clusterN], urls[clusterN:]

-cfg := integration.NewEmbedConfig(t, "default")
+cfg := integration2.NewEmbedConfig(t, "default")
 cfg.ClusterState = "new"
 cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
 cfg.LPUrls, cfg.APUrls = pURLs, pURLs
@@ -194,7 +194,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
 }

 ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
-cli, err := integration.NewClient(t, ccfg)
+cli, err := integration2.NewClient(t, ccfg)
 if err != nil {
 t.Fatal(err)
 }
@@ -234,7 +234,7 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (

 cfgs := make([]*embed.Config, clusterN)
 for i := 0; i < clusterN; i++ {
-cfg := integration.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
+cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
 cfg.InitialClusterToken = testClusterTkn
 cfg.ClusterState = "existing"
 cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}

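The restore hunks repeat one configuration recipe: build an embed.Config through the framework's NewEmbedConfig helper, then point it at a restored snapshot by setting ClusterState to "existing" (LCUrls/ACUrls are the listen/advertise client URLs, LPUrls/APUrls the peer equivalents). A condensed sketch, with restoredMemberConfig as a hypothetical helper name:

    import (
        "fmt"
        "net/url"
        "testing"

        "go.etcd.io/etcd/server/v3/embed"
        integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
    )

    // restoredMemberConfig condenses the per-member setup from the hunks above.
    func restoredMemberConfig(t *testing.T, i int, token string, cURL, pURL url.URL) *embed.Config {
        cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
        cfg.InitialClusterToken = token
        cfg.ClusterState = "existing" // join a cluster restored from a snapshot
        cfg.LCUrls, cfg.ACUrls = []url.URL{cURL}, []url.URL{cURL}
        cfg.LPUrls, cfg.APUrls = []url.URL{pURL}, []url.URL{pURL}
        return cfg
    }
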
@@ -15,12 +15,13 @@
 package integration_test

 import (
-"go.etcd.io/etcd/tests/v3/integration"
 "testing"
+
+integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestBeforeTestWithoutLeakDetection(t *testing.T) {
-integration.BeforeTest(t, integration.WithoutGoLeakDetection(), integration.WithoutSkipInShort())
+integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort())
 // Intentional leak that should get ignored
 go func() {

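This hunk also shows that BeforeTest takes functional options. Judging by their names, the two used here opt a test out of goroutine-leak detection and out of being skipped under go test -short; a sketch (the test name is hypothetical, the option names come from the hunk):

    import (
        "testing"

        integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
    )

    // TestExampleWithOptions is a hypothetical example of the options above.
    func TestExampleWithOptions(t *testing.T) {
        integration2.BeforeTest(t,
            integration2.WithoutGoLeakDetection(), // presumably: tolerate leaked goroutines
            integration2.WithoutSkipInShort(),     // presumably: run even with -short
        )
        // ... a test body that intentionally leaks a goroutine ...
    }
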
@@ -21,6 +21,7 @@ import (

 "github.com/coreos/go-semver/semver"
 "github.com/stretchr/testify/assert"
+"go.etcd.io/etcd/tests/v3/framework/integration"
 "go.uber.org/zap"

 "go.etcd.io/etcd/client/pkg/v3/testutil"
@@ -33,7 +34,7 @@
 func TestEtcdVersionFromWAL(t *testing.T) {
 testutil.SkipTestIfShortMode(t,
 "Wal creation tests are depending on embedded etcd server so are integration-level tests.")
-cfg := NewEmbedConfig(t, "default")
+cfg := integration.NewEmbedConfig(t, "default")
 srv, err := embed.StartEtcd(cfg)
 if err != nil {
 t.Fatal(err)
@@ -45,7 +46,7 @@ func TestEtcdVersionFromWAL(t *testing.T) {
 }

 ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
-cli, err := NewClient(t, ccfg)
+cli, err := integration.NewClient(t, ccfg)
 if err != nil {
 srv.Close()
 t.Fatal(err)

@@ -27,11 +27,12 @@ import (
 "time"

 "go.etcd.io/etcd/client/pkg/v3/transport"
+"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestV2Set(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -92,8 +93,8 @@ func TestV2Set(t *testing.T) {
 }

 func TestV2CreateUpdate(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -228,8 +229,8 @@ func TestV2CreateUpdate(t *testing.T) {
 }

 func TestV2CAS(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -376,8 +377,8 @@ func TestV2CAS(t *testing.T) {
 }

 func TestV2Delete(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -476,8 +477,8 @@ func TestV2Delete(t *testing.T) {
 }

 func TestV2CAD(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -576,8 +577,8 @@ func TestV2CAD(t *testing.T) {
 }

 func TestV2Unique(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -643,8 +644,8 @@ func TestV2Unique(t *testing.T) {
 }

 func TestV2Get(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -741,8 +742,8 @@ func TestV2Get(t *testing.T) {
 }

 func TestV2QuorumGet(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -839,8 +840,8 @@ func TestV2QuorumGet(t *testing.T) {
 }

 func TestV2Watch(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -877,8 +878,8 @@ func TestV2Watch(t *testing.T) {
 }

 func TestV2WatchWithIndex(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -944,8 +945,8 @@ func TestV2WatchWithIndex(t *testing.T) {
 }

 func TestV2WatchKeyInDir(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

@@ -1005,8 +1006,8 @@ func TestV2WatchKeyInDir(t *testing.T) {
 }

 func TestV2Head(t *testing.T) {
-BeforeTest(t)
-cl := NewCluster(t, 1)
+integration.BeforeTest(t)
+cl := integration.NewCluster(t, 1)
 cl.Launch(t)
 defer cl.Terminate(t)

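Every v2 test above migrates with the same four-line fixture, so the whole file is a mechanical rename. As a sketch (the test name is hypothetical; NewCluster apparently only constructs the fixture, since Launch is called separately):

    import (
        "testing"

        "go.etcd.io/etcd/tests/v3/framework/integration"
    )

    // TestExampleV2 is a hypothetical example of the v2 fixture pattern.
    func TestExampleV2(t *testing.T) {
        integration.BeforeTest(t)
        cl := integration.NewCluster(t, 1) // construct a one-member v2-style cluster
        cl.Launch(t)                       // start its members
        defer cl.Terminate(t)
    }
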
Some files were not shown because too many files have changed in this diff.