From ca9b720c1d53fc1304eb6872d29e180f08fb1ef0 Mon Sep 17 00:00:00 2001 From: Marek Siarkowicz Date: Fri, 8 Oct 2021 21:59:56 +0200 Subject: [PATCH] tests: Move integration setup to separate framework package --- tests/framework/e2e/etcd_spawn_cov.go | 2 +- tests/framework/e2e/flags.go | 2 +- tests/{ => framework}/integration/bridge.go | 0 tests/{ => framework}/integration/cluster.go | 752 +++++++++--------- .../integration/cluster_direct.go | 4 +- .../integration/cluster_proxy.go | 8 +- tests/{ => framework}/integration/testing.go | 0 tests/integration/client/client_test.go | 24 +- tests/integration/clientv3/cluster_test.go | 46 +- .../clientv3/concurrency/election_test.go | 4 +- .../clientv3/concurrency/mutex_test.go | 4 +- .../clientv3/connectivity/black_hole_test.go | 16 +- .../clientv3/connectivity/dial_test.go | 52 +- .../connectivity/network_partition_test.go | 30 +- .../connectivity/server_shutdown_test.go | 34 +- .../clientv3/examples/main_test.go | 3 +- .../experimental/recipes/v3_barrier_test.go | 10 +- .../recipes/v3_double_barrier_test.go | 10 +- .../experimental/recipes/v3_lock_test.go | 56 +- .../experimental/recipes/v3_queue_test.go | 30 +- tests/integration/clientv3/kv_test.go | 96 +-- .../integration/clientv3/lease/lease_test.go | 96 +-- .../clientv3/lease/leasing_test.go | 168 ++-- .../integration/clientv3/maintenance_test.go | 32 +- tests/integration/clientv3/metrics_test.go | 13 +- tests/integration/clientv3/mirror_test.go | 10 +- tests/integration/clientv3/namespace_test.go | 10 +- .../clientv3/naming/endpoints_test.go | 15 +- .../clientv3/naming/resolver_test.go | 7 +- .../integration/clientv3/ordering_kv_test.go | 14 +- .../clientv3/ordering_util_test.go | 14 +- tests/integration/clientv3/role_test.go | 6 +- .../clientv3/snapshot/v3_snapshot_test.go | 6 +- tests/integration/clientv3/txn_test.go | 26 +- tests/integration/clientv3/user_test.go | 22 +- .../clientv3/watch_fragment_test.go | 8 +- tests/integration/clientv3/watch_test.go | 100 +-- tests/integration/cluster_test.go | 209 ++--- tests/integration/embed/embed_test.go | 10 +- tests/integration/grpc_test.go | 25 +- tests/integration/lazy_cluster.go | 15 +- tests/integration/member_test.go | 37 +- tests/integration/metrics_test.go | 21 +- tests/integration/network_partition_test.go | 44 +- .../proxy/grpcproxy/cluster_test.go | 10 +- tests/integration/proxy/grpcproxy/kv_test.go | 11 +- .../proxy/grpcproxy/register_test.go | 6 +- tests/integration/snapshot/member_test.go | 10 +- .../integration/snapshot/v3_snapshot_test.go | 22 +- tests/integration/testing_test.go | 5 +- tests/integration/utl_wal_version_test.go | 5 +- tests/integration/v2_http_kv_test.go | 49 +- .../v2store/store_tag_not_v2v3_test.go | 4 +- .../v2store/store_tag_v2v3_test.go | 8 +- tests/integration/v2store/store_v2v3_test.go | 18 +- tests/integration/v3_alarm_test.go | 29 +- tests/integration/v3_auth_test.go | 73 +- tests/integration/v3_election_test.go | 31 +- tests/integration/v3_grpc_inflight_test.go | 15 +- tests/integration/v3_grpc_test.go | 227 +++--- tests/integration/v3_health_test.go | 5 +- tests/integration/v3_kv_test.go | 5 +- tests/integration/v3_leadership_test.go | 59 +- tests/integration/v3_lease_test.go | 205 ++--- tests/integration/v3_stm_test.go | 25 +- tests/integration/v3_tls_test.go | 9 +- tests/integration/v3_watch_restore_test.go | 19 +- tests/integration/v3_watch_test.go | 127 +-- tests/integration/v3election_grpc_test.go | 21 +- tests/integration/v3lock_grpc_test.go | 11 +- 70 files changed, 1561 insertions(+), 1539
deletions(-) rename tests/{ => framework}/integration/bridge.go (100%) rename tests/{ => framework}/integration/cluster.go (66%) rename tests/{ => framework}/integration/cluster_direct.go (95%) rename tests/{ => framework}/integration/cluster_proxy.go (97%) rename tests/{ => framework}/integration/testing.go (100%) diff --git a/tests/framework/e2e/etcd_spawn_cov.go b/tests/framework/e2e/etcd_spawn_cov.go index de4e404f4..7c4ff8c0f 100644 --- a/tests/framework/e2e/etcd_spawn_cov.go +++ b/tests/framework/e2e/etcd_spawn_cov.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/integration" + "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap" ) diff --git a/tests/framework/e2e/flags.go b/tests/framework/e2e/flags.go index 139cf29c5..9a2c933d2 100644 --- a/tests/framework/e2e/flags.go +++ b/tests/framework/e2e/flags.go @@ -19,7 +19,7 @@ import ( "os" "runtime" - "go.etcd.io/etcd/tests/v3/integration" + "go.etcd.io/etcd/tests/v3/framework/integration" ) var ( diff --git a/tests/integration/bridge.go b/tests/framework/integration/bridge.go similarity index 100% rename from tests/integration/bridge.go rename to tests/framework/integration/bridge.go diff --git a/tests/integration/cluster.go b/tests/framework/integration/cluster.go similarity index 66% rename from tests/integration/cluster.go rename to tests/framework/integration/cluster.go index 528bcb902..55d9e28c4 100644 --- a/tests/integration/cluster.go +++ b/tests/framework/integration/cluster.go @@ -67,31 +67,31 @@ import ( const ( // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss. RequestWaitTimeout = 5 * time.Second - tickDuration = 10 * time.Millisecond - requestTimeout = 20 * time.Second + TickDuration = 10 * time.Millisecond + RequestTimeout = 20 * time.Second - clusterName = "etcd" - basePort = 21000 + ClusterName = "etcd" + BasePort = 21000 URLScheme = "unix" URLSchemeTLS = "unixs" - baseGRPCPort = 30000 + BaseGRPCPort = 30000 ) var ( - electionTicks = 10 + ElectionTicks = 10 - // integration test uses unique ports, counting up, to listen for each + // LocalListenCount integration test uses unique ports, counting up, to listen for each // member, ensuring restarted members can listen on the same port again. 
- localListenCount = int64(0) + LocalListenCount = int64(0) - testTLSInfo = transport.TLSInfo{ + TestTLSInfo = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server.key.insecure"), CertFile: MustAbsPath("../fixtures/server.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } - testTLSInfoWithSpecificUsage = transport.TLSInfo{ + TestTLSInfoWithSpecificUsage = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server-serverusage.key.insecure"), CertFile: MustAbsPath("../fixtures/server-serverusage.crt"), ClientKeyFile: MustAbsPath("../fixtures/client-clientusage.key.insecure"), @@ -100,33 +100,33 @@ var ( ClientCertAuth: true, } - testTLSInfoIP = transport.TLSInfo{ + TestTLSInfoIP = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server-ip.key.insecure"), CertFile: MustAbsPath("../fixtures/server-ip.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } - testTLSInfoExpired = transport.TLSInfo{ + TestTLSInfoExpired = transport.TLSInfo{ KeyFile: MustAbsPath("./fixtures-expired/server.key.insecure"), CertFile: MustAbsPath("./fixtures-expired/server.crt"), TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"), ClientCertAuth: true, } - testTLSInfoExpiredIP = transport.TLSInfo{ + TestTLSInfoExpiredIP = transport.TLSInfo{ KeyFile: MustAbsPath("./fixtures-expired/server-ip.key.insecure"), CertFile: MustAbsPath("./fixtures-expired/server-ip.crt"), TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"), ClientCertAuth: true, } - defaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", + DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure")) - // uniqueNumber is used to generate unique port numbers + // UniqueNumber is used to generate unique port numbers // Should only be accessed via atomic package methods. 
- uniqueNumber int32 + UniqueNumber int32 ) type ClusterConfig struct { @@ -171,33 +171,33 @@ type ClusterConfig struct { WatchProgressNotifyInterval time.Duration } -type cluster struct { - cfg *ClusterConfig - Members []*member - lastMemberNum int +type Cluster struct { + Cfg *ClusterConfig + Members []*Member + LastMemberNum int } -func (c *cluster) generateMemberName() string { - c.lastMemberNum++ - return fmt.Sprintf("m%v", c.lastMemberNum-1) +func (c *Cluster) generateMemberName() string { + c.LastMemberNum++ + return fmt.Sprintf("m%v", c.LastMemberNum-1) } -func schemeFromTLSInfo(tls *transport.TLSInfo) string { +func SchemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return URLScheme } return URLSchemeTLS } -func (c *cluster) fillClusterForMembers() error { - if c.cfg.DiscoveryURL != "" { - // cluster will be discovered +func (c *Cluster) fillClusterForMembers() error { + if c.Cfg.DiscoveryURL != "" { + // Cluster will be discovered return nil } addrs := make([]string, 0) for _, m := range c.Members { - scheme := schemeFromTLSInfo(m.PeerTLSInfo) + scheme := SchemeFromTLSInfo(m.PeerTLSInfo) for _, l := range m.PeerListeners { addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String())) } @@ -213,11 +213,11 @@ func (c *cluster) fillClusterForMembers() error { return nil } -func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster { - testutil.SkipTestIfShortMode(t, "Cannot start etcd cluster in --short tests") +func NewClusterFromConfig(t testutil.TB, cfg *ClusterConfig) *Cluster { + testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests") - c := &cluster{cfg: cfg} - ms := make([]*member, cfg.Size) + c := &Cluster{Cfg: cfg} + ms := make([]*Member, cfg.Size) for i := 0; i < cfg.Size; i++ { ms[i] = c.mustNewMember(t, int64(i)) } @@ -229,24 +229,24 @@ func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster { return c } -// NewCluster returns an unlaunched cluster of the given size which has been +// NewCluster returns an unlaunched Cluster of the given size which has been // set to use static bootstrap. -func NewCluster(t testutil.TB, size int) *cluster { +func NewCluster(t testutil.TB, size int) *Cluster { t.Helper() - return newCluster(t, &ClusterConfig{Size: size}) + return NewClusterFromConfig(t, &ClusterConfig{Size: size}) } -// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration -func NewClusterByConfig(t testutil.TB, cfg *ClusterConfig) *cluster { - return newCluster(t, cfg) +// NewClusterByConfig returns an unlaunched Cluster defined by a Cluster configuration +func NewClusterByConfig(t testutil.TB, cfg *ClusterConfig) *Cluster { + return NewClusterFromConfig(t, cfg) } -func (c *cluster) Launch(t testutil.TB) { +func (c *Cluster) Launch(t testutil.TB) { errc := make(chan error) for _, m := range c.Members { // Members are launched in separate goroutines because if they boot // using discovery url, they have to wait for others to register to continue. 
- go func(m *member) { + go func(m *Member) { errc <- m.Launch() }(m) } @@ -256,28 +256,28 @@ func (c *cluster) Launch(t testutil.TB) { t.Fatalf("error setting up member: %v", err) } } - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) + // wait Cluster to be stable to receive future client requests + c.WaitMembersMatch(t, c.HTTPMembers()) c.waitVersion() for _, m := range c.Members { t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL()) } } -func (c *cluster) URL(i int) string { +func (c *Cluster) URL(i int) string { return c.Members[i].ClientURLs[0].String() } -// URLs returns a list of all active client URLs in the cluster -func (c *cluster) URLs() []string { +// URLs returns a list of all active client URLs in the Cluster +func (c *Cluster) URLs() []string { return getMembersURLs(c.Members) } -func getMembersURLs(members []*member) []string { +func getMembersURLs(members []*Member) []string { urls := make([]string, 0) for _, m := range members { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } @@ -289,11 +289,11 @@ func getMembersURLs(members []*member) []string { } // HTTPMembers returns a list of all active members as client.Members -func (c *cluster) HTTPMembers() []client.Member { +func (c *Cluster) HTTPMembers() []client.Member { ms := []client.Member{} for _, m := range c.Members { - pScheme := schemeFromTLSInfo(m.PeerTLSInfo) - cScheme := schemeFromTLSInfo(m.ClientTLSInfo) + pScheme := SchemeFromTLSInfo(m.PeerTLSInfo) + cScheme := SchemeFromTLSInfo(m.ClientTLSInfo) cm := client.Member{Name: m.Name} for _, ln := range m.PeerListeners { cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String()) @@ -306,33 +306,33 @@ func (c *cluster) HTTPMembers() []client.Member { return ms } -func (c *cluster) mustNewMember(t testutil.TB, memberNumber int64) *member { - m := mustNewMember(t, - memberConfig{ - name: c.generateMemberName(), - memberNumber: memberNumber, - authToken: c.cfg.AuthToken, - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, - maxTxnOps: c.cfg.MaxTxnOps, - maxRequestBytes: c.cfg.MaxRequestBytes, - snapshotCount: c.cfg.SnapshotCount, - snapshotCatchUpEntries: c.cfg.SnapshotCatchUpEntries, - grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime, - grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, - grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, - clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize, - clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize, - useIP: c.cfg.UseIP, - useBridge: c.cfg.UseBridge, - useTCP: c.cfg.UseTCP, - enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint, - leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval, - WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval, +func (c *Cluster) mustNewMember(t testutil.TB, memberNumber int64) *Member { + m := MustNewMember(t, + MemberConfig{ + Name: c.generateMemberName(), + MemberNumber: memberNumber, + AuthToken: c.Cfg.AuthToken, + PeerTLS: c.Cfg.PeerTLS, + ClientTLS: c.Cfg.ClientTLS, + QuotaBackendBytes: c.Cfg.QuotaBackendBytes, + MaxTxnOps: c.Cfg.MaxTxnOps, + MaxRequestBytes: c.Cfg.MaxRequestBytes, + SnapshotCount: c.Cfg.SnapshotCount, + SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries, + GrpcKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime, + GrpcKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval, + GrpcKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout, + ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, + ClientMaxCallRecvMsgSize: 
c.Cfg.ClientMaxCallRecvMsgSize, + UseIP: c.Cfg.UseIP, + UseBridge: c.Cfg.UseBridge, + UseTCP: c.Cfg.UseTCP, + EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint, + LeaseCheckpointInterval: c.Cfg.LeaseCheckpointInterval, + WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval, }) - m.DiscoveryURL = c.cfg.DiscoveryURL - if c.cfg.UseGRPC { + m.DiscoveryURL = c.Cfg.DiscoveryURL + if c.Cfg.UseGRPC { if err := m.listenGRPC(); err != nil { t.Fatal(err) } @@ -341,17 +341,17 @@ func (c *cluster) mustNewMember(t testutil.TB, memberNumber int64) *member { } // addMember return PeerURLs of the added member. -func (c *cluster) addMember(t testutil.TB) types.URLs { +func (c *Cluster) addMember(t testutil.TB) types.URLs { m := c.mustNewMember(t, 0) - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) + scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) - // send add request to the cluster + // send add request to the Cluster var err error for i := 0; i < len(c.Members); i++ { clientURL := c.URL(i) peerURL := scheme + "://" + m.PeerListeners[0].Addr().String() - if err = c.addMemberByURL(t, clientURL, peerURL); err == nil { + if err = c.AddMemberByURL(t, clientURL, peerURL); err == nil { break } } @@ -369,74 +369,74 @@ func (c *cluster) addMember(t testutil.TB) types.URLs { t.Fatal(err) } c.Members = append(c.Members, m) - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) + // wait Cluster to be stable to receive future client requests + c.WaitMembersMatch(t, c.HTTPMembers()) return m.PeerURLs } -func (c *cluster) addMemberByURL(t testutil.TB, clientURL, peerURL string) error { - cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS) +func (c *Cluster) AddMemberByURL(t testutil.TB, clientURL, peerURL string) error { + cc := MustNewHTTPClient(t, []string{clientURL}, c.Cfg.ClientTLS) ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := ma.Add(ctx, peerURL) cancel() if err != nil { return err } - // wait for the add node entry applied in the cluster + // wait for the add node entry applied in the Cluster members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) - c.waitMembersMatch(t, members) + c.WaitMembersMatch(t, members) return nil } // AddMember return PeerURLs of the added member. 
-func (c *cluster) AddMember(t testutil.TB) types.URLs { +func (c *Cluster) AddMember(t testutil.TB) types.URLs { return c.addMember(t) } -func (c *cluster) RemoveMember(t testutil.TB, id uint64) { - if err := c.removeMember(t, id); err != nil { +func (c *Cluster) MustRemoveMember(t testutil.TB, id uint64) { + if err := c.RemoveMember(t, id); err != nil { t.Fatal(err) } } -func (c *cluster) removeMember(t testutil.TB, id uint64) error { - // send remove request to the cluster - cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS) +func (c *Cluster) RemoveMember(t testutil.TB, id uint64) error { + // send remove request to the Cluster + cc := MustNewHTTPClient(t, c.URLs(), c.Cfg.ClientTLS) ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) err := ma.Remove(ctx, types.ID(id).String()) cancel() if err != nil { return err } - newMembers := make([]*member, 0) + newMembers := make([]*Member, 0) for _, m := range c.Members { - if uint64(m.s.ID()) != id { + if uint64(m.Server.ID()) != id { newMembers = append(newMembers, m) } else { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): m.Terminate(t) // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout // TODO: remove connection write timeout by selecting on http response closeNotifier // blocking on https://github.com/golang/go/issues/9524 - case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout): - t.Fatalf("failed to remove member %s in time", m.s.ID()) + case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout): + t.Fatalf("failed to remove member %s in time", m.Server.ID()) } } } c.Members = newMembers - c.waitMembersMatch(t, c.HTTPMembers()) + c.WaitMembersMatch(t, c.HTTPMembers()) return nil } -func (c *cluster) Terminate(t testutil.TB) { +func (c *Cluster) Terminate(t testutil.TB) { var wg sync.WaitGroup wg.Add(len(c.Members)) for _, m := range c.Members { - go func(mm *member) { + go func(mm *Member) { defer wg.Done() mm.Terminate(t) }(m) @@ -444,39 +444,39 @@ func (c *cluster) Terminate(t testutil.TB) { wg.Wait() } -func (c *cluster) waitMembersMatch(t testutil.TB, membs []client.Member) { +func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []client.Member) { for _, u := range c.URLs() { - cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS) + cc := MustNewHTTPClient(t, []string{u}, c.Cfg.ClientTLS) ma := client.NewMembersAPI(cc) for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) ms, err := ma.List(ctx) cancel() if err == nil && isMembersEqual(ms, membs) { break } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } } // WaitLeader returns index of the member in c.Members that is leader (or -1). -func (c *cluster) WaitLeader(t testutil.TB) int { return c.waitLeader(t, c.Members) } +func (c *Cluster) WaitLeader(t testutil.TB) int { return c.WaitMembersForLeader(t, c.Members) } -// waitLeader waits until given members agree on the same leader, +// WaitMembersForLeader waits until given members agree on the same leader, // and returns its 'index' in the 'membs' list (or -1). 
-func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { +func (c *Cluster) WaitMembersForLeader(t testutil.TB, membs []*Member) int { possibleLead := make(map[uint64]bool) var lead uint64 for _, m := range membs { - possibleLead[uint64(m.s.ID())] = true + possibleLead[uint64(m.Server.ID())] = true } cc := MustNewHTTPClient(t, getMembersURLs(membs), nil) kapi := client.NewKeysAPI(cc) // ensure leader is up via linearizable get for { - ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 10*TickDuration+time.Second) _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) cancel() if err == nil || strings.Contains(err.Error(), "Key not found") { @@ -488,21 +488,21 @@ func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { lead = 0 for _, m := range membs { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } - if lead != 0 && lead != m.s.Lead() { + if lead != 0 && lead != m.Server.Lead() { lead = 0 - time.Sleep(10 * tickDuration) + time.Sleep(10 * TickDuration) break } - lead = m.s.Lead() + lead = m.Server.Lead() } } for i, m := range membs { - if uint64(m.s.ID()) == lead { + if uint64(m.Server.ID()) == lead { return i } } @@ -510,35 +510,35 @@ func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { return -1 } -func (c *cluster) WaitNoLeader() { c.waitNoLeader(c.Members) } +func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) } -// waitNoLeader waits until given members lose leader. -func (c *cluster) waitNoLeader(membs []*member) { +// WaitMembersNoLeader waits until given members lose leader. +func (c *Cluster) WaitMembersNoLeader(membs []*Member) { noLeader := false for !noLeader { noLeader = true for _, m := range membs { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } - if m.s.Lead() != 0 { + if m.Server.Lead() != 0 { noLeader = false - time.Sleep(10 * tickDuration) + time.Sleep(10 * TickDuration) break } } } } -func (c *cluster) waitVersion() { +func (c *Cluster) waitVersion() { for _, m := range c.Members { for { - if m.s.ClusterVersion() != nil { + if m.Server.ClusterVersion() != nil { break } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } } @@ -555,9 +555,9 @@ func isMembersEqual(membs []client.Member, wmembs []client.Member) bool { } func newLocalListener(t testutil.TB) net.Listener { - c := atomic.AddInt64(&localListenCount, 1) + c := atomic.AddInt64(&LocalListenCount, 1) // Go 1.8+ allows only numbers in port - addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid()) + addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid()) return NewListenerWithAddr(t, addr) } @@ -569,82 +569,82 @@ func NewListenerWithAddr(t testutil.TB, addr string) net.Listener { return l } -type member struct { +type Member struct { config.ServerConfig UniqNumber int64 MemberNumber int64 PeerListeners, ClientListeners []net.Listener - grpcListener net.Listener + GrpcListener net.Listener // PeerTLSInfo enables peer TLS when set PeerTLSInfo *transport.TLSInfo // ClientTLSInfo enables client TLS when set ClientTLSInfo *transport.TLSInfo DialOptions []grpc.DialOption - raftHandler *testutil.PauseableHandler - s *etcdserver.EtcdServer - serverClosers []func() + RaftHandler *testutil.PauseableHandler + Server *etcdserver.EtcdServer + ServerClosers []func() - grpcServerOpts []grpc.ServerOption - grpcServer *grpc.Server - grpcServerPeer *grpc.Server - grpcURL string 
- grpcBridge *bridge + GrpcServerOpts []grpc.ServerOption + GrpcServer *grpc.Server + GrpcServerPeer *grpc.Server + GrpcURL string + GrpcBridge *bridge - // serverClient is a clientv3 that directly calls the etcdserver. - serverClient *clientv3.Client + // ServerClient is a clientv3 that directly calls the etcdserver. + ServerClient *clientv3.Client - keepDataDirTerminate bool - clientMaxCallSendMsgSize int - clientMaxCallRecvMsgSize int - useIP bool - useBridge bool - useTCP bool + KeepDataDirTerminate bool + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int + UseIP bool + UseBridge bool + UseTCP bool - isLearner bool - closed bool + IsLearner bool + Closed bool - grpcServerRecorder *grpc_testing.GrpcRecorder + GrpcServerRecorder *grpc_testing.GrpcRecorder } -func (m *member) GRPCURL() string { return m.grpcURL } +func (m *Member) GRPCURL() string { return m.GrpcURL } -type memberConfig struct { - name string - uniqNumber int64 - memberNumber int64 - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - authToken string - quotaBackendBytes int64 - maxTxnOps uint - maxRequestBytes uint - snapshotCount uint64 - snapshotCatchUpEntries uint64 - grpcKeepAliveMinTime time.Duration - grpcKeepAliveInterval time.Duration - grpcKeepAliveTimeout time.Duration - clientMaxCallSendMsgSize int - clientMaxCallRecvMsgSize int - useIP bool - useBridge bool - useTCP bool - enableLeaseCheckpoint bool - leaseCheckpointInterval time.Duration +type MemberConfig struct { + Name string + UniqNumber int64 + MemberNumber int64 + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + AuthToken string + QuotaBackendBytes int64 + MaxTxnOps uint + MaxRequestBytes uint + SnapshotCount uint64 + SnapshotCatchUpEntries uint64 + GrpcKeepAliveMinTime time.Duration + GrpcKeepAliveInterval time.Duration + GrpcKeepAliveTimeout time.Duration + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int + UseIP bool + UseBridge bool + UseTCP bool + EnableLeaseCheckpoint bool + LeaseCheckpointInterval time.Duration WatchProgressNotifyInterval time.Duration } -// mustNewMember return an inited member with the given name. If peerTLS is +// MustNewMember return an inited member with the given name. If peerTLS is // set, it will use https scheme to communicate between peers. 
-func mustNewMember(t testutil.TB, mcfg memberConfig) *member { +func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member { var err error - m := &member{ - MemberNumber: mcfg.memberNumber, - UniqNumber: atomic.AddInt64(&localListenCount, 1), + m := &Member{ + MemberNumber: mcfg.MemberNumber, + UniqNumber: atomic.AddInt64(&LocalListenCount, 1), } - peerScheme := schemeFromTLSInfo(mcfg.peerTLS) - clientScheme := schemeFromTLSInfo(mcfg.clientTLS) + peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS) + clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS) pln := newLocalListener(t) m.PeerListeners = []net.Listener{pln} @@ -652,7 +652,7 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { if err != nil { t.Fatal(err) } - m.PeerTLSInfo = mcfg.peerTLS + m.PeerTLSInfo = mcfg.PeerTLS cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -660,75 +660,75 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { if err != nil { t.Fatal(err) } - m.ClientTLSInfo = mcfg.clientTLS + m.ClientTLSInfo = mcfg.ClientTLS - m.Name = mcfg.name + m.Name = mcfg.Name m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd") if err != nil { t.Fatal(err) } - clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String()) + clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) if err != nil { t.Fatal(err) } - m.InitialClusterToken = clusterName + m.InitialClusterToken = ClusterName m.NewCluster = true m.BootstrapTimeout = 10 * time.Millisecond if m.PeerTLSInfo != nil { m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo } - m.ElectionTicks = electionTicks + m.ElectionTicks = ElectionTicks m.InitialElectionTickAdvance = true - m.TickMs = uint(tickDuration / time.Millisecond) - m.QuotaBackendBytes = mcfg.quotaBackendBytes - m.MaxTxnOps = mcfg.maxTxnOps + m.TickMs = uint(TickDuration / time.Millisecond) + m.QuotaBackendBytes = mcfg.QuotaBackendBytes + m.MaxTxnOps = mcfg.MaxTxnOps if m.MaxTxnOps == 0 { m.MaxTxnOps = embed.DefaultMaxTxnOps } - m.MaxRequestBytes = mcfg.maxRequestBytes + m.MaxRequestBytes = mcfg.MaxRequestBytes if m.MaxRequestBytes == 0 { m.MaxRequestBytes = embed.DefaultMaxRequestBytes } m.SnapshotCount = etcdserver.DefaultSnapshotCount - if mcfg.snapshotCount != 0 { - m.SnapshotCount = mcfg.snapshotCount + if mcfg.SnapshotCount != 0 { + m.SnapshotCount = mcfg.SnapshotCount } m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries - if mcfg.snapshotCatchUpEntries != 0 { - m.SnapshotCatchUpEntries = mcfg.snapshotCatchUpEntries + if mcfg.SnapshotCatchUpEntries != 0 { + m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries } // for the purpose of integration testing, simple token is enough m.AuthToken = "simple" - if mcfg.authToken != "" { - m.AuthToken = mcfg.authToken + if mcfg.AuthToken != "" { + m.AuthToken = mcfg.AuthToken } m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing - m.grpcServerOpts = []grpc.ServerOption{} - if mcfg.grpcKeepAliveMinTime > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: mcfg.grpcKeepAliveMinTime, + m.GrpcServerOpts = []grpc.ServerOption{} + if mcfg.GrpcKeepAliveMinTime > time.Duration(0) { + m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: mcfg.GrpcKeepAliveMinTime, PermitWithoutStream: false, })) } - if mcfg.grpcKeepAliveInterval > time.Duration(0) 
&& - mcfg.grpcKeepAliveTimeout > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: mcfg.grpcKeepAliveInterval, - Timeout: mcfg.grpcKeepAliveTimeout, + if mcfg.GrpcKeepAliveInterval > time.Duration(0) && + mcfg.GrpcKeepAliveTimeout > time.Duration(0) { + m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: mcfg.GrpcKeepAliveInterval, + Timeout: mcfg.GrpcKeepAliveTimeout, })) } - m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize - m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize - m.useIP = mcfg.useIP - m.useBridge = mcfg.useBridge - m.useTCP = mcfg.useTCP - m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint - m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval + m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize + m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize + m.UseIP = mcfg.UseIP + m.UseBridge = mcfg.UseBridge + m.UseTCP = mcfg.UseTCP + m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint + m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval @@ -737,8 +737,8 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration m.V2Deprecation = config.V2_DEPR_DEFAULT - m.grpcServerRecorder = &grpc_testing.GrpcRecorder{} - m.Logger = memberLogger(t, mcfg.name) + m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{} + m.Logger = memberLogger(t, mcfg.Name) t.Cleanup(func() { // if we didn't cleanup the logger, the consecutive test // might reuse this (t). @@ -758,7 +758,7 @@ func memberLogger(t testutil.TB, name string) *zap.Logger { } // listenGRPC starts a grpc server over a unix domain socket on the member -func (m *member) listenGRPC() error { +func (m *Member) listenGRPC() error { // prefix with localhost so cert has right domain network, host, port := m.grpcAddr() grpcAddr := host + ":" + port @@ -767,34 +767,34 @@ func (m *member) listenGRPC() error { if err != nil { return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err) } - m.grpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr) - if m.useBridge { + m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr) + if m.UseBridge { _, err = m.addBridge() if err != nil { grpcListener.Close() return err } } - m.grpcListener = grpcListener + m.GrpcListener = grpcListener return nil } -func (m *member) clientScheme() string { +func (m *Member) clientScheme() string { switch { - case m.useTCP && m.ClientTLSInfo != nil: + case m.UseTCP && m.ClientTLSInfo != nil: return "https" - case m.useTCP && m.ClientTLSInfo == nil: + case m.UseTCP && m.ClientTLSInfo == nil: return "http" - case !m.useTCP && m.ClientTLSInfo != nil: + case !m.UseTCP && m.ClientTLSInfo != nil: return "unixs" - case !m.useTCP && m.ClientTLSInfo == nil: + case !m.UseTCP && m.ClientTLSInfo == nil: return "unix" } m.Logger.Panic("Failed to determine client schema") return "" } -func (m *member) addBridge() (*bridge, error) { +func (m *Member) addBridge() (*bridge, error) { network, host, port := m.grpcAddr() grpcAddr := host + ":" + port bridgeAddr := grpcAddr + "0" @@ -803,41 +803,41 @@ func (m *member) addBridge() (*bridge, error) { if err != nil { return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err) } - m.grpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) + m.GrpcBridge, err = 
newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) if err != nil { bridgeListener.Close() return nil, err } - m.grpcURL = m.clientScheme() + "://" + bridgeAddr - return m.grpcBridge, nil + m.GrpcURL = m.clientScheme() + "://" + bridgeAddr + return m.GrpcBridge, nil } -func (m *member) Bridge() *bridge { - if !m.useBridge { - m.Logger.Panic("Bridge not available. Please configure using bridge before creating cluster.") +func (m *Member) Bridge() *bridge { + if !m.UseBridge { + m.Logger.Panic("Bridge not available. Please configure using bridge before creating Cluster.") } - return m.grpcBridge + return m.GrpcBridge } -func (m *member) grpcAddr() (network, host, port string) { +func (m *Member) grpcAddr() (network, host, port string) { // prefix with localhost so cert has right domain host = "localhost" - if m.useIP { // for IP-only TLS certs + if m.UseIP { // for IP-only TLS certs host = "127.0.0.1" } network = "unix" - if m.useTCP { + if m.UseTCP { network = "tcp" } port = m.Name - if m.useTCP { + if m.UseTCP { port = fmt.Sprintf("%d", GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } return network, host, port } func GrpcPortNumber(uniqNumber, memberNumber int64) int64 { - return baseGRPCPort + uniqNumber*10 + memberNumber + return BaseGRPCPort + uniqNumber*10 + memberNumber } type dialer struct { @@ -849,24 +849,24 @@ func (d dialer) Dial() (net.Conn, error) { return net.Dial(d.network, d.addr) } -func (m *member) ElectionTimeout() time.Duration { - return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond +func (m *Member) ElectionTimeout() time.Duration { + return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond } -func (m *member) ID() types.ID { return m.s.ID() } +func (m *Member) ID() types.ID { return m.Server.ID() } // NewClientV3 creates a new grpc client connection to the member -func NewClientV3(m *member) (*clientv3.Client, error) { - if m.grpcURL == "" { +func NewClientV3(m *Member) (*clientv3.Client, error) { + if m.GrpcURL == "" { return nil, fmt.Errorf("member not configured for grpc") } cfg := clientv3.Config{ - Endpoints: []string{m.grpcURL}, + Endpoints: []string{m.GrpcURL}, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, - MaxCallSendMsgSize: m.clientMaxCallSendMsgSize, - MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize, + MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize, + MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize, Logger: m.Logger.Named("client"), } @@ -885,8 +885,8 @@ func NewClientV3(m *member) (*clientv3.Client, error) { // Clone returns a member with the same server configuration. The returned // member will not set PeerListeners and ClientListeners. -func (m *member) Clone(t testutil.TB) *member { - mm := &member{} +func (m *Member) Clone(t testutil.TB) *Member { + mm := &Member{} mm.ServerConfig = m.ServerConfig var err error @@ -918,20 +918,20 @@ func (m *member) Clone(t testutil.TB) *member { // Launch starts a member based on ServerConfig, PeerListeners // and ClientListeners. 
-func (m *member) Launch() error { +func (m *Member) Launch() error { m.Logger.Info( "launching a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) var err error - if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil { + if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) - m.s.Start() + m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond) + m.Server.Start() var peerTLScfg *tls.Config if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() { @@ -940,7 +940,7 @@ func (m *member) Launch() error { } } - if m.grpcListener != nil { + if m.GrpcListener != nil { var ( tlscfg *tls.Config ) @@ -950,23 +950,23 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerRecorder.UnaryInterceptor(), m.grpcServerOpts...) - m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg, m.grpcServerRecorder.UnaryInterceptor()) - m.serverClient = v3client.New(m.s) - lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) - epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) - go m.grpcServer.Serve(m.grpcListener) + m.GrpcServer = v3rpc.Server(m.Server, tlscfg, m.GrpcServerRecorder.UnaryInterceptor(), m.GrpcServerOpts...) + m.GrpcServerPeer = v3rpc.Server(m.Server, peerTLScfg, m.GrpcServerRecorder.UnaryInterceptor()) + m.ServerClient = v3client.New(m.Server) + lockpb.RegisterLockServer(m.GrpcServer, v3lock.NewLockServer(m.ServerClient)) + epb.RegisterElectionServer(m.GrpcServer, v3election.NewElectionServer(m.ServerClient)) + go m.GrpcServer.Serve(m.GrpcListener) } - m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.s)} + m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)} - h := (http.Handler)(m.raftHandler) - if m.grpcListener != nil { + h := (http.Handler)(m.RaftHandler) + if m.GrpcListener != nil { h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - m.grpcServerPeer.ServeHTTP(w, r) + m.GrpcServerPeer.ServeHTTP(w, r) } else { - m.raftHandler.ServeHTTP(w, r) + m.RaftHandler.ServeHTTP(w, r) } }) } @@ -976,9 +976,9 @@ func (m *member) Launch() error { // don't hang on matcher after closing listener cm.SetReadTimeout(time.Second) - if m.grpcServer != nil { + if m.GrpcServer != nil { grpcl := cm.Match(cmux.HTTP2()) - go m.grpcServerPeer.Serve(grpcl) + go m.GrpcServerPeer.Serve(grpcl) } // serve http1/http2 rafthttp/grpc @@ -1010,7 +1010,7 @@ func (m *member) Launch() error { hs.Close() <-donec } - m.serverClosers = append(m.serverClosers, closer) + m.ServerClosers = append(m.ServerClosers, closer) } for _, ln := range m.ClientListeners { hs := &httptest.Server{ @@ -1018,7 +1018,7 @@ func (m *member) Launch() error { Config: &http.Server{ Handler: v2http.NewClientHandler( m.Logger, - m.s, + m.Server, m.ServerConfig.ReqTimeout(), ), ErrorLog: log.New(ioutil.Discard, "net/http", 0), @@ -1038,7 +1038,7 @@ func (m *member) Launch() error { // Previously, // 1. Server has non-empty (*tls.Config).Certificates on client hello // 2. 
Server calls (*tls.Config).GetCertificate iff: - // - Server's (*tls.Config).Certificates is not empty, or + // - Server's (*tls.Config).Certificates is not empty, or // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // // When (*tls.Config).Certificates is always populated on initial handshake, @@ -1056,7 +1056,7 @@ func (m *member) Launch() error { // // This introduces another problem with "httptest.Server": // when server initial certificates are empty, certificates - // are overwritten by Go's internal test certs, which have + // are overwritten by Go's internal test certs, which have // different SAN fields (e.g. example.com). To work around, // re-overwrite (*tls.Config).Certificates before starting // test server. @@ -1073,7 +1073,7 @@ func (m *member) Launch() error { hs.CloseClientConnections() hs.Close() } - m.serverClosers = append(m.serverClosers, closer) + m.ServerClosers = append(m.ServerClosers, closer) } m.Logger.Info( @@ -1081,30 +1081,30 @@ func (m *member) Launch() error { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) return nil } -func (m *member) RecordedRequests() []grpc_testing.RequestInfo { - return m.grpcServerRecorder.RecordedRequests() +func (m *Member) RecordedRequests() []grpc_testing.RequestInfo { + return m.GrpcServerRecorder.RecordedRequests() } -func (m *member) WaitOK(t testutil.TB) { +func (m *Member) WaitOK(t testutil.TB) { m.WaitStarted(t) - for m.s.Leader() == 0 { - time.Sleep(tickDuration) + for m.Server.Leader() == 0 { + time.Sleep(TickDuration) } } -func (m *member) WaitStarted(t testutil.TB) { +func (m *Member) WaitStarted(t testutil.TB) { cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo) kapi := client.NewKeysAPI(cc) for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := kapi.Get(ctx, "/", nil) if err != nil { - time.Sleep(tickDuration) + time.Sleep(TickDuration) continue } cancel() @@ -1113,51 +1113,51 @@ } func WaitClientV3(t testutil.TB, kv clientv3.KV) { - timeout := time.Now().Add(requestTimeout) + timeout := time.Now().Add(RequestTimeout) var err error for time.Now().Before(timeout) { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err = kv.Get(ctx, "/") cancel() if err == nil { return } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } if err != nil { t.Fatalf("timed out waiting for client: %v", err) } } -func (m *member) URL() string { return m.ClientURLs[0].String() } +func (m *Member) URL() string { return m.ClientURLs[0].String() } -func (m *member) Pause() { - m.raftHandler.Pause() - m.s.PauseSending() +func (m *Member) Pause() { + m.RaftHandler.Pause() + m.Server.PauseSending() } -func (m *member) Resume() { - m.raftHandler.Resume() - m.s.ResumeSending() +func (m *Member) Resume() { + m.RaftHandler.Resume() + m.Server.ResumeSending() } -// Close stops the member's etcdserver and closes its connections -func (m *member) Close() { - if m.grpcBridge != nil { - m.grpcBridge.Close() - m.grpcBridge = nil +// Close stops the member's etcdserver and closes its connections +func (m *Member) Close() { + if m.GrpcBridge != nil { + m.GrpcBridge.Close() + 
m.GrpcBridge = nil } - if m.serverClient != nil { - m.serverClient.Close() - m.serverClient = nil + if m.ServerClient != nil { + m.ServerClient.Close() + m.ServerClient = nil } - if m.grpcServer != nil { + if m.GrpcServer != nil { ch := make(chan struct{}) go func() { defer close(ch) // close listeners to stop accepting new connections, // will block on any existing transports - m.grpcServer.GracefulStop() + m.GrpcServer.GracefulStop() }() // wait until all pending RPCs are finished select { @@ -1165,21 +1165,21 @@ func (m *member) Close() { case <-time.After(2 * time.Second): // took too long, manually close open transports // e.g. watch streams - m.grpcServer.Stop() + m.GrpcServer.Stop() <-ch } - m.grpcServer = nil - m.grpcServerPeer.GracefulStop() - m.grpcServerPeer.Stop() - m.grpcServerPeer = nil + m.GrpcServer = nil + m.GrpcServerPeer.GracefulStop() + m.GrpcServerPeer.Stop() + m.GrpcServerPeer = nil } - if m.s != nil { - m.s.HardStop() + if m.Server != nil { + m.Server.HardStop() } - for _, f := range m.serverClosers { + for _, f := range m.ServerClosers { f() } - if !m.closed { + if !m.Closed { // Avoid verification of the same file multiple times // (that might not exist any longer) verify.MustVerifyIfEnabled(verify.Config{ @@ -1188,51 +1188,51 @@ func (m *member) Close() { ExactIndex: false, }) } - m.closed = true + m.Closed = true } // Stop stops the member, but the data dir of the member is preserved. -func (m *member) Stop(_ testutil.TB) { +func (m *Member) Stop(_ testutil.TB) { m.Logger.Info( "stopping a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) m.Close() - m.serverClosers = nil + m.ServerClosers = nil m.Logger.Info( "stopped a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) } -// checkLeaderTransition waits for leader transition, returning the new leader ID. -func checkLeaderTransition(m *member, oldLead uint64) uint64 { - interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond - for m.s.Lead() == 0 || (m.s.Lead() == oldLead) { +// CheckLeaderTransition waits for leader transition, returning the new leader ID. +func CheckLeaderTransition(m *Member, oldLead uint64) uint64 { + interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond + for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) { time.Sleep(interval) } - return m.s.Lead() + return m.Server.Lead() } // StopNotify unblocks when a member stop completes -func (m *member) StopNotify() <-chan struct{} { - return m.s.StopNotify() +func (m *Member) StopNotify() <-chan struct{} { + return m.Server.StopNotify() } // Restart starts the member using the preserved data dir. 
-func (m *member) Restart(t testutil.TB) error { +func (m *Member) Restart(t testutil.TB) error { m.Logger.Info( "restarting a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) newPeerListeners := make([]net.Listener, 0) for _, ln := range m.PeerListeners { @@ -1245,7 +1245,7 @@ func (m *member) Restart(t testutil.TB) error { } m.ClientListeners = newClientListeners - if m.grpcListener != nil { + if m.GrpcListener != nil { if err := m.listenGRPC(); err != nil { t.Fatal(err) } @@ -1257,23 +1257,23 @@ func (m *member) Restart(t testutil.TB) error { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), zap.Error(err), ) return err } // Terminate stops the member and removes the data dir. -func (m *member) Terminate(t testutil.TB) { +func (m *Member) Terminate(t testutil.TB) { m.Logger.Info( "terminating a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) m.Close() - if !m.keepDataDirTerminate { + if !m.KeepDataDirTerminate { if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { t.Fatal(err) } @@ -1283,12 +1283,12 @@ func (m *member) Terminate(t testutil.TB) { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.grpcURL), + zap.String("grpc-url", m.GrpcURL), ) } // Metric gets the metric value for a member -func (m *member) Metric(metricName string, expectLabels ...string) (string, error) { +func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) { cfgtls := transport.TLSInfo{} tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second) if err != nil { @@ -1325,25 +1325,25 @@ func (m *member) Metric(metricName string, expectLabels ...string) (string, erro } // InjectPartition drops connections from m to others, vice versa. -func (m *member) InjectPartition(t testutil.TB, others ...*member) { +func (m *Member) InjectPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.s.CutPeer(other.s.ID()) - other.s.CutPeer(m.s.ID()) - t.Logf("network partition injected between: %v <-> %v", m.s.ID(), other.s.ID()) + m.Server.CutPeer(other.Server.ID()) + other.Server.CutPeer(m.Server.ID()) + t.Logf("network partition injected between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } // RecoverPartition recovers connections from m to others, vice versa. 
-func (m *member) RecoverPartition(t testutil.TB, others ...*member) { +func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.s.MendPeer(other.s.ID()) - other.s.MendPeer(m.s.ID()) - t.Logf("network partition between: %v <-> %v", m.s.ID(), other.s.ID()) + m.Server.MendPeer(other.Server.ID()) + other.Server.MendPeer(m.Server.ID()) + t.Logf("network partition between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } -func (m *member) ReadyNotify() <-chan struct{} { - return m.s.ReadyNotify() +func (m *Member) ReadyNotify() <-chan struct{} { + return m.Server.ReadyNotify() } func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client { @@ -1377,15 +1377,15 @@ func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } type ClusterV3 struct { - *cluster + *Cluster mu sync.Mutex - clients []*clientv3.Client + Clients []*clientv3.Client clusterClient *clientv3.Client } -// NewClusterV3 returns a launched cluster with a grpc client connection -// for each cluster member. +// NewClusterV3 returns a launched Cluster with a grpc client connection +// for each Cluster member. func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 { t.Helper() @@ -1394,7 +1394,7 @@ cfg.UseGRPC = true clus := &ClusterV3{ - cluster: NewClusterByConfig(t, cfg), + Cluster: NewClusterByConfig(t, cfg), } clus.Launch(t) @@ -1404,7 +1404,7 @@ if err != nil { t.Fatalf("cannot create client: %v", err) } - clus.clients = append(clus.clients, client) + clus.Clients = append(clus.Clients, client) } } @@ -1413,13 +1413,13 @@ func (c *ClusterV3) TakeClient(idx int) { c.mu.Lock() - c.clients[idx] = nil + c.Clients[idx] = nil c.mu.Unlock() } func (c *ClusterV3) Terminate(t testutil.TB) { c.mu.Lock() - for _, client := range c.clients { + for _, client := range c.Clients { if client == nil { continue } @@ -1433,22 +1433,22 @@ } } c.mu.Unlock() - c.cluster.Terminate(t) + c.Cluster.Terminate(t) } func (c *ClusterV3) RandClient() *clientv3.Client { - return c.clients[rand.Intn(len(c.clients))] + return c.Clients[rand.Intn(len(c.Clients))] } func (c *ClusterV3) Client(i int) *clientv3.Client { - return c.clients[i] + return c.Clients[i] } func (c *ClusterV3) ClusterClient() (client *clientv3.Client, err error) { if c.clusterClient == nil { endpoints := []string{} for _, m := range c.Members { - endpoints = append(endpoints, m.grpcURL) + endpoints = append(endpoints, m.GrpcURL) } cfg := clientv3.Config{ Endpoints: endpoints, @@ -1504,26 +1504,26 @@ func CloseClients(t testutil.TB, clients []*clientv3.Client) { } } -type grpcAPI struct { - // Cluster is the cluster API for the client's connection. +type GrpcAPI struct { + // Cluster is the Cluster API for the client's connection. Cluster pb.ClusterClient - // KV is the keyvalue API for the client's connection. + // KV is the keyvalue API for the client's connection. KV pb.KVClient - // Lease is the lease API for the client's connection. + // Lease is the lease API for the client's connection. Lease pb.LeaseClient - // Watch is the watch API for the client's connection. + // Watch is the watch API for the client's connection. 
Watch pb.WatchClient - // Maintenance is the maintenance API for the client's connection. + // Maintenance is the maintenance API for the client's connection. Maintenance pb.MaintenanceClient - // Auth is the authentication API for the client's connection. + // Auth is the authentication API for the client's connection. Auth pb.AuthClient - // Lock is the lock API for the client's connection. + // Lock is the lock API for the client's connection. Lock lockpb.LockClient - // Election is the election API for the client's connection. + // Election is the election API for the client's connection. Election epb.ElectionClient } -// GetLearnerMembers returns the list of learner members in cluster using MemberList API. +// GetLearnerMembers returns the list of learner members in Cluster using MemberList API. func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) { cli := c.Client(0) resp, err := cli.MemberList(context.Background()) @@ -1539,13 +1539,13 @@ return learners, nil } -// AddAndLaunchLearnerMember creates a leaner member, adds it to cluster +// AddAndLaunchLearnerMember creates a learner member, adds it to Cluster // via v3 MemberAdd API, and then launches the new member. func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) { m := c.mustNewMember(t, 0) - m.isLearner = true + m.IsLearner = true - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) + scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()} cli := c.Client(0) @@ -1570,7 +1570,7 @@ c.waitMembersMatch(t) } -// getMembers returns a list of members in cluster, in format of etcdserverpb.Member +// getMembers returns a list of members in Cluster, in format of etcdserverpb.Member func (c *ClusterV3) getMembers() []*pb.Member { var mems []*pb.Member for _, m := range c.Members { @@ -1578,7 +1578,7 @@ Name: m.Name, PeerURLs: m.PeerURLs.StringSlice(), ClientURLs: m.ClientURLs.StringSlice(), - IsLearner: m.isLearner, + IsLearner: m.IsLearner, } mems = append(mems, mem) } @@ -1586,27 +1586,27 @@ } // waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the -// local 'c.Members', which is the local recording of members in the testing cluster. With +// local 'c.Members', which is the local recording of members in the testing Cluster. With // the exception that the local recording c.Members does not have info on Member.ID, which -// is generated when the member is been added to cluster. +// is generated when the member has been added to Cluster. // // Note: // A successful match means the Member.clientURLs are matched. This means member has already -// finished publishing its server attributes to cluster. Publishing attributes is a cluster-wide +// finished publishing its server attributes to Cluster. Publishing attributes is a Cluster-wide // write request (in v2 server). Therefore, at this point, any raft log entries prior to this // would have already been applied. // -// If a new member was added to an existing cluster, at this point, it has finished publishing -// its own server attributes to the cluster. And therefore by the same argument, it has already +// If a new member was added to an existing Cluster, at this point, it has finished publishing +// its own server attributes to the Cluster. 
And therefore by the same argument, it has already // applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point, -// the new member has the correct view of the cluster configuration. +// the new member has the correct view of the Cluster configuration. // // Special note on learner member: -// Learner member is only added to a cluster via v3rpc MemberAdd API (as of v3.4). When starting -// the learner member, its initial view of the cluster created by peerURLs map does not have info +// Learner member is only added to a Cluster via v3rpc MemberAdd API (as of v3.4). When starting +// the learner member, its initial view of the Cluster created by peerURLs map does not have info // on whether or not the new member itself is learner. But at this point, a successful match does // indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry -// which was used to add the learner itself to the cluster, and therefore it has the correct info +// which was used to add the learner itself to the Cluster, and therefore it has the correct info // on learner. func (c *ClusterV3) waitMembersMatch(t testutil.TB) { wMembers := c.getMembers() @@ -1629,7 +1629,7 @@ func (c *ClusterV3) waitMembersMatch(t testutil.TB) { return } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } @@ -1642,9 +1642,9 @@ func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool { func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // MustNewMember creates a new member instance based on the response of V3 Member Add API. -func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *member { +func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *Member { m := c.mustNewMember(t, 0) - m.isLearner = resp.Member.IsLearner + m.IsLearner = resp.Member.IsLearner m.NewCluster = false m.InitialPeerURLsMap = types.URLsMap{} diff --git a/tests/integration/cluster_direct.go b/tests/framework/integration/cluster_direct.go similarity index 95% rename from tests/integration/cluster_direct.go rename to tests/framework/integration/cluster_direct.go index 0334146d0..dad487525 100644 --- a/tests/integration/cluster_direct.go +++ b/tests/framework/integration/cluster_direct.go @@ -26,8 +26,8 @@ import ( const ThroughProxy = false -func toGRPC(c *clientv3.Client) grpcAPI { - return grpcAPI{ +func ToGRPC(c *clientv3.Client) GrpcAPI { + return GrpcAPI{ pb.NewClusterClient(c.ActiveConnection()), pb.NewKVClient(c.ActiveConnection()), pb.NewLeaseClient(c.ActiveConnection()), diff --git a/tests/integration/cluster_proxy.go b/tests/framework/integration/cluster_proxy.go similarity index 97% rename from tests/integration/cluster_proxy.go rename to tests/framework/integration/cluster_proxy.go index 25e042359..a5266d09e 100644 --- a/tests/integration/cluster_proxy.go +++ b/tests/framework/integration/cluster_proxy.go @@ -39,13 +39,13 @@ const proxyNamespace = "proxy-namespace" type grpcClientProxy struct { ctx context.Context ctxCancel func() - grpc grpcAPI + grpc GrpcAPI wdonec <-chan struct{} kvdonec <-chan struct{} lpdonec <-chan struct{} } -func toGRPC(c *clientv3.Client) grpcAPI { +func ToGRPC(c *clientv3.Client) GrpcAPI { pmu.Lock() defer pmu.Unlock() @@ -74,7 +74,7 @@ func toGRPC(c *clientv3.Client) grpcAPI { lockp := grpcproxy.NewLockProxy(c) electp := grpcproxy.NewElectionProxy(c) - grpc := grpcAPI{ + grpc := GrpcAPI{ adapter.ClusterServerToClusterClient(clp), adapter.KvServerToKvClient(kvp), 
adapter.LeaseServerToLeaseClient(lp), @@ -112,7 +112,7 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { if err != nil { return nil, err } - rpc := toGRPC(c) + rpc := ToGRPC(c) c.KV = clientv3.NewKVFromKVClient(rpc.KV, c) pmu.Lock() lc := c.Lease diff --git a/tests/integration/testing.go b/tests/framework/integration/testing.go similarity index 100% rename from tests/integration/testing.go rename to tests/framework/integration/testing.go diff --git a/tests/integration/client/client_test.go b/tests/integration/client/client_test.go index 630344cb8..aa0271e95 100644 --- a/tests/integration/client/client_test.go +++ b/tests/integration/client/client_test.go @@ -25,14 +25,14 @@ import ( "testing" "go.etcd.io/etcd/client/v2" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection. func TestV2NoRetryEOF(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // generate an EOF response; specify address so appears first in sorted ep list - lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) + lEOF := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) defer lEOF.Close() tries := uint32(0) go func() { @@ -45,8 +45,8 @@ func TestV2NoRetryEOF(t *testing.T) { conn.Close() } }() - eofURL := integration.URLScheme + "://" + lEOF.Addr().String() - cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil) + eofURL := integration2.URLScheme + "://" + lEOF.Addr().String() + cli := integration2.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil) kapi := client.NewKeysAPI(cli) for i, f := range noRetryList(kapi) { startTries := atomic.LoadUint32(&tries) @@ -62,17 +62,17 @@ func TestV2NoRetryEOF(t *testing.T) { // TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code. func TestV2NoRetryNoLeader(t *testing.T) { - integration.BeforeTest(t) - lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) + integration2.BeforeTest(t) + lHTTP := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid())) eh := &errHandler{errCode: http.StatusServiceUnavailable} srv := httptest.NewUnstartedServer(eh) defer lHTTP.Close() defer srv.Close() srv.Listener = lHTTP go srv.Start() - lHTTPURL := integration.URLScheme + "://" + lHTTP.Addr().String() + lHTTPURL := integration2.URLScheme + "://" + lHTTP.Addr().String() - cli := integration.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil) + cli := integration2.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil) kapi := client.NewKeysAPI(cli) // test error code for i, f := range noRetryList(kapi) { @@ -88,12 +88,12 @@ func TestV2NoRetryNoLeader(t *testing.T) { // TestV2RetryRefuse tests destructive api calls will retry if a connection is refused. 
func TestV2RetryRefuse(t *testing.T) { - integration.BeforeTest(t) - cl := integration.NewCluster(t, 1) + integration2.BeforeTest(t) + cl := integration2.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) // test connection refused; expect no error failover - cli := integration.MustNewHTTPClient(t, []string{integration.URLScheme + "://refuseconn:123", cl.URL(0)}, nil) + cli := integration2.MustNewHTTPClient(t, []string{integration2.URLScheme + "://refuseconn:123", cl.URL(0)}, nil) kapi := client.NewKeysAPI(cli) if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil { t.Fatal(err) diff --git a/tests/integration/clientv3/cluster_test.go b/tests/integration/clientv3/cluster_test.go index eff15cf7d..b9a63c55c 100644 --- a/tests/integration/clientv3/cluster_test.go +++ b/tests/integration/clientv3/cluster_test.go @@ -23,13 +23,13 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMemberList(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -45,9 +45,9 @@ func TestMemberList(t *testing.T) { } func TestMemberAdd(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -64,9 +64,9 @@ func TestMemberAdd(t *testing.T) { } func TestMemberAddWithExistingURLs(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -88,9 +88,9 @@ func TestMemberAddWithExistingURLs(t *testing.T) { } func TestMemberRemove(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.Client(1) @@ -126,9 +126,9 @@ func TestMemberRemove(t *testing.T) { } func TestMemberUpdate(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -154,9 +154,9 @@ func TestMemberUpdate(t *testing.T) { } func TestMemberAddUpdateWrongURLs(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) capi := clus.RandClient() @@ -187,9 +187,9 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) { } func TestMemberAddForLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) capi := clus.RandClient() @@ -216,9 +216,9 @@ func TestMemberAddForLearner(t 
*testing.T) { } func TestMemberPromote(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // member promote request can be sent to any server in cluster, @@ -293,9 +293,9 @@ func TestMemberPromote(t *testing.T) { // TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails. func TestMemberPromoteMemberNotLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // member promote request can be sent to any server in cluster, @@ -329,9 +329,9 @@ func TestMemberPromoteMemberNotLearner(t *testing.T) { // TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails. func TestMemberPromoteMemberNotExist(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // member promote request can be sent to any server in cluster, @@ -378,10 +378,10 @@ func TestMemberPromoteMemberNotExist(t *testing.T) { // TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is 1 func TestMaxLearnerInCluster(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // 1. start with a cluster with 3 voting member and 0 learner member - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // 2. 
adding a learner member should succeed diff --git a/tests/integration/clientv3/concurrency/election_test.go b/tests/integration/clientv3/concurrency/election_test.go index 650bdc015..d101f15d2 100644 --- a/tests/integration/clientv3/concurrency/election_test.go +++ b/tests/integration/clientv3/concurrency/election_test.go @@ -23,13 +23,13 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestResumeElection(t *testing.T) { const prefix = "/resume-election/" - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) if err != nil { log.Fatal(err) } diff --git a/tests/integration/clientv3/concurrency/mutex_test.go b/tests/integration/clientv3/concurrency/mutex_test.go index 1d264bf4e..8220788cf 100644 --- a/tests/integration/clientv3/concurrency/mutex_test.go +++ b/tests/integration/clientv3/concurrency/mutex_test.go @@ -20,11 +20,11 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMutexLockSessionExpired(t *testing.T) { - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/connectivity/black_hole_test.go b/tests/integration/clientv3/connectivity/black_hole_test.go index 4445c69f6..1c501c08a 100644 --- a/tests/integration/clientv3/connectivity/black_hole_test.go +++ b/tests/integration/clientv3/connectivity/black_hole_test.go @@ -24,7 +24,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration/clientv3" "google.golang.org/grpc" ) @@ -33,9 +33,9 @@ import ( // blackholed endpoint, client balancer switches to healthy one. // TODO: test server-to-client keepalive ping func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 2, GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings UseBridge: true, @@ -58,9 +58,9 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { // TODO: only send healthy endpoint to gRPC so gRPC wont waste time to // dial for unhealthy endpoint. // then we can reduce 3s to 1s. - timeout := pingInterval + integration.RequestWaitTimeout + timeout := pingInterval + integration2.RequestWaitTimeout - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } @@ -166,9 +166,9 @@ func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) { // testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint // fails due to context timeout, but succeeds on next try, with endpoint switch. 
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 2, SkipCreatingClient: true, UseBridge: true, @@ -182,7 +182,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien DialTimeout: 1 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/connectivity/dial_test.go b/tests/integration/clientv3/connectivity/dial_test.go index 52dcca69e..719de550a 100644 --- a/tests/integration/clientv3/connectivity/dial_test.go +++ b/tests/integration/clientv3/connectivity/dial_test.go @@ -24,31 +24,31 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" "google.golang.org/grpc" ) var ( testTLSInfo = transport.TLSInfo{ - KeyFile: integration.MustAbsPath("../../../fixtures/server.key.insecure"), - CertFile: integration.MustAbsPath("../../../fixtures/server.crt"), - TrustedCAFile: integration.MustAbsPath("../../../fixtures/ca.crt"), + KeyFile: integration2.MustAbsPath("../../../fixtures/server.key.insecure"), + CertFile: integration2.MustAbsPath("../../../fixtures/server.crt"), + TrustedCAFile: integration2.MustAbsPath("../../../fixtures/ca.crt"), ClientCertAuth: true, } testTLSInfoExpired = transport.TLSInfo{ - KeyFile: integration.MustAbsPath("../../fixtures-expired/server.key.insecure"), - CertFile: integration.MustAbsPath("../../fixtures-expired/server.crt"), - TrustedCAFile: integration.MustAbsPath("../../fixtures-expired/ca.crt"), + KeyFile: integration2.MustAbsPath("../../fixtures-expired/server.key.insecure"), + CertFile: integration2.MustAbsPath("../../fixtures-expired/server.crt"), + TrustedCAFile: integration2.MustAbsPath("../../fixtures-expired/ca.crt"), ClientCertAuth: true, } ) // TestDialTLSExpired tests client with expired certs fails to dial. func TestDialTLSExpired(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) defer clus.Terminate(t) tls, err := testTLSInfoExpired.ClientConfig() @@ -56,7 +56,7 @@ func TestDialTLSExpired(t *testing.T) { t.Fatal(err) } // expect remote errors "tls: bad certificate" - _, err = integration.NewClient(t, clientv3.Config{ + _, err = integration2.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: 3 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, @@ -70,11 +70,11 @@ func TestDialTLSExpired(t *testing.T) { // TestDialTLSNoConfig ensures the client fails to dial / times out // when TLS endpoints (https, unixs) are given but no tls config. 
func TestDialTLSNoConfig(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) defer clus.Terminate(t) // expect "signed by unknown authority" - c, err := integration.NewClient(t, clientv3.Config{ + c, err := integration2.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, @@ -101,8 +101,8 @@ func TestDialSetEndpointsAfterFail(t *testing.T) { // testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones. func testDialSetEndpoints(t *testing.T, setBefore bool) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, SkipCreatingClient: true}) defer clus.Terminate(t) // get endpoint list @@ -117,7 +117,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { DialTimeout: 1 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -134,7 +134,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3]) } time.Sleep(time.Second * 2) - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout) if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil { t.Fatal(err) } @@ -144,8 +144,8 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { // TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint // with a new one that doesn't include original endpoint. func TestSwitchSetEndpoints(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // get non partitioned members endpoints @@ -164,9 +164,9 @@ func TestSwitchSetEndpoints(t *testing.T) { } func TestRejectOldCluster(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // 2 endpoints to test multi-endpoint Status - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -175,7 +175,7 @@ func TestRejectOldCluster(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, RejectOldCluster: true, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -185,8 +185,8 @@ func TestRejectOldCluster(t *testing.T) { // TestDialForeignEndpoint checks an endpoint that is not registered // with the balancer can be dialed. 
func TestDialForeignEndpoint(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0]) @@ -208,8 +208,8 @@ func TestDialForeignEndpoint(t *testing.T) { // TestSetEndpointAndPut checks that a Put following a SetEndpoints // to a working endpoint will always succeed. func TestSetEndpointAndPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL()) diff --git a/tests/integration/clientv3/connectivity/network_partition_test.go b/tests/integration/clientv3/connectivity/network_partition_test.go index c2650ebcd..32abb6e12 100644 --- a/tests/integration/clientv3/connectivity/network_partition_test.go +++ b/tests/integration/clientv3/connectivity/network_partition_test.go @@ -26,7 +26,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration/clientv3" "google.golang.org/grpc" ) @@ -103,9 +103,9 @@ func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) { } func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -119,7 +119,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c DialTimeout: 3 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } @@ -159,9 +159,9 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c // switches endpoint when leader fails and linearizable get requests returns // "etcdserver: request timed out". func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -172,7 +172,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout() - cli, err := integration.NewClient(t, clientv3.Config{ + cli, err := integration2.NewClient(t, clientv3.Config{ Endpoints: []string{eps[(lead+1)%2]}, DialTimeout: 2 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, @@ -214,9 +214,9 @@ func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) { // testBalancerUnderNetworkPartitionWatch ensures watch stream // to a partitioned node be closed when context requires leader. 
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -230,7 +230,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { } // pin eps[target] - watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) + watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) if err != nil { t.Fatal(err) } @@ -248,7 +248,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify()) select { case <-wch: - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("took too long to create watch") } @@ -268,15 +268,15 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { if err = ev.Err(); err != rpctypes.ErrNoLeader { t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err) } - case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost + case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost t.Fatal("took too long to detect leader lost") } } func TestDropReadUnderNetworkPartition(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -289,7 +289,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) { DialTimeout: 10 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/connectivity/server_shutdown_test.go b/tests/integration/clientv3/connectivity/server_shutdown_test.go index 5b888e6fe..2d14a3999 100644 --- a/tests/integration/clientv3/connectivity/server_shutdown_test.go +++ b/tests/integration/clientv3/connectivity/server_shutdown_test.go @@ -23,16 +23,16 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration/clientv3" ) // TestBalancerUnderServerShutdownWatch expects that watch client // switch its endpoints when the member of the pinned endpoint fails. 
func TestBalancerUnderServerShutdownWatch(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, UseBridge: true, @@ -44,7 +44,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { lead := clus.WaitLeader(t) // pin eps[lead] - watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) + watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) if err != nil { t.Fatal(err) } @@ -61,7 +61,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify()) select { case <-wch: - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("took too long to create watch") } @@ -90,7 +90,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { clus.Members[lead].Terminate(t) // writes to eps[lead+1] - putCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) + putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) if err != nil { t.Fatal(err) } @@ -143,9 +143,9 @@ func TestBalancerUnderServerShutdownTxn(t *testing.T) { // the pinned endpoint is shut down, the balancer switches its endpoints // and all subsequent put/delete/txn requests succeed with new endpoints. func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -154,7 +154,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} // pin eps[0] - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) if err != nil { t.Fatal(err) } @@ -201,9 +201,9 @@ func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) { // the pinned endpoint is shut down, the balancer switches its endpoints // and all subsequent range requests succeed with new endpoints. func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{ Size: 3, SkipCreatingClient: true, }) @@ -212,7 +212,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} // pin eps[0] - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) if err != nil { t.Errorf("failed to create client: %v", err) } @@ -274,9 +274,9 @@ type pinTestOpt struct { // testBalancerUnderServerStopInflightRangeOnRestart expects // inflight range request reconnects on server restart. 
func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cfg := &integration.ClusterConfig{ + cfg := &integration2.ClusterConfig{ Size: 2, SkipCreatingClient: true, UseBridge: true, @@ -285,7 +285,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl cfg.Size = 3 } - clus := integration.NewClusterV3(t, cfg) + clus := integration2.NewClusterV3(t, cfg) defer clus.Terminate(t) eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()} if linearizable { @@ -300,7 +300,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl } // pin eps[target] - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) if err != nil { t.Errorf("failed to create client: %v", err) } @@ -361,7 +361,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl clus.Members[target].Restart(t) select { - case <-time.After(clientTimeout + integration.RequestWaitTimeout): + case <-time.After(clientTimeout + integration2.RequestWaitTimeout): t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt) case <-donec: } diff --git a/tests/integration/clientv3/examples/main_test.go b/tests/integration/clientv3/examples/main_test.go index 3a61a962f..d347fa730 100644 --- a/tests/integration/clientv3/examples/main_test.go +++ b/tests/integration/clientv3/examples/main_test.go @@ -20,6 +20,7 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/testutil" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.etcd.io/etcd/tests/v3/integration" ) @@ -29,7 +30,7 @@ const ( ) var lazyCluster = integration.NewLazyClusterWithConfig( - integration.ClusterConfig{ + integration2.ClusterConfig{ Size: 3, WatchProgressNotifyInterval: 200 * time.Millisecond}) diff --git a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go index 52bde238d..5692db140 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go @@ -20,19 +20,19 @@ import ( "go.etcd.io/etcd/client/v3" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestBarrierSingleNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) }) } func TestBarrierMultiNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() }) } diff --git a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go index 463bb6051..e886b90b4 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go +++ 
b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go @@ -20,13 +20,13 @@ import ( "go.etcd.io/etcd/client/v3/concurrency" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestDoubleBarrier(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) waiters := 10 @@ -98,9 +98,9 @@ func TestDoubleBarrier(t *testing.T) { } func TestDoubleBarrierFailover(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) waiters := 10 diff --git a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go index 7104c3ce7..cc873adf2 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go @@ -24,29 +24,29 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMutexLockSingleNode(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func TestMutexLockMultiNode(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) { @@ -93,27 +93,27 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie } func TestMutexTryLockSingleNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func TestMutexTryLockMultiNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := 
integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) var clients []*clientv3.Client - testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients)) - integration.CloseClients(t, clients) + testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients)) + integration2.CloseClients(t, clients) } func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) { - integration.BeforeTest(t) + integration2.BeforeTest(t) lockedC := make(chan *concurrency.Mutex) notlockedC := make(chan *concurrency.Mutex) @@ -163,9 +163,9 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C // TestMutexSessionRelock ensures that acquiring the same lock with the same // session will not result in deadlock. func TestMutexSessionRelock(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) session, err := concurrency.NewSession(clus.RandClient()) if err != nil { @@ -187,9 +187,9 @@ func TestMutexSessionRelock(t *testing.T) { // waiters older than the new owner are gone by testing the case where // the waiter prior to the acquirer expires before the current holder. func TestMutexWaitsOnCurrentHolder(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cctx := context.Background() @@ -295,9 +295,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) { } func BenchmarkMutex4Waiters(b *testing.B) { - integration.BeforeTest(b) + integration2.BeforeTest(b) // XXX switch tests to use TB interface - clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(nil) for i := 0; i < b.N; i++ { testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() }) @@ -305,15 +305,15 @@ func BenchmarkMutex4Waiters(b *testing.B) { } func TestRWMutexSingleNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) }) } func TestRWMutexMultiNode(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() }) } diff --git a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go index 45d1855b9..112d55e1f 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go @@ -21,7 +21,7 @@ import ( "testing" recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) const ( @@ -31,9 +31,9 @@ const ( // TestQueueOneReaderOneWriter confirms the queue is FIFO 
func TestQueueOneReaderOneWriter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) done := make(chan struct{}) @@ -78,10 +78,10 @@ func TestQueueManyReaderManyWriter(t *testing.T) { // BenchmarkQueue benchmarks Queues using many/many readers/writers func BenchmarkQueue(b *testing.B) { - integration.BeforeTest(b) + integration2.BeforeTest(b) // XXX switch tests to use TB interface - clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(nil) for i := 0; i < b.N; i++ { testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients) @@ -90,9 +90,9 @@ func BenchmarkQueue(b *testing.B) { // TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities. func TestPrQueueOneReaderOneWriter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // write out five items with random priority @@ -124,9 +124,9 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) { } func TestPrQueueManyReaderManyWriter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) rqs := newPriorityQueues(clus, manyQueueClients) wqs := newPriorityQueues(clus, manyQueueClients) @@ -135,10 +135,10 @@ func TestPrQueueManyReaderManyWriter(t *testing.T) { // BenchmarkQueue benchmarks Queues using n/n readers/writers func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) { - integration.BeforeTest(b) + integration2.BeforeTest(b) // XXX switch tests to use TB interface - clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(nil) rqs := newPriorityQueues(clus, 1) wqs := newPriorityQueues(clus, 1) @@ -148,13 +148,13 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) { } func testQueueNReaderMWriter(t *testing.T, n int, m int) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) testReadersWriters(t, newQueues(clus, n), newQueues(clus, m)) } -func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) { +func newQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) { for i := 0; i < n; i++ { etcdc := clus.RandClient() qs = append(qs, recipe.NewQueue(etcdc, "q")) @@ -162,7 +162,7 @@ func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) { return qs } -func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) { +func newPriorityQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) { for i := 0; i < n; i++ { etcdc := clus.RandClient() q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")} diff --git a/tests/integration/clientv3/kv_test.go b/tests/integration/clientv3/kv_test.go index 8dd98466d..43cd50fc9 100644 --- a/tests/integration/clientv3/kv_test.go +++ 
b/tests/integration/clientv3/kv_test.go @@ -29,20 +29,20 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func TestKVPutError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) var ( maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go quota = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486. ) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024}) defer clus.Terminate(t) kv := clus.RandClient() @@ -72,9 +72,9 @@ func TestKVPutError(t *testing.T) { } func TestKVPut(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -117,9 +117,9 @@ func TestKVPut(t *testing.T) { // TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value. func TestKVPutWithIgnoreValue(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -150,9 +150,9 @@ func TestKVPutWithIgnoreValue(t *testing.T) { // TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key. 
func TestKVPutWithIgnoreLease(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -189,9 +189,9 @@ func TestKVPutWithIgnoreLease(t *testing.T) { } func TestKVPutWithRequireLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) clus.Members[1].Stop(t) @@ -235,9 +235,9 @@ func TestKVPutWithRequireLeader(t *testing.T) { } func TestKVRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -464,9 +464,9 @@ func TestKVRange(t *testing.T) { } func TestKVGetErrConnClosed(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -486,16 +486,16 @@ func TestKVGetErrConnClosed(t *testing.T) { }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("kv.Get took too long") case <-donec: } } func TestKVNewAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -513,16 +513,16 @@ func TestKVNewAfterClose(t *testing.T) { close(donec) }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("kv.Get took too long") case <-donec: } } func TestKVDeleteRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -592,9 +592,9 @@ func TestKVDeleteRange(t *testing.T) { } func TestKVDelete(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -624,9 +624,9 @@ func TestKVDelete(t *testing.T) { } func TestKVCompactError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -654,9 +654,9 @@ func TestKVCompactError(t *testing.T) { } func TestKVCompact(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.RandClient() @@ -709,10 +709,10 @@ func 
TestKVCompact(t *testing.T) { // TestKVGetRetry ensures get will retry on disconnect. func TestKVGetRetry(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) clusterSize := 3 - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true}) defer clus.Terminate(t) // because killing leader and following election @@ -763,9 +763,9 @@ func TestKVGetRetry(t *testing.T) { // TestKVPutFailGetRetry ensures a get will retry following a failed put. func TestKVPutFailGetRetry(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kv := clus.Client(0) @@ -803,9 +803,9 @@ func TestKVPutFailGetRetry(t *testing.T) { // TestKVGetCancel tests that a context cancel on a Get terminates as expected. func TestKVGetCancel(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) oldconn := clus.Client(0).ActiveConnection() @@ -826,9 +826,9 @@ func TestKVGetCancel(t *testing.T) { // TestKVGetStoppedServerAndClose ensures closing after a failed Get works. func TestKVGetStoppedServerAndClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -844,9 +844,9 @@ func TestKVGetStoppedServerAndClose(t *testing.T) { // TestKVPutStoppedServerAndClose ensures closing after a failed Put works. func TestKVPutStoppedServerAndClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -875,8 +875,8 @@ func TestKVPutStoppedServerAndClose(t *testing.T) { // TestKVPutAtMostOnce ensures that a Put will only occur at most once // in the presence of network errors. func TestKVPutAtMostOnce(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil { @@ -911,7 +911,7 @@ func TestKVPutAtMostOnce(t *testing.T) { // TestKVLargeRequests tests various client/server side request limits. 
func TestKVLargeRequests(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) tests := []struct { // make sure that "MaxCallSendMsgSize" < server-side default send/recv limit maxRequestBytesServer uint @@ -970,8 +970,8 @@ func TestKVLargeRequests(t *testing.T) { }, } for i, test := range tests { - clus := integration.NewClusterV3(t, - &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, + &integration2.ClusterConfig{ Size: 1, MaxRequestBytes: test.maxRequestBytesServer, ClientMaxCallSendMsgSize: test.maxCallSendBytesClient, @@ -1003,9 +1003,9 @@ func TestKVLargeRequests(t *testing.T) { // TestKVForLearner ensures learner member only accepts serializable read request. func TestKVForLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // we have to add and launch learner member after initial cluster was created, because @@ -1034,7 +1034,7 @@ func TestKVForLearner(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } // this client only has endpoint of the learner member - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("failed to create clientv3: %v", err) } @@ -1082,9 +1082,9 @@ func TestKVForLearner(t *testing.T) { // TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member func TestBalancerSupportLearner(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // we have to add and launch learner member after initial cluster was created, because @@ -1106,7 +1106,7 @@ func TestBalancerSupportLearner(t *testing.T) { DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("failed to create clientv3: %v", err) } diff --git a/tests/integration/clientv3/lease/lease_test.go b/tests/integration/clientv3/lease/lease_test.go index 6a6cf7dd3..d06cffe05 100644 --- a/tests/integration/clientv3/lease/lease_test.go +++ b/tests/integration/clientv3/lease/lease_test.go @@ -26,13 +26,13 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestLeaseNotFoundError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -44,9 +44,9 @@ func TestLeaseNotFoundError(t *testing.T) { } func TestLeaseGrant(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -70,9 +70,9 @@ func TestLeaseGrant(t *testing.T) { } func TestLeaseRevoke(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus 
:= integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -96,9 +96,9 @@ func TestLeaseRevoke(t *testing.T) { } func TestLeaseKeepAliveOnce(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.RandClient() @@ -120,9 +120,9 @@ func TestLeaseKeepAliveOnce(t *testing.T) { } func TestLeaseKeepAlive(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clus.Client(0) @@ -160,9 +160,9 @@ func TestLeaseKeepAlive(t *testing.T) { } func TestLeaseKeepAliveOneSecond(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -188,9 +188,9 @@ func TestLeaseKeepAliveOneSecond(t *testing.T) { func TestLeaseKeepAliveHandleFailure(t *testing.T) { t.Skip("test it when we have a cluster client") - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) // TODO: change this line to get a cluster client @@ -243,9 +243,9 @@ type leaseCh struct { // TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases. func TestLeaseKeepAliveNotFound(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -276,9 +276,9 @@ func TestLeaseKeepAliveNotFound(t *testing.T) { } func TestLeaseGrantErrConnClosed(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -298,7 +298,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("le.Grant took too long") case <-donec: } @@ -308,9 +308,9 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { // queue is full thus dropping keepalive response sends, // keepalive request is sent with the same rate of TTL / 3. 
func TestLeaseKeepAliveFullResponseQueue(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lapi := clus.Client(0) @@ -348,9 +348,9 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) { } func TestLeaseGrantNewAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -368,16 +368,16 @@ func TestLeaseGrantNewAfterClose(t *testing.T) { close(donec) }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("le.Grant took too long") case <-donec: } } func TestLeaseRevokeNewAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -402,7 +402,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { } }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("le.Revoke took too long") case errMsg := <-errMsgCh: if errMsg != "" { @@ -414,9 +414,9 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { // TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed // following a disconnection, lease revoke, then reconnect. func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -460,9 +460,9 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { // TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if // the initial keep alive request never gets a response. func TestLeaseKeepAliveInitTimeout(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -493,9 +493,9 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) { // TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if // a keep alive request after the first never gets a response. 
func TestLeaseKeepAliveTTLTimeout(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -528,9 +528,9 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) { } func TestLeaseTimeToLive(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) c := clus.RandClient() @@ -586,9 +586,9 @@ func TestLeaseTimeToLive(t *testing.T) { } func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -621,9 +621,9 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) { } func TestLeaseLeases(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -654,9 +654,9 @@ func TestLeaseLeases(t *testing.T) { // TestLeaseRenewLostQuorum ensures keepalives work after losing quorum // for a while. func TestLeaseRenewLostQuorum(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -702,9 +702,9 @@ func TestLeaseRenewLostQuorum(t *testing.T) { } func TestLeaseKeepAliveLoopExit(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx := context.Background() @@ -727,8 +727,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) { // before, during, and after quorum loss to confirm Grant/KeepAlive tolerates // transient cluster failure. func TestV3LeaseFailureOverlap(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) numReqs := 5 @@ -780,9 +780,9 @@ func TestV3LeaseFailureOverlap(t *testing.T) { // TestLeaseWithRequireLeader checks keep-alive channel close when no leader. 
func TestLeaseWithRequireLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) c := clus.Client(0) diff --git a/tests/integration/clientv3/lease/leasing_test.go b/tests/integration/clientv3/lease/leasing_test.go index 3e935d8e3..60bd02719 100644 --- a/tests/integration/clientv3/lease/leasing_test.go +++ b/tests/integration/clientv3/lease/leasing_test.go @@ -28,13 +28,13 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" "go.etcd.io/etcd/client/v3/leasing" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestLeasingPutGet(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) lKV1, closeLKV1, err := leasing.NewKV(clus.Client(0), "foo/") @@ -91,8 +91,8 @@ func TestLeasingPutGet(t *testing.T) { // TestLeasingInterval checks the leasing KV fetches key intervals. func TestLeasingInterval(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -130,8 +130,8 @@ func TestLeasingInterval(t *testing.T) { // TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key. func TestLeasingPutInvalidateNew(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -160,8 +160,8 @@ func TestLeasingPutInvalidateNew(t *testing.T) { // TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key. func TestLeasingPutInvalidateExisting(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { @@ -194,8 +194,8 @@ func TestLeasingPutInvalidateExisting(t *testing.T) { // TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased. func TestLeasingGetNoLeaseTTL(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -223,8 +223,8 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) { // TestLeasingGetSerializable checks the leasing KV can make serialized requests // when the etcd cluster is partitioned. 
func TestLeasingGetSerializable(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -263,8 +263,8 @@ func TestLeasingGetSerializable(t *testing.T) { // TestLeasingPrevKey checks the cache respects WithPrevKV on puts. func TestLeasingPrevKey(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -289,8 +289,8 @@ func TestLeasingPrevKey(t *testing.T) { // TestLeasingRevGet checks the cache respects Get by Revision. func TestLeasingRevGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -325,8 +325,8 @@ func TestLeasingRevGet(t *testing.T) { // TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server. func TestLeasingGetWithOpts(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -370,8 +370,8 @@ func TestLeasingGetWithOpts(t *testing.T) { // TestLeasingConcurrentPut ensures that a get after concurrent puts returns // the recently put data. 
func TestLeasingConcurrentPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -417,8 +417,8 @@ func TestLeasingConcurrentPut(t *testing.T) { } func TestLeasingDisconnectedGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -446,8 +446,8 @@ func TestLeasingDisconnectedGet(t *testing.T) { } func TestLeasingDeleteOwner(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -480,8 +480,8 @@ func TestLeasingDeleteOwner(t *testing.T) { } func TestLeasingDeleteNonOwner(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -515,8 +515,8 @@ func TestLeasingDeleteNonOwner(t *testing.T) { } func TestLeasingOverwriteResponse(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -549,8 +549,8 @@ func TestLeasingOverwriteResponse(t *testing.T) { } func TestLeasingOwnerPutResponse(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -587,8 +587,8 @@ func TestLeasingOwnerPutResponse(t *testing.T) { } func TestLeasingTxnOwnerGetRange(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -616,8 +616,8 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) { } func TestLeasingTxnOwnerGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) client := clus.Client(0) @@ -702,8 +702,8 @@ func TestLeasingTxnOwnerGet(t *testing.T) { } func TestLeasingTxnOwnerDeleteRange(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := 
integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -741,8 +741,8 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) { } func TestLeasingTxnOwnerDelete(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -772,8 +772,8 @@ func TestLeasingTxnOwnerDelete(t *testing.T) { } func TestLeasingTxnOwnerIf(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -866,8 +866,8 @@ func TestLeasingTxnOwnerIf(t *testing.T) { } func TestLeasingTxnCancel(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -900,8 +900,8 @@ func TestLeasingTxnCancel(t *testing.T) { } func TestLeasingTxnNonOwnerPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -978,8 +978,8 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) { // TestLeasingTxnRandIfThenOrElse randomly leases keys two separate clients, then // issues a random If/{Then,Else} transaction on those keys to one client. 
func TestLeasingTxnRandIfThenOrElse(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "pfx/") @@ -1084,8 +1084,8 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) { } func TestLeasingOwnerPutError(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -1105,8 +1105,8 @@ func TestLeasingOwnerPutError(t *testing.T) { } func TestLeasingOwnerDeleteError(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -1126,8 +1126,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) { } func TestLeasingNonOwnerPutError(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") @@ -1151,8 +1151,8 @@ func TestLeasingOwnerDeleteFrom(t *testing.T) { } func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "0/") @@ -1200,8 +1200,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { } func TestLeasingDeleteRangeBounds(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/") @@ -1258,8 +1258,8 @@ func TestLeaseDeleteRangeContendDel(t *testing.T) { } func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/") @@ -1316,8 +1316,8 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { } func TestLeasingPutGetDeleteConcurrent(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkvs := make([]clientv3.KV, 16) @@ -1375,8 +1375,8 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) { // TestLeasingReconnectOwnerRevoke checks that 
revocation works if // disconnected when trying to submit revoke txn. func TestLeasingReconnectOwnerRevoke(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/") @@ -1436,8 +1436,8 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) { // TestLeasingReconnectOwnerRevokeCompact checks that revocation works if // disconnected and the watch is compacted. func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/") @@ -1489,8 +1489,8 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { // TestLeasingReconnectOwnerConsistency checks a write error on an owner will // not cause inconsistency between the server and the client. func TestLeasingReconnectOwnerConsistency(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1563,8 +1563,8 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) { } func TestLeasingTxnAtomicCache(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1649,8 +1649,8 @@ func TestLeasingTxnAtomicCache(t *testing.T) { // TestLeasingReconnectTxn checks that Txn is resilient to disconnects. func TestLeasingReconnectTxn(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1685,8 +1685,8 @@ func TestLeasingReconnectTxn(t *testing.T) { // TestLeasingReconnectNonOwnerGet checks a get error on an owner will // not cause inconsistency between the server and the client. 
func TestLeasingReconnectNonOwnerGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1736,8 +1736,8 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) { } func TestLeasingTxnRangeCmp(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1771,8 +1771,8 @@ func TestLeasingTxnRangeCmp(t *testing.T) { } func TestLeasingDo(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1813,8 +1813,8 @@ func TestLeasingDo(t *testing.T) { } func TestLeasingTxnOwnerPutBranch(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") @@ -1907,8 +1907,8 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the } func TestLeasingSessionExpire(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1)) @@ -1931,7 +1931,7 @@ func TestLeasingSessionExpire(t *testing.T) { } waitForExpireAck(t, lkv) clus.Members[0].Restart(t) - integration.WaitClientV3(t, lkv2) + integration2.WaitClientV3(t, lkv2) if _, err = lkv2.Put(context.TODO(), "abc", "def"); err != nil { t.Fatal(err) } @@ -1983,8 +1983,8 @@ func TestLeasingSessionExpireCancel(t *testing.T) { } for i := range tests { t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1)) diff --git a/tests/integration/clientv3/maintenance_test.go b/tests/integration/clientv3/maintenance_test.go index e48a4a4fa..945fde082 100644 --- a/tests/integration/clientv3/maintenance_test.go +++ b/tests/integration/clientv3/maintenance_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" "google.golang.org/grpc" @@ -34,13 +35,12 @@ import ( "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/tests/v3/integration" ) func TestMaintenanceHashKV(t *testing.T) { - 
integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) for i := 0; i < 3; i++ { @@ -71,9 +71,9 @@ func TestMaintenanceHashKV(t *testing.T) { } func TestMaintenanceMoveLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) oldLeadIdx := clus.WaitLeader(t) @@ -102,9 +102,9 @@ func TestMaintenanceMoveLeader(t *testing.T) { // TestMaintenanceSnapshotCancel ensures that context cancel // before snapshot reading returns corresponding context errors. func TestMaintenanceSnapshotCancel(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // reading snapshot with canceled context should error out @@ -145,9 +145,9 @@ func TestMaintenanceSnapshotTimeout(t *testing.T) { // testMaintenanceSnapshotTimeout given snapshot function ensures that it // returns corresponding context errors when context timeout happened before snapshot reading func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // reading snapshot with deadline exceeded should error out @@ -190,9 +190,9 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) { // testMaintenanceSnapshotErrorInflight given snapshot function ensures that ReaderCloser returned by it // will fail to read with corresponding context errors on inflight context cancel timeout. func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) // take about 1-second to read snapshot @@ -247,10 +247,10 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co // TestMaintenanceSnapshotWithVersionVersion ensures that SnapshotWithVersion returns correct version value. 
func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // Set SnapshotCount to 1 to force raft snapshot to ensure that storage version is set - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SnapshotCount: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SnapshotCount: 1}) defer clus.Terminate(t) // Put some keys to ensure that wal snapshot is triggered @@ -270,9 +270,9 @@ func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { } func TestMaintenanceStatus(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) clus.WaitLeader(t) @@ -282,7 +282,7 @@ func TestMaintenanceStatus(t *testing.T) { eps[i] = clus.Members[i].GRPCURL() } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/metrics_test.go b/tests/integration/clientv3/metrics_test.go index 4e2202cee..07c36a81c 100644 --- a/tests/integration/clientv3/metrics_test.go +++ b/tests/integration/clientv3/metrics_test.go @@ -25,17 +25,16 @@ import ( "testing" "time" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" - grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/client/v3" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) func TestV3ClientMetrics(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) var ( addr = "localhost:27989" @@ -71,7 +70,7 @@ func TestV3ClientMetrics(t *testing.T) { url := "unix://" + addr + "/metrics" - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -81,7 +80,7 @@ func TestV3ClientMetrics(t *testing.T) { grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), }, } - cli, cerr := integration.NewClient(t, cfg) + cli, cerr := integration2.NewClient(t, cfg) if cerr != nil { t.Fatal(cerr) } diff --git a/tests/integration/clientv3/mirror_test.go b/tests/integration/clientv3/mirror_test.go index c9246e0f2..36dc71dcc 100644 --- a/tests/integration/clientv3/mirror_test.go +++ b/tests/integration/clientv3/mirror_test.go @@ -24,13 +24,13 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3/mirror" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestMirrorSync(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.Client(0) @@ -72,9 +72,9 @@ func TestMirrorSync(t *testing.T) { } func TestMirrorSyncBase(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, 
&integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) cli := cluster.Client(0) diff --git a/tests/integration/clientv3/namespace_test.go b/tests/integration/clientv3/namespace_test.go index b7e07f4f3..4b76ca34e 100644 --- a/tests/integration/clientv3/namespace_test.go +++ b/tests/integration/clientv3/namespace_test.go @@ -22,13 +22,13 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestNamespacePutGet(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.Client(0) @@ -55,9 +55,9 @@ func TestNamespacePutGet(t *testing.T) { } func TestNamespaceWatch(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.Client(0) diff --git a/tests/integration/clientv3/naming/endpoints_test.go b/tests/integration/clientv3/naming/endpoints_test.go index 707616851..d5cbbf8cf 100644 --- a/tests/integration/clientv3/naming/endpoints_test.go +++ b/tests/integration/clientv3/naming/endpoints_test.go @@ -21,14 +21,13 @@ import ( etcd "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/naming/endpoints" - - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestEndpointManager(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) em, err := endpoints.NewManager(clus.RandClient(), "foo") @@ -88,9 +87,9 @@ func TestEndpointManager(t *testing.T) { // correctly with multiple hosts and correctly receive multiple // updates in a single revision. 
func TestEndpointManagerAtomicity(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) c := clus.RandClient() @@ -130,9 +129,9 @@ func TestEndpointManagerAtomicity(t *testing.T) { } func TestEndpointManagerCRUD(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) em, err := endpoints.NewManager(clus.RandClient(), "foo") diff --git a/tests/integration/clientv3/naming/resolver_test.go b/tests/integration/clientv3/naming/resolver_test.go index 980580c16..445ebca86 100644 --- a/tests/integration/clientv3/naming/resolver_test.go +++ b/tests/integration/clientv3/naming/resolver_test.go @@ -23,8 +23,7 @@ import ( "go.etcd.io/etcd/client/v3/naming/endpoints" "go.etcd.io/etcd/client/v3/naming/resolver" "go.etcd.io/etcd/pkg/v3/grpc_testing" - "go.etcd.io/etcd/tests/v3/integration" - + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -32,7 +31,7 @@ import ( // This test mimics scenario described in grpc_naming.md doc. func TestEtcdGrpcResolver(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) s1PayloadBody := []byte{'1'} s1 := grpc_testing.NewDummyStubServer(s1PayloadBody) @@ -48,7 +47,7 @@ func TestEtcdGrpcResolver(t *testing.T) { } defer s2.Stop() - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) em, err := endpoints.NewManager(clus.Client(0), "foo") diff --git a/tests/integration/clientv3/ordering_kv_test.go b/tests/integration/clientv3/ordering_kv_test.go index b6b3ce71f..f213d497c 100644 --- a/tests/integration/clientv3/ordering_kv_test.go +++ b/tests/integration/clientv3/ordering_kv_test.go @@ -23,14 +23,14 @@ import ( "github.com/stretchr/testify/assert" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestDetectKvOrderViolation(t *testing.T) { var errOrderViolation = errors.New("DetectedOrderViolation") - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -40,7 +40,7 @@ func TestDetectKvOrderViolation(t *testing.T) { clus.Members[2].GRPCURL(), }, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -96,8 +96,8 @@ func TestDetectKvOrderViolation(t *testing.T) { func TestDetectTxnOrderViolation(t *testing.T) { var errOrderViolation = errors.New("DetectedOrderViolation") - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -107,7 +107,7 @@ func TestDetectTxnOrderViolation(t *testing.T) { 
clus.Members[2].GRPCURL(), }, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/ordering_util_test.go b/tests/integration/clientv3/ordering_util_test.go index a4b65ec38..701be05b8 100644 --- a/tests/integration/clientv3/ordering_util_test.go +++ b/tests/integration/clientv3/ordering_util_test.go @@ -21,12 +21,12 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestEndpointSwitchResolvesViolation(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) eps := []string{ clus.Members[0].GRPCURL(), @@ -34,7 +34,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { clus.Members[2].GRPCURL(), } cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}} - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -79,8 +79,8 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { } func TestUnresolvableOrderViolation(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true}) defer clus.Terminate(t) cfg := clientv3.Config{ Endpoints: []string{ @@ -91,7 +91,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { clus.Members[4].GRPCURL(), }, } - cli, err := integration.NewClient(t, cfg) + cli, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/role_test.go b/tests/integration/clientv3/role_test.go index 62ba49d3a..a10e6f648 100644 --- a/tests/integration/clientv3/role_test.go +++ b/tests/integration/clientv3/role_test.go @@ -19,13 +19,13 @@ import ( "testing" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestRoleError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clus.RandClient() diff --git a/tests/integration/clientv3/snapshot/v3_snapshot_test.go b/tests/integration/clientv3/snapshot/v3_snapshot_test.go index c61188981..f6188b323 100644 --- a/tests/integration/clientv3/snapshot/v3_snapshot_test.go +++ b/tests/integration/clientv3/snapshot/v3_snapshot_test.go @@ -29,7 +29,7 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/snapshot" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" ) @@ -78,7 +78,7 @@ func newEmbedConfig(t *testing.T) *embed.Config { clusterN := 1 urls := newEmbedURLs(clusterN * 2) cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration.NewEmbedConfig(t, "default") + cfg := integration2.NewEmbedConfig(t, "default") cfg.ClusterState = "new" cfg.LCUrls, cfg.ACUrls = cURLs, cURLs cfg.LPUrls, cfg.APUrls 
= pURLs, pURLs @@ -105,7 +105,7 @@ func createSnapshotFile(t *testing.T, cfg *embed.Config, kvs []kv) (version stri } ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/clientv3/txn_test.go b/tests/integration/clientv3/txn_test.go index 679b9868f..b23573605 100644 --- a/tests/integration/clientv3/txn_test.go +++ b/tests/integration/clientv3/txn_test.go @@ -23,13 +23,13 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestTxnError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.RandClient() @@ -51,9 +51,9 @@ func TestTxnError(t *testing.T) { } func TestTxnWriteFail(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kv := clus.Client(0) @@ -101,9 +101,9 @@ func TestTxnWriteFail(t *testing.T) { func TestTxnReadRetry(t *testing.T) { t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request") - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kv := clus.Client(0) @@ -140,9 +140,9 @@ func TestTxnReadRetry(t *testing.T) { } func TestTxnSuccess(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.Client(0) @@ -163,9 +163,9 @@ func TestTxnSuccess(t *testing.T) { } func TestTxnCompareRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kv := clus.Client(0) @@ -190,9 +190,9 @@ func TestTxnCompareRange(t *testing.T) { } func TestTxnNested(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) kv := clus.Client(0) diff --git a/tests/integration/clientv3/user_test.go b/tests/integration/clientv3/user_test.go index fe8b4cde2..f0fe73a73 100644 --- a/tests/integration/clientv3/user_test.go +++ b/tests/integration/clientv3/user_test.go @@ -21,14 +21,14 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) func TestUserError(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, 
&integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clus.RandClient() @@ -55,9 +55,9 @@ func TestUserError(t *testing.T) { } func TestUserErrorAuth(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clus.RandClient() @@ -75,16 +75,16 @@ func TestUserErrorAuth(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cfg.Username, cfg.Password = "wrong-id", "123" - if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { + if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err) } cfg.Username, cfg.Password = "root", "wrong-pass" - if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { + if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err) } cfg.Username, cfg.Password = "root", "123" - authed, err := integration.NewClient(t, cfg) + authed, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } @@ -112,9 +112,9 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) { // Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode. func TestGetTokenWithoutAuth(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2}) defer clus.Terminate(t) authapi := clus.RandClient() @@ -135,7 +135,7 @@ func TestGetTokenWithoutAuth(t *testing.T) { Password: "123", } - client, err = integration.NewClient(t, cfg) + client, err = integration2.NewClient(t, cfg) if err == nil { defer client.Close() } diff --git a/tests/integration/clientv3/watch_fragment_test.go b/tests/integration/clientv3/watch_fragment_test.go index 7f564fe10..ab2367932 100644 --- a/tests/integration/clientv3/watch_fragment_test.go +++ b/tests/integration/clientv3/watch_fragment_test.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestWatchFragmentDisable ensures that large watch @@ -64,16 +64,16 @@ func TestWatchFragmentEnableWithGRPCLimit(t *testing.T) { // testWatchFragment triggers watch response that spans over multiple // revisions exceeding server request limits when combined. 
func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cfg := &integration.ClusterConfig{ + cfg := &integration2.ClusterConfig{ Size: 1, MaxRequestBytes: 1.5 * 1024 * 1024, } if exceedRecvLimit { cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024 } - clus := integration.NewClusterV3(t, cfg) + clus := integration2.NewClusterV3(t, cfg) defer clus.Terminate(t) cli := clus.Client(0) diff --git a/tests/integration/clientv3/watch_test.go b/tests/integration/clientv3/watch_test.go index 7a992ecf9..f7ca77039 100644 --- a/tests/integration/clientv3/watch_test.go +++ b/tests/integration/clientv3/watch_test.go @@ -29,14 +29,14 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc/metadata" ) type watcherTest func(*testing.T, *watchctx) type watchctx struct { - clus *integration.ClusterV3 + clus *integration2.ClusterV3 w clientv3.Watcher kv clientv3.KV wclientMember int @@ -45,9 +45,9 @@ type watchctx struct { } func runWatchTest(t *testing.T, f watcherTest) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) wclientMember := rand.Intn(3) @@ -299,7 +299,7 @@ func TestWatchCancelRunning(t *testing.T) { } func testWatchCancelRunning(t *testing.T, wctx *watchctx) { - integration.BeforeTest(t) + integration2.BeforeTest(t) ctx, cancel := context.WithCancel(context.Background()) if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil { @@ -347,8 +347,8 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) { } func TestWatchResumeInitRev(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -402,9 +402,9 @@ func TestWatchResumeInitRev(t *testing.T) { // either a compaction error or all keys by staying in sync before the compaction // is finally applied. func TestWatchResumeCompacted(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) // create a waiting watcher at rev 1 @@ -489,9 +489,9 @@ func TestWatchResumeCompacted(t *testing.T) { // TestWatchCompactRevision ensures the CompactRevision error is given on a // compaction event ahead of a watcher. 
func TestWatchCompactRevision(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) // set some keys @@ -531,7 +531,7 @@ func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNot func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) } func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { - integration.BeforeTest(t) + integration2.BeforeTest(t) // accelerate report interval so test terminates quickly oldpi := v3rpc.GetProgressReportInterval() @@ -540,7 +540,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { pi := 3 * time.Second defer func() { v3rpc.SetProgressReportInterval(oldpi) }() - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) wc := clus.RandClient() @@ -585,11 +585,11 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { } func TestConfigurableWatchProgressNotifyInterval(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) progressInterval := 200 * time.Millisecond - clus := integration.NewClusterV3(t, - &integration.ClusterConfig{ + clus := integration2.NewClusterV3(t, + &integration2.ClusterConfig{ Size: 3, WatchProgressNotifyInterval: progressInterval, }) @@ -611,7 +611,7 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) { } func TestWatchRequestProgress(t *testing.T) { - if integration.ThroughProxy { + if integration2.ThroughProxy { t.Skipf("grpc-proxy does not support WatchProgress yet") } testCases := []struct { @@ -625,11 +625,11 @@ func TestWatchRequestProgress(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) watchTimeout := 3 * time.Second - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) wc := clus.RandClient() @@ -686,9 +686,9 @@ func TestWatchRequestProgress(t *testing.T) { } func TestWatchEventType(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -760,9 +760,9 @@ func TestWatchEventType(t *testing.T) { } func TestWatchErrConnClosed(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -783,16 +783,16 @@ func TestWatchErrConnClosed(t *testing.T) { clus.TakeClient(0) select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("wc.Watch took too long") case <-donec: } } func TestWatchAfterClose(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) @@ -810,7 +810,7 @@ func 
TestWatchAfterClose(t *testing.T) { close(donec) }() select { - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("wc.Watch took too long") case <-donec: } @@ -818,9 +818,9 @@ func TestWatchAfterClose(t *testing.T) { // TestWatchWithRequireLeader checks the watch channel closes when no leader. func TestWatchWithRequireLeader(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3}) defer clus.Terminate(t) // Put a key for the non-require leader watch to read as an event. @@ -856,7 +856,7 @@ func TestWatchWithRequireLeader(t *testing.T) { if resp.Err() != rpctypes.ErrNoLeader { t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp) } - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("watch without leader took too long to close") } @@ -865,7 +865,7 @@ func TestWatchWithRequireLeader(t *testing.T) { if ok { t.Fatalf("expected closed channel, got response %v", resp) } - case <-time.After(integration.RequestWaitTimeout): + case <-time.After(integration2.RequestWaitTimeout): t.Fatal("waited too long for channel to close") } @@ -892,9 +892,9 @@ func TestWatchWithRequireLeader(t *testing.T) { // TestWatchWithFilter checks that watch filtering works. func TestWatchWithFilter(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -931,9 +931,9 @@ func TestWatchWithFilter(t *testing.T) { // TestWatchWithCreatedNotification checks that WithCreatedNotify returns a // Created watch response. func TestWatchWithCreatedNotification(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -953,9 +953,9 @@ func TestWatchWithCreatedNotification(t *testing.T) { // a watcher with created notify does not post duplicate // created events from disconnect. func TestWatchWithCreatedNotificationDropConn(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -982,9 +982,9 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) { // TestWatchCancelOnServer ensures client watcher cancels propagate back to the server. func TestWatchCancelOnServer(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer cluster.Terminate(t) client := cluster.RandClient() @@ -1050,20 +1050,20 @@ func TestWatchCancelOnServer(t *testing.T) { // 4. watcher client finishes tearing down stream on "ctx" // 5. 
w2 comes back canceled func TestWatchOverlapContextCancel(t *testing.T) { - f := func(clus *integration.ClusterV3) {} + f := func(clus *integration2.ClusterV3) {} testWatchOverlapContextCancel(t, f) } func TestWatchOverlapDropConnContextCancel(t *testing.T) { - f := func(clus *integration.ClusterV3) { + f := func(clus *integration2.ClusterV3) { clus.Members[0].Bridge().DropConnections() } testWatchOverlapContextCancel(t, f) } -func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) +func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.ClusterV3)) { + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) n := 100 @@ -1123,8 +1123,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) // TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately // closing the client does not return a client closing error. func TestWatchCancelAndCloseClient(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) ctx, cancel := context.WithCancel(context.Background()) @@ -1153,8 +1153,8 @@ func TestWatchCancelAndCloseClient(t *testing.T) { // to put them in resuming mode, cancels them so some resumes by cancel fail, // then closes the watcher interface to ensure correct clean up. func TestWatchStressResumeClose(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.Client(0) @@ -1175,8 +1175,8 @@ func TestWatchStressResumeClose(t *testing.T) { // TestWatchCancelDisconnected ensures canceling a watcher works when // its grpc stream is disconnected / reconnecting. 
func TestWatchCancelDisconnected(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go index 2fb5a18d9..443d97d42 100644 --- a/tests/integration/cluster_test.go +++ b/tests/integration/cluster_test.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/client/v2" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/tests/v3/framework/integration" ) func init() { @@ -34,7 +35,7 @@ func init() { log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile) if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" { if i, err := strconv.ParseInt(t, 10, 64); err == nil { - electionTicks = int(i) + integration.ElectionTicks = int(i) } } } @@ -43,16 +44,16 @@ func TestClusterOf1(t *testing.T) { testCluster(t, 1) } func TestClusterOf3(t *testing.T) { testCluster(t, 3) } func testCluster(t *testing.T, size int) { - BeforeTest(t) - c := NewCluster(t, size) + integration.BeforeTest(t) + c := integration.NewCluster(t, size) c.Launch(t) defer c.Terminate(t) clusterMustProgress(t, c.Members) } func TestTLSClusterOf3(t *testing.T) { - BeforeTest(t) - c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo}) + integration.BeforeTest(t) + c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo}) c.Launch(t) defer c.Terminate(t) clusterMustProgress(t, c.Members) @@ -61,8 +62,8 @@ func TestTLSClusterOf3(t *testing.T) { // Test that a cluster can progress when using separate client and server certs when peering. This supports certificate // authorities that don't issue dual-usage certificates. 
func TestTLSClusterOf3WithSpecificUsage(t *testing.T) { - BeforeTest(t) - c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfoWithSpecificUsage}) + integration.BeforeTest(t) + c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage}) c.Launch(t) defer c.Terminate(t) clusterMustProgress(t, c.Members) @@ -72,22 +73,22 @@ func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1 func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) } func testClusterUsingDiscovery(t *testing.T, size int) { - BeforeTest(t) - dc := NewCluster(t, 1) + integration.BeforeTest(t) + dc := integration.NewCluster(t, 1) dc.Launch(t) defer dc.Terminate(t) // init discovery token space - dcc := MustNewHTTPClient(t, dc.URLs(), nil) + dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil) dkapi := client.NewKeysAPI(dcc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil { t.Fatal(err) } cancel() - c := NewClusterByConfig( + c := integration.NewClusterByConfig( t, - &ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"}, + &integration.ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"}, ) c.Launch(t) defer c.Terminate(t) @@ -95,23 +96,23 @@ func testClusterUsingDiscovery(t *testing.T, size int) { } func TestTLSClusterOf3UsingDiscovery(t *testing.T) { - BeforeTest(t) - dc := NewCluster(t, 1) + integration.BeforeTest(t) + dc := integration.NewCluster(t, 1) dc.Launch(t) defer dc.Terminate(t) // init discovery token space - dcc := MustNewHTTPClient(t, dc.URLs(), nil) + dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil) dkapi := client.NewKeysAPI(dcc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil { t.Fatal(err) } cancel() - c := NewClusterByConfig(t, - &ClusterConfig{ + c := integration.NewClusterByConfig(t, + &integration.ClusterConfig{ Size: 3, - PeerTLS: &testTLSInfo, + PeerTLS: &integration.TestTLSInfo, DiscoveryURL: dc.URL(0) + "/v2/keys"}, ) c.Launch(t) @@ -123,8 +124,8 @@ func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) } func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) } func testDoubleClusterSize(t *testing.T, size int) { - BeforeTest(t) - c := NewCluster(t, size) + integration.BeforeTest(t) + c := integration.NewCluster(t, size) c.Launch(t) defer c.Terminate(t) @@ -135,8 +136,8 @@ func testDoubleClusterSize(t *testing.T, size int) { } func TestDoubleTLSClusterSizeOf3(t *testing.T) { - BeforeTest(t) - c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo}) + integration.BeforeTest(t) + c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo}) c.Launch(t) defer c.Terminate(t) @@ -150,16 +151,16 @@ func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) } func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) } func testDecreaseClusterSize(t *testing.T, size int) { - BeforeTest(t) - c := NewCluster(t, size) + integration.BeforeTest(t) + c := integration.NewCluster(t, size) c.Launch(t) defer c.Terminate(t) 
// TODO: remove the last but one member for i := 0; i < size-1; i++ { - id := c.Members[len(c.Members)-1].s.ID() + id := c.Members[len(c.Members)-1].Server.ID() // may hit second leader election on slow machines - if err := c.removeMember(t, uint64(id)); err != nil { + if err := c.RemoveMember(t, uint64(id)); err != nil { if strings.Contains(err.Error(), "no leader") { t.Logf("got leader error (%v)", err) i-- @@ -167,24 +168,24 @@ func testDecreaseClusterSize(t *testing.T, size int) { } t.Fatal(err) } - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) } clusterMustProgress(t, c.Members) } func TestForceNewCluster(t *testing.T) { - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) c.Launch(t) - cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) + cc := integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) resp, err := kapi.Create(ctx, "/foo", "bar") if err != nil { t.Fatalf("unexpected create error: %v", err) } cancel() // ensure create has been applied in this machine - ctx, cancel = context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil { t.Fatalf("unexpected watch error: %v", err) } @@ -199,13 +200,13 @@ func TestForceNewCluster(t *testing.T) { t.Fatalf("unexpected ForceRestart error: %v", err) } defer c.Members[0].Terminate(t) - c.waitLeader(t, c.Members[:1]) + c.WaitMembersForLeader(t, c.Members[:1]) // use new http client to init new connection - cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) + cc = integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil) kapi = client.NewKeysAPI(cc) - // ensure force restart keep the old data, and new cluster can make progress - ctx, cancel = context.WithTimeout(context.Background(), requestTimeout) + // ensure force restart keep the old data, and new Cluster can make progress + ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil { t.Fatalf("unexpected watch error: %v", err) } @@ -214,38 +215,38 @@ func TestForceNewCluster(t *testing.T) { } func TestAddMemberAfterClusterFullRotation(t *testing.T) { - BeforeTest(t) - c := NewCluster(t, 3) + integration.BeforeTest(t) + c := integration.NewCluster(t, 3) c.Launch(t) defer c.Terminate(t) // remove all the previous three members and add in three new members. for i := 0; i < 3; i++ { - c.RemoveMember(t, uint64(c.Members[0].s.ID())) - c.waitLeader(t, c.Members) + c.MustRemoveMember(t, uint64(c.Members[0].Server.ID())) + c.WaitMembersForLeader(t, c.Members) c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) } c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } // Ensure we can remove a member then add a new one back immediately. 
func TestIssue2681(t *testing.T) { - BeforeTest(t) - c := NewCluster(t, 5) + integration.BeforeTest(t) + c := integration.NewCluster(t, 5) c.Launch(t) defer c.Terminate(t) - c.RemoveMember(t, uint64(c.Members[4].s.ID())) - c.waitLeader(t, c.Members) + c.MustRemoveMember(t, uint64(c.Members[4].Server.ID())) + c.WaitMembersForLeader(t, c.Members) c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } @@ -256,8 +257,8 @@ func TestIssue2746(t *testing.T) { testIssue2746(t, 5) } func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) } func testIssue2746(t *testing.T, members int) { - BeforeTest(t) - c := NewCluster(t, members) + integration.BeforeTest(t) + c := integration.NewCluster(t, members) for _, m := range c.Members { m.SnapshotCount = 10 @@ -271,32 +272,32 @@ func testIssue2746(t *testing.T, members int) { clusterMustProgress(t, c.Members) } - c.RemoveMember(t, uint64(c.Members[members-1].s.ID())) - c.waitLeader(t, c.Members) + c.MustRemoveMember(t, uint64(c.Members[members-1].Server.ID())) + c.WaitMembersForLeader(t, c.Members) c.AddMember(t) - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } // Ensure etcd will not panic when removing a just started member. func TestIssue2904(t *testing.T) { - BeforeTest(t) - // start 1-member cluster to ensure member 0 is the leader of the cluster. - c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + // start 1-member Cluster to ensure member 0 is the leader of the Cluster. + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) c.Launch(t) defer c.Terminate(t) c.AddMember(t) c.Members[1].Stop(t) - // send remove member-1 request to the cluster. - cc := MustNewHTTPClient(t, c.URLs(), nil) + // send remove member-1 request to the Cluster. + cc := integration.MustNewHTTPClient(t, c.URLs(), nil) ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) // the proposal is not committed because member 1 is stopped, but the - // proposal is appended to leader's raft log. - ma.Remove(ctx, c.Members[1].s.ID().String()) + // proposal is appended to leader'Server raft log. + ma.Remove(ctx, c.Members[1].Server.ID().String()) cancel() // restart member, and expect it to send UpdateAttributes request. @@ -305,21 +306,21 @@ func TestIssue2904(t *testing.T) { c.Members[1].Restart(t) // when the member comes back, it ack the proposal to remove itself, // and apply it. - <-c.Members[1].s.StopNotify() + <-c.Members[1].Server.StopNotify() // terminate removed member c.Members[1].Terminate(t) c.Members = c.Members[:1] // wait member to be removed. - c.waitMembersMatch(t, c.HTTPMembers()) + c.WaitMembersMatch(t, c.HTTPMembers()) } // TestIssue3699 tests minority failure during cluster configuration; it was // deadlocking. 
func TestIssue3699(t *testing.T) { - // start a cluster of 3 nodes a, b, c - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + // start a Cluster of 3 nodes a, b, c + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) c.Launch(t) defer c.Terminate(t) @@ -330,16 +331,16 @@ func TestIssue3699(t *testing.T) { c.AddMember(t) // electing node d as leader makes node a unable to participate - leaderID := c.waitLeader(t, c.Members) + leaderID := c.WaitMembersForLeader(t, c.Members) for leaderID != 3 { c.Members[leaderID].Stop(t) - <-c.Members[leaderID].s.StopNotify() + <-c.Members[leaderID].Server.StopNotify() // do not restart the killed member immediately. // the member will advance its election timeout after restart, // so it will have a better chance to become the leader again. - time.Sleep(time.Duration(electionTicks * int(tickDuration))) + time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration))) c.Members[leaderID].Restart(t) - leaderID = c.waitLeader(t, c.Members) + leaderID = c.WaitMembersForLeader(t, c.Members) } // bring back node a @@ -351,17 +352,17 @@ func TestIssue3699(t *testing.T) { // waiting for ReadyNotify can take several seconds case <-time.After(10 * time.Second): t.Fatalf("waited too long for ready notification") - case <-c.Members[0].s.StopNotify(): + case <-c.Members[0].Server.StopNotify(): t.Fatalf("should not be stopped") - case <-c.Members[0].s.ReadyNotify(): + case <-c.Members[0].Server.ReadyNotify(): } - // must waitLeader so goroutines don't leak on terminate - c.waitLeader(t, c.Members) + // must WaitMembersForLeader so goroutines don't leak on terminate + c.WaitMembersForLeader(t, c.Members) - // try to participate in cluster - cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS) + // try to participate in Cluster + cc := integration.MustNewHTTPClient(t, []string{c.URL(0)}, c.Cfg.ClientTLS) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil { t.Fatalf("unexpected error on Set (%v)", err) } @@ -370,21 +371,21 @@ func TestIssue3699(t *testing.T) { // TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members. 
func TestRejectUnhealthyAdd(t *testing.T) { - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) for _, m := range c.Members { m.ServerConfig.StrictReconfigCheck = true } c.Launch(t) defer c.Terminate(t) - // make cluster unhealthy and wait for downed peer + // make Cluster unhealthy and wait for downed peer c.Members[0].Stop(t) c.WaitLeader(t) // all attempts to add member should fail for i := 1; i < len(c.Members); i++ { - err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345") + err := c.AddMemberByURL(t, c.URL(i), "unix://foo:12345") if err == nil { t.Fatalf("should have failed adding peer") } @@ -399,23 +400,23 @@ func TestRejectUnhealthyAdd(t *testing.T) { c.WaitLeader(t) time.Sleep(2 * etcdserver.HealthInterval) - // add member should succeed now that it's healthy + // add member should succeed now that it'Server healthy var err error for i := 1; i < len(c.Members); i++ { - if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil { + if err = c.AddMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil { break } } if err != nil { - t.Fatalf("should have added peer to healthy cluster (%v)", err) + t.Fatalf("should have added peer to healthy Cluster (%v)", err) } } // TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members // if quorum will be lost. func TestRejectUnhealthyRemove(t *testing.T) { - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 5, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 5, UseBridge: true}) for _, m := range c.Members { m.ServerConfig.StrictReconfigCheck = true } @@ -428,7 +429,7 @@ func TestRejectUnhealthyRemove(t *testing.T) { c.WaitLeader(t) // reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum - err := c.removeMember(t, uint64(c.Members[2].s.ID())) + err := c.RemoveMember(t, uint64(c.Members[2].Server.ID())) if err == nil { t.Fatalf("should reject quorum breaking remove") } @@ -438,10 +439,10 @@ func TestRejectUnhealthyRemove(t *testing.T) { } // member stopped after launch; wait for missing heartbeats - time.Sleep(time.Duration(electionTicks * int(tickDuration))) + time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration))) // permit remove dead member since (3,2) - (0,1) => (3,1) has quorum - if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil { + if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil { t.Fatalf("should accept removing down member") } @@ -452,7 +453,7 @@ func TestRejectUnhealthyRemove(t *testing.T) { time.Sleep((3 * etcdserver.HealthInterval) / 2) // accept remove member since (4,1)-(1,0) => (3,1) has quorum - if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil { + if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil { t.Fatalf("expected to remove member, got error %v", err) } } @@ -461,10 +462,10 @@ func TestRejectUnhealthyRemove(t *testing.T) { // if 'initial-cluster-state' is set 'new' and old data directory still exists // (see https://github.com/etcd-io/etcd/issues/7512 for more). func TestRestartRemoved(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - // 1. start single-member cluster - c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true}) + // 1. 
start single-member Cluster + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) for _, m := range c.Members { m.ServerConfig.StrictReconfigCheck = true } @@ -476,10 +477,10 @@ func TestRestartRemoved(t *testing.T) { c.WaitLeader(t) oldm := c.Members[0] - oldm.keepDataDirTerminate = true + oldm.KeepDataDirTerminate = true // 3. remove first member, shut down without deleting data - if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil { + if err := c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil { t.Fatalf("expected to remove member, got error %v", err) } c.WaitLeader(t) @@ -495,7 +496,7 @@ func TestRestartRemoved(t *testing.T) { os.RemoveAll(oldm.ServerConfig.DataDir) }() select { - case <-oldm.s.StopNotify(): + case <-oldm.Server.StopNotify(): case <-time.After(time.Minute): t.Fatalf("removed member didn't exit within %v", time.Minute) } @@ -504,8 +505,8 @@ func TestRestartRemoved(t *testing.T) { // clusterMustProgress ensures that cluster can make progress. It creates // a random key first, and check the new key could be got from all client urls // of the cluster. -func clusterMustProgress(t *testing.T, membs []*member) { - cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil) +func clusterMustProgress(t *testing.T, membs []*integration.Member) { + cc := integration.MustNewHTTPClient(t, []string{membs[0].URL()}, nil) kapi := client.NewKeysAPI(cc) key := fmt.Sprintf("foo%d", rand.Int()) var ( @@ -514,7 +515,7 @@ func clusterMustProgress(t *testing.T, membs []*member) { ) // retry in case of leader loss induced by slow CI for i := 0; i < 3; i++ { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) resp, err = kapi.Create(ctx, "/"+key, "bar") cancel() if err == nil { @@ -528,9 +529,9 @@ func clusterMustProgress(t *testing.T, membs []*member) { for i, m := range membs { u := m.URL() - mcc := MustNewHTTPClient(t, []string{u}, nil) + mcc := integration.MustNewHTTPClient(t, []string{u}, nil) mkapi := client.NewKeysAPI(mcc) - mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout) + mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout) if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil { t.Fatalf("#%d: watch on %s error: %v", i, u, err) } @@ -539,8 +540,8 @@ func clusterMustProgress(t *testing.T, membs []*member) { } func TestSpeedyTerminate(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) // Stop/Restart so requests will time out on lost leaders for i := 0; i < 3; i++ { clus.Members[i].Stop(t) @@ -553,7 +554,7 @@ func TestSpeedyTerminate(t *testing.T) { }() select { case <-time.After(10 * time.Second): - t.Fatalf("cluster took too long to terminate") + t.Fatalf("Cluster took too long to terminate") case <-donec: } } diff --git a/tests/integration/embed/embed_test.go b/tests/integration/embed/embed_test.go index c04bf97c9..3733684d2 100644 --- a/tests/integration/embed/embed_test.go +++ b/tests/integration/embed/embed_test.go @@ -34,14 +34,14 @@ import ( "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 
"go.etcd.io/etcd/tests/v3/framework/integration" ) var ( testTLSInfo = transport.TLSInfo{ - KeyFile: integration.MustAbsPath("../../fixtures/server.key.insecure"), - CertFile: integration.MustAbsPath("../../fixtures/server.crt"), - TrustedCAFile: integration.MustAbsPath("../../fixtures/ca.crt"), + KeyFile: integration2.MustAbsPath("../../fixtures/server.key.insecure"), + CertFile: integration2.MustAbsPath("../../fixtures/server.crt"), + TrustedCAFile: integration2.MustAbsPath("../../fixtures/ca.crt"), ClientCertAuth: true, } ) @@ -160,7 +160,7 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) { t.Fatal(err) } } - cli, err := integration.NewClient(t, clientCfg) + cli, err := integration2.NewClient(t, clientCfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go index eb71191a3..dfa9fadcc 100644 --- a/tests/integration/grpc_test.go +++ b/tests/integration/grpc_test.go @@ -23,6 +23,7 @@ import ( "time" clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) @@ -94,14 +95,14 @@ func TestAuthority(t *testing.T) { for _, tc := range tcs { for _, clusterSize := range []int{1, 3} { t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) { - BeforeTest(t) - cfg := ClusterConfig{ + integration.BeforeTest(t) + cfg := integration.ClusterConfig{ Size: clusterSize, UseTCP: tc.useTCP, UseIP: tc.useTCP, } cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg) - clus := NewClusterV3(t, &cfg) + clus := integration.NewClusterV3(t, &cfg) defer clus.Terminate(t) kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig) @@ -118,11 +119,11 @@ func TestAuthority(t *testing.T) { } } -func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls.Config) { +func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integration.ClusterConfig, *tls.Config) { t.Helper() if useTLS { - cfg.ClientTLS = &testTLSInfo - tlsConfig, err := testTLSInfo.ClientConfig() + cfg.ClientTLS = &integration.TestTLSInfo + tlsConfig, err := integration.TestTLSInfo.ClientConfig() if err != nil { t.Fatal(err) } @@ -131,7 +132,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls return cfg, nil } -func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfig *tls.Config) *clientv3.Client { +func setupClient(t *testing.T, endpointPattern string, clus *integration.ClusterV3, tlsConfig *tls.Config) *clientv3.Client { t.Helper() endpoints := templateEndpoints(t, endpointPattern, clus) kv, err := clientv3.New(clientv3.Config{ @@ -146,13 +147,13 @@ func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfi return kv } -func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string { +func templateEndpoints(t *testing.T, pattern string, clus *integration.ClusterV3) []string { t.Helper() endpoints := []string{} for _, m := range clus.Members { ent := pattern if strings.Contains(ent, "%d") { - ent = fmt.Sprintf(ent, GrpcPortNumber(m.UniqNumber, m.MemberNumber)) + ent = fmt.Sprintf(ent, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } if strings.Contains(ent, "%s") { ent = fmt.Sprintf(ent, m.Name) @@ -165,11 +166,11 @@ func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string { return endpoints } -func templateAuthority(t *testing.T, pattern string, m *member) string { +func templateAuthority(t *testing.T, pattern string, m *integration.Member) string { 
t.Helper() authority := pattern if strings.Contains(authority, "%d") { - authority = fmt.Sprintf(authority, GrpcPortNumber(m.UniqNumber, m.MemberNumber)) + authority = fmt.Sprintf(authority, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } if strings.Contains(authority, "%s") { authority = fmt.Sprintf(authority, m.Name) @@ -180,7 +181,7 @@ func templateAuthority(t *testing.T, pattern string, m *member) string { return authority } -func assertAuthority(t *testing.T, expectedAuthority string, clus *ClusterV3) { +func assertAuthority(t *testing.T, expectedAuthority string, clus *integration.ClusterV3) { t.Helper() requestsFound := 0 for _, m := range clus.Members { diff --git a/tests/integration/lazy_cluster.go b/tests/integration/lazy_cluster.go index 4cc7ae765..e8ac1225d 100644 --- a/tests/integration/lazy_cluster.go +++ b/tests/integration/lazy_cluster.go @@ -22,6 +22,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // Infrastructure to provision a single shared cluster for tests - only @@ -42,7 +43,7 @@ type LazyCluster interface { EndpointsV3() []string // Cluster - calls to this method might initialize the cluster. - Cluster() *ClusterV3 + Cluster() *integration.ClusterV3 // Transport - call to this method might initialize the cluster. Transport() *http.Transport @@ -53,8 +54,8 @@ type LazyCluster interface { } type lazyCluster struct { - cfg ClusterConfig - cluster *ClusterV3 + cfg integration.ClusterConfig + cluster *integration.ClusterV3 transport *http.Transport once sync.Once tb testutil.TB @@ -64,12 +65,12 @@ type lazyCluster struct { // NewLazyCluster returns a new test cluster handler that gets created on the // first call to GetEndpoints() or GetTransport() func NewLazyCluster() LazyCluster { - return NewLazyClusterWithConfig(ClusterConfig{Size: 1}) + return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1}) } // NewLazyClusterWithConfig returns a new test cluster handler that gets created // on the first call to GetEndpoints() or GetTransport() -func NewLazyClusterWithConfig(cfg ClusterConfig) LazyCluster { +func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster { tb, closer := testutil.NewTestingTBProthesis("lazy_cluster") return &lazyCluster{cfg: cfg, tb: tb, closer: closer} } @@ -81,7 +82,7 @@ func (lc *lazyCluster) mustLazyInit() { if err != nil { log.Fatal(err) } - lc.cluster = NewClusterV3(lc.tb, &lc.cfg) + lc.cluster = integration.NewClusterV3(lc.tb, &lc.cfg) }) } @@ -105,7 +106,7 @@ func (lc *lazyCluster) EndpointsV3() []string { return lc.Cluster().Client(0).Endpoints() } -func (lc *lazyCluster) Cluster() *ClusterV3 { +func (lc *lazyCluster) Cluster() *integration.ClusterV3 { lc.mustLazyInit() return lc.cluster } diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go index 99788b757..f8f146000 100644 --- a/tests/integration/member_test.go +++ b/tests/integration/member_test.go @@ -23,52 +23,53 @@ import ( "github.com/stretchr/testify/assert" "go.etcd.io/etcd/client/v2" + "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestPauseMember(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - c := NewCluster(t, 5) + c := integration.NewCluster(t, 5) c.Launch(t) defer c.Terminate(t) for i := 0; i < 5; i++ { c.Members[i].Pause() - membs := append([]*member{}, c.Members[:i]...) + membs := append([]*integration.Member{}, c.Members[:i]...) membs = append(membs, c.Members[i+1:]...) 
- c.waitLeader(t, membs) + c.WaitMembersForLeader(t, membs) clusterMustProgress(t, membs) c.Members[i].Resume() } - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } func TestRestartMember(t *testing.T) { - BeforeTest(t) - c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) c.Launch(t) defer c.Terminate(t) for i := 0; i < 3; i++ { c.Members[i].Stop(t) - membs := append([]*member{}, c.Members[:i]...) + membs := append([]*integration.Member{}, c.Members[:i]...) membs = append(membs, c.Members[i+1:]...) - c.waitLeader(t, membs) + c.WaitMembersForLeader(t, membs) clusterMustProgress(t, membs) err := c.Members[i].Restart(t) if err != nil { t.Fatal(err) } } - c.waitLeader(t, c.Members) + c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) } func TestLaunchDuplicateMemberShouldFail(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) size := 3 - c := NewCluster(t, size) + c := integration.NewCluster(t, size) m := c.Members[0].Clone(t) var err error m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd") @@ -87,8 +88,8 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) { } func TestSnapshotAndRestartMember(t *testing.T) { - BeforeTest(t) - m := mustNewMember(t, memberConfig{name: "snapAndRestartTest", useBridge: true}) + integration.BeforeTest(t) + m := integration.MustNewMember(t, integration.MemberConfig{Name: "snapAndRestartTest", UseBridge: true}) m.SnapshotCount = 100 m.Launch() defer m.Terminate(t) @@ -97,9 +98,9 @@ func TestSnapshotAndRestartMember(t *testing.T) { resps := make([]*client.Response, 120) var err error for i := 0; i < 120; i++ { - cc := MustNewHTTPClient(t, []string{m.URL()}, nil) + cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) key := fmt.Sprintf("foo%d", i) resps[i], err = kapi.Create(ctx, "/"+key, "bar") if err != nil { @@ -112,9 +113,9 @@ func TestSnapshotAndRestartMember(t *testing.T) { m.WaitOK(t) for i := 0; i < 120; i++ { - cc := MustNewHTTPClient(t, []string{m.URL()}, nil) + cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil) kapi := client.NewKeysAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) key := fmt.Sprintf("foo%d", i) resp, err := kapi.Get(ctx, "/"+key, nil) if err != nil { diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go index 86636ce06..00efe8b1b 100644 --- a/tests/integration/metrics_test.go +++ b/tests/integration/metrics_test.go @@ -25,12 +25,13 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestMetricDbSizeBoot checks that the db size metric is set on boot. 
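The member_test.go hunks above keep using the v2 HTTP client and only qualify the helpers with the framework package. A short sketch of that pattern, under an assumed package clause and with a hypothetical test name:

package integration // assumed

import (
	"context"
	"testing"

	"go.etcd.io/etcd/client/v2"
	"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestV2HelperSketch is illustrative only.
func TestV2HelperSketch(t *testing.T) {
	integration.BeforeTest(t)

	cl := integration.NewCluster(t, 1)
	cl.Launch(t)
	defer cl.Terminate(t)

	// MustNewHTTPClient and RequestTimeout are the exported forms used above.
	cc := integration.MustNewHTTPClient(t, []string{cl.Members[0].URL()}, nil)
	kapi := client.NewKeysAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
	defer cancel()
	if _, err := kapi.Create(ctx, "/foo", "bar"); err != nil {
		t.Fatal(err)
	}
}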
func TestMetricDbSizeBoot(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") @@ -49,12 +50,12 @@ func TestMetricDbSizeDefrag(t *testing.T) { // testMetricDbSizeDefrag checks that the db size metric is set after defrag. func testMetricDbSizeDefrag(t *testing.T, name string) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - mc := toGRPC(clus.Client(0)).Maintenance + kvc := integration.ToGRPC(clus.Client(0)).KV + mc := integration.ToGRPC(clus.Client(0)).Maintenance // expand the db size numPuts := 25 // large enough to write more than 1 page @@ -163,8 +164,8 @@ func testMetricDbSizeDefrag(t *testing.T, name string) { } func TestMetricQuotaBackendBytes(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes") @@ -181,8 +182,8 @@ func TestMetricQuotaBackendBytes(t *testing.T) { } func TestMetricsHealth(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second) diff --git a/tests/integration/network_partition_test.go b/tests/integration/network_partition_test.go index 6abc36700..9ea4e4534 100644 --- a/tests/integration/network_partition_test.go +++ b/tests/integration/network_partition_test.go @@ -18,12 +18,14 @@ import ( "fmt" "testing" "time" + + "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 5}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5}) defer clus.Terminate(t) leadIndex := clus.WaitLeader(t) @@ -32,20 +34,20 @@ func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) { minority := []int{leadIndex, (leadIndex + 1) % 5} majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5} - minorityMembers := getMembersByIndexSlice(clus.cluster, minority) - majorityMembers := getMembersByIndexSlice(clus.cluster, majority) + minorityMembers := getMembersByIndexSlice(clus.Cluster, minority) + majorityMembers := getMembersByIndexSlice(clus.Cluster, majority) // network partition (bi-directional) injectPartition(t, minorityMembers, majorityMembers) // minority leader must be lost - clus.waitNoLeader(minorityMembers) + clus.WaitMembersNoLeader(minorityMembers) // wait extra election timeout time.Sleep(2 * majorityMembers[0].ElectionTimeout()) // new leader must be from majority - clus.waitLeader(t, majorityMembers) + clus.WaitMembersForLeader(t, majorityMembers) // recover network partition (bi-directional) recoverPartition(t, minorityMembers, majorityMembers) @@ -69,9 +71,9 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) { } func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { - BeforeTest(t) + 
integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 5}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5}) defer clus.Terminate(t) leadIndex := clus.WaitLeader(t) @@ -80,21 +82,21 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { majority := []int{leadIndex, (leadIndex + 1) % 5, (leadIndex + 2) % 5} minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5} - majorityMembers := getMembersByIndexSlice(clus.cluster, majority) - minorityMembers := getMembersByIndexSlice(clus.cluster, minority) + majorityMembers := getMembersByIndexSlice(clus.Cluster, majority) + minorityMembers := getMembersByIndexSlice(clus.Cluster, minority) // network partition (bi-directional) injectPartition(t, majorityMembers, minorityMembers) // minority leader must be lost - clus.waitNoLeader(minorityMembers) + clus.WaitMembersNoLeader(minorityMembers) // wait extra election timeout time.Sleep(2 * majorityMembers[0].ElectionTimeout()) // leader must be hold in majority - leadIndex2 := clus.waitLeader(t, majorityMembers) - leadID, leadID2 := clus.Members[leadIndex].s.ID(), majorityMembers[leadIndex2].s.ID() + leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers) + leadID, leadID2 := clus.Members[leadIndex].Server.ID(), majorityMembers[leadIndex2].Server.ID() if leadID != leadID2 { return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2) } @@ -108,9 +110,9 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { } func TestNetworkPartition4Members(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 4}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 4}) defer clus.Terminate(t) leadIndex := clus.WaitLeader(t) @@ -119,8 +121,8 @@ func TestNetworkPartition4Members(t *testing.T) { groupA := []int{leadIndex, (leadIndex + 1) % 4} groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4} - leaderPartition := getMembersByIndexSlice(clus.cluster, groupA) - followerPartition := getMembersByIndexSlice(clus.cluster, groupB) + leaderPartition := getMembersByIndexSlice(clus.Cluster, groupA) + followerPartition := getMembersByIndexSlice(clus.Cluster, groupB) // network partition (bi-directional) injectPartition(t, leaderPartition, followerPartition) @@ -137,21 +139,21 @@ func TestNetworkPartition4Members(t *testing.T) { clusterMustProgress(t, clus.Members) } -func getMembersByIndexSlice(clus *cluster, idxs []int) []*member { - ms := make([]*member, len(idxs)) +func getMembersByIndexSlice(clus *integration.Cluster, idxs []int) []*integration.Member { + ms := make([]*integration.Member, len(idxs)) for i, idx := range idxs { ms[i] = clus.Members[idx] } return ms } -func injectPartition(t *testing.T, src, others []*member) { +func injectPartition(t *testing.T, src, others []*integration.Member) { for _, m := range src { m.InjectPartition(t, others...) } } -func recoverPartition(t *testing.T, src, others []*member) { +func recoverPartition(t *testing.T, src, others []*integration.Member) { for _, m := range src { m.RecoverPartition(t, others...) 
} diff --git a/tests/integration/proxy/grpcproxy/cluster_test.go b/tests/integration/proxy/grpcproxy/cluster_test.go index 162956444..22c6970c8 100644 --- a/tests/integration/proxy/grpcproxy/cluster_test.go +++ b/tests/integration/proxy/grpcproxy/cluster_test.go @@ -23,7 +23,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" "go.uber.org/zap" @@ -31,9 +31,9 @@ import ( ) func TestClusterProxyMemberList(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t) @@ -43,7 +43,7 @@ func TestClusterProxyMemberList(t *testing.T) { Endpoints: []string{cts.caddr}, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("err %v, want nil", err) } @@ -95,7 +95,7 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *cl Endpoints: endpoints, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/proxy/grpcproxy/kv_test.go b/tests/integration/proxy/grpcproxy/kv_test.go index 4f9ee8d25..c319c54dd 100644 --- a/tests/integration/proxy/grpcproxy/kv_test.go +++ b/tests/integration/proxy/grpcproxy/kv_test.go @@ -23,15 +23,14 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/tests/v3/integration" - + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) func TestKVProxyRange(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t) @@ -42,7 +41,7 @@ func TestKVProxyRange(t *testing.T) { Endpoints: []string{kvts.l.Addr().String()}, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatalf("err = %v, want nil", err) } @@ -71,7 +70,7 @@ func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer { Endpoints: endpoints, DialTimeout: 5 * time.Second, } - client, err := integration.NewClient(t, cfg) + client, err := integration2.NewClient(t, cfg) if err != nil { t.Fatal(err) } diff --git a/tests/integration/proxy/grpcproxy/register_test.go b/tests/integration/proxy/grpcproxy/register_test.go index d57d01a87..d93000a5e 100644 --- a/tests/integration/proxy/grpcproxy/register_test.go +++ b/tests/integration/proxy/grpcproxy/register_test.go @@ -21,14 +21,14 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/naming/endpoints" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" ) func TestRegister(t *testing.T) { - integration.BeforeTest(t) + 
integration2.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) paddr := clus.Members[0].GRPCURL() diff --git a/tests/integration/snapshot/member_test.go b/tests/integration/snapshot/member_test.go index 076d928bb..fb6091533 100644 --- a/tests/integration/snapshot/member_test.go +++ b/tests/integration/snapshot/member_test.go @@ -24,14 +24,14 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestSnapshotV3RestoreMultiMemberAdd ensures that multiple members // can boot into the same cluster after being restored from a same // snapshot file, and also be able to add another member to the cluster. func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} dbPath := createSnapshotFile(t, kvs) @@ -48,7 +48,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { // wait for health interval + leader election time.Sleep(etcdserver.HealthInterval + 2*time.Second) - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}}) if err != nil { t.Fatal(err) } @@ -63,7 +63,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { // wait for membership reconfiguration apply time.Sleep(testutil.ApplyTimeout) - cfg := integration.NewEmbedConfig(t, "3") + cfg := integration2.NewEmbedConfig(t, "3") cfg.InitialClusterToken = testClusterTkn cfg.ClusterState = "existing" cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs @@ -88,7 +88,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { t.Fatalf("failed to start the newly added etcd member") } - cli2, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}}) + cli2, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/snapshot/v3_snapshot_test.go b/tests/integration/snapshot/v3_snapshot_test.go index 1cad1e1e6..c49798650 100644 --- a/tests/integration/snapshot/v3_snapshot_test.go +++ b/tests/integration/snapshot/v3_snapshot_test.go @@ -29,7 +29,7 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/etcdutl/v3/snapshot" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest" ) @@ -37,7 +37,7 @@ import ( // TestSnapshotV3RestoreSingle tests single node cluster restoring // from a snapshot file. 
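The snapshot member test above obtains its embedded-server config and client through the framework package (NewEmbedConfig, NewClient). A sketch of that flow; the package clause, test name, and the 10-second ready timeout are illustrative assumptions, not part of this patch:

package snapshot_test // assumed

import (
	"testing"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/server/v3/embed"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestEmbedHelperSketch is illustrative only.
func TestEmbedHelperSketch(t *testing.T) {
	integration2.BeforeTest(t)

	cfg := integration2.NewEmbedConfig(t, "default")
	srv, err := embed.StartEtcd(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()
	select {
	case <-srv.Server.ReadyNotify():
	case <-time.After(10 * time.Second):
		t.Fatal("embedded server took too long to start")
	}

	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()
}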
func TestSnapshotV3RestoreSingle(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} dbPath := createSnapshotFile(t, kvs) @@ -45,7 +45,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { urls := newEmbedURLs(clusterN * 2) cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration.NewEmbedConfig(t, "s1") + cfg := integration2.NewEmbedConfig(t, "s1") cfg.InitialClusterToken = testClusterTkn cfg.ClusterState = "existing" cfg.LCUrls, cfg.ACUrls = cURLs, cURLs @@ -82,7 +82,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { } var cli *clientv3.Client - cli, err = integration.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}) + cli, err = integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}) if err != nil { t.Fatal(err) } @@ -103,7 +103,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { // can boot into the same cluster after being restored from a same // snapshot file. func TestSnapshotV3RestoreMulti(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} dbPath := createSnapshotFile(t, kvs) @@ -119,7 +119,7 @@ func TestSnapshotV3RestoreMulti(t *testing.T) { time.Sleep(time.Second) for i := 0; i < clusterN; i++ { - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}}) if err != nil { t.Fatal(err) } @@ -139,8 +139,8 @@ func TestSnapshotV3RestoreMulti(t *testing.T) { // TestCorruptedBackupFileCheck tests if we can correctly identify a corrupted backup file. func TestCorruptedBackupFileCheck(t *testing.T) { - dbPath := integration.MustAbsPath("testdata/corrupted_backup.db") - integration.BeforeTest(t) + dbPath := integration2.MustAbsPath("testdata/corrupted_backup.db") + integration2.BeforeTest(t) if _, err := os.Stat(dbPath); err != nil { t.Fatalf("test file [%s] does not exist: %v", dbPath, err) } @@ -175,7 +175,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string { urls := newEmbedURLs(clusterN * 2) cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration.NewEmbedConfig(t, "default") + cfg := integration2.NewEmbedConfig(t, "default") cfg.ClusterState = "new" cfg.LCUrls, cfg.ACUrls = cURLs, cURLs cfg.LPUrls, cfg.APUrls = pURLs, pURLs @@ -194,7 +194,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string { } ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration.NewClient(t, ccfg) + cli, err := integration2.NewClient(t, ccfg) if err != nil { t.Fatal(err) } @@ -234,7 +234,7 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) ( cfgs := make([]*embed.Config, clusterN) for i := 0; i < clusterN; i++ { - cfg := integration.NewEmbedConfig(t, fmt.Sprintf("m%d", i)) + cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i)) cfg.InitialClusterToken = testClusterTkn cfg.ClusterState = "existing" cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]} diff --git a/tests/integration/testing_test.go b/tests/integration/testing_test.go index a225063b1..dfd75e893 100644 --- a/tests/integration/testing_test.go +++ b/tests/integration/testing_test.go @@ -15,12 +15,13 @@ package integration_test import ( - "go.etcd.io/etcd/tests/v3/integration" "testing" + + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) func 
TestBeforeTestWithoutLeakDetection(t *testing.T) { - integration.BeforeTest(t, integration.WithoutGoLeakDetection(), integration.WithoutSkipInShort()) + integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort()) // Intentional leak that should get ignored go func() { diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go index deed4a46c..774b25385 100644 --- a/tests/integration/utl_wal_version_test.go +++ b/tests/integration/utl_wal_version_test.go @@ -21,6 +21,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/stretchr/testify/assert" + "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap" "go.etcd.io/etcd/client/pkg/v3/testutil" @@ -33,7 +34,7 @@ import ( func TestEtcdVersionFromWAL(t *testing.T) { testutil.SkipTestIfShortMode(t, "Wal creation tests are depending on embedded etcd server so are integration-level tests.") - cfg := NewEmbedConfig(t, "default") + cfg := integration.NewEmbedConfig(t, "default") srv, err := embed.StartEtcd(cfg) if err != nil { t.Fatal(err) @@ -45,7 +46,7 @@ func TestEtcdVersionFromWAL(t *testing.T) { } ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := NewClient(t, ccfg) + cli, err := integration.NewClient(t, ccfg) if err != nil { srv.Close() t.Fatal(err) diff --git a/tests/integration/v2_http_kv_test.go b/tests/integration/v2_http_kv_test.go index 36f7cea82..128625a4c 100644 --- a/tests/integration/v2_http_kv_test.go +++ b/tests/integration/v2_http_kv_test.go @@ -27,11 +27,12 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/tests/v3/framework/integration" ) func TestV2Set(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -92,8 +93,8 @@ func TestV2Set(t *testing.T) { } func TestV2CreateUpdate(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -228,8 +229,8 @@ func TestV2CreateUpdate(t *testing.T) { } func TestV2CAS(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -376,8 +377,8 @@ func TestV2CAS(t *testing.T) { } func TestV2Delete(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -476,8 +477,8 @@ func TestV2Delete(t *testing.T) { } func TestV2CAD(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -576,8 +577,8 @@ func TestV2CAD(t *testing.T) { } func TestV2Unique(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -643,8 +644,8 @@ func TestV2Unique(t *testing.T) { } func TestV2Get(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -741,8 +742,8 @@ func TestV2Get(t *testing.T) { } func TestV2QuorumGet(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -839,8 +840,8 @@ func TestV2QuorumGet(t *testing.T) { } func TestV2Watch(t 
*testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -877,8 +878,8 @@ func TestV2Watch(t *testing.T) { } func TestV2WatchWithIndex(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -944,8 +945,8 @@ func TestV2WatchWithIndex(t *testing.T) { } func TestV2WatchKeyInDir(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) @@ -1005,8 +1006,8 @@ func TestV2WatchKeyInDir(t *testing.T) { } func TestV2Head(t *testing.T) { - BeforeTest(t) - cl := NewCluster(t, 1) + integration.BeforeTest(t) + cl := integration.NewCluster(t, 1) cl.Launch(t) defer cl.Terminate(t) diff --git a/tests/integration/v2store/store_tag_not_v2v3_test.go b/tests/integration/v2store/store_tag_not_v2v3_test.go index b76552f04..012fe4c52 100644 --- a/tests/integration/v2store/store_tag_not_v2v3_test.go +++ b/tests/integration/v2store/store_tag_not_v2v3_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) type v2TestStore struct { @@ -41,7 +41,7 @@ func newTestStore(t *testing.T, ns ...string) StoreCloser { // Ensure that the store can recover from a previously saved state. func TestStoreRecover(t *testing.T) { - integration.BeforeTest(t) + integration2.BeforeTest(t) s := newTestStore(t) defer s.Close() var eidx uint64 = 4 diff --git a/tests/integration/v2store/store_tag_v2v3_test.go b/tests/integration/v2store/store_tag_v2v3_test.go index f4e8e21fb..970d9643a 100644 --- a/tests/integration/v2store/store_tag_v2v3_test.go +++ b/tests/integration/v2store/store_tag_v2v3_test.go @@ -22,20 +22,20 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) type v2v3TestStore struct { v2store.Store - clus *integration.ClusterV3 + clus *integration2.ClusterV3 t *testing.T } func (s *v2v3TestStore) Close() { s.clus.Terminate(s.t) } func newTestStore(t *testing.T, ns ...string) StoreCloser { - integration.BeforeTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + integration2.BeforeTest(t) + clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1}) return &v2v3TestStore{ v2v3.NewStore(clus.Client(0), "/v2/"), clus, diff --git a/tests/integration/v2store/store_v2v3_test.go b/tests/integration/v2store/store_v2v3_test.go index 3a6eab14a..2266cb918 100644 --- a/tests/integration/v2store/store_v2v3_test.go +++ b/tests/integration/v2store/store_v2v3_test.go @@ -21,15 +21,15 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3" - "go.etcd.io/etcd/tests/v3/integration" + integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) // TODO: fix tests func runWithCluster(t testing.TB, runner func(testing.TB, []string)) { - integration.BeforeTest(t) - cfg := integration.ClusterConfig{Size: 1} - clus := integration.NewClusterV3(t, &cfg) + integration2.BeforeTest(t) + cfg := integration2.ClusterConfig{Size: 1} + clus := 
integration2.NewClusterV3(t, &cfg) defer clus.Terminate(t) endpoints := []string{clus.Client(0).Endpoints()[0]} runner(t, endpoints) @@ -38,7 +38,7 @@ func runWithCluster(t testing.TB, runner func(testing.TB, []string)) { func TestCreateKV(t *testing.T) { runWithCluster(t, testCreateKV) } func testCreateKV(t testing.TB, endpoints []string) { - integration.BeforeTest(t) + integration2.BeforeTest(t) testCases := []struct { key string value string @@ -54,7 +54,7 @@ func testCreateKV(t testing.TB, endpoints []string) { //{key: "hello", value: "3", unique: true, wantKeyMatch: false}, } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints}) if err != nil { t.Fatal(err) } @@ -102,7 +102,7 @@ func testSetKV(t testing.TB, endpoints []string) { {key: "/sdir/set", value: "4", wantIndexMatch: false}, } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints}) if err != nil { t.Fatal(err) } @@ -130,7 +130,7 @@ func testSetKV(t testing.TB, endpoints []string) { func TestCreateSetDir(t *testing.T) { runWithCluster(t, testCreateSetDir) } func testCreateSetDir(t testing.TB, endpoints []string) { - integration.BeforeTest(t) + integration2.BeforeTest(t) testCases := []struct { dir string }{ @@ -138,7 +138,7 @@ func testCreateSetDir(t testing.TB, endpoints []string) { {dir: "/ddir/1/2/3"}, } - cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints}) + cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints}) if err != nil { t.Fatal(err) } diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go index dc2191253..929d9b926 100644 --- a/tests/integration/v3_alarm_test.go +++ b/tests/integration/v3_alarm_test.go @@ -27,24 +27,25 @@ import ( "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/etcd/tests/v3/framework/integration" "go.uber.org/zap/zaptest" ) // TestV3StorageQuotaApply tests the V3 server respects quotas during apply func TestV3StorageQuotaApply(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) quotasize := int64(16 * os.Getpagesize()) - clus := NewClusterV3(t, &ClusterConfig{Size: 2, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true}) defer clus.Terminate(t) - kvc0 := toGRPC(clus.Client(0)).KV - kvc1 := toGRPC(clus.Client(1)).KV + kvc0 := integration.ToGRPC(clus.Client(0)).KV + kvc1 := integration.ToGRPC(clus.Client(1)).KV // Set a quota on one node clus.Members[0].QuotaBackendBytes = quotasize clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) waitForRestart(t, kvc0) key := []byte("abc") @@ -73,7 +74,7 @@ func TestV3StorageQuotaApply(t *testing.T) { stopc := time.After(5 * time.Second) for { req := &pb.AlarmRequest{Action: pb.AlarmRequest_GET} - resp, aerr := clus.Members[0].s.Alarm(context.TODO(), req) + resp, aerr := clus.Members[0].Server.Alarm(context.TODO(), req) if aerr != nil { t.Fatal(aerr) } @@ -87,7 +88,7 @@ func TestV3StorageQuotaApply(t *testing.T) { } } - ctx, cancel := context.WithTimeout(context.TODO(), RequestWaitTimeout) + ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout) defer cancel() // small quota machine should reject put @@ -103,7 +104,7 @@ func 
TestV3StorageQuotaApply(t *testing.T) { // reset large quota node to ensure alarm persisted clus.Members[1].Stop(t) clus.Members[1].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) if _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil { t.Fatalf("alarmed instance should reject put after reset") @@ -112,12 +113,12 @@ func TestV3StorageQuotaApply(t *testing.T) { // TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through. func TestV3AlarmDeactivate(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV - mt := toGRPC(clus.RandClient()).Maintenance + kvc := integration.ToGRPC(clus.RandClient()).KV + mt := integration.ToGRPC(clus.RandClient()).Maintenance alarmReq := &pb.AlarmRequest{ MemberID: 123, @@ -146,8 +147,8 @@ func TestV3AlarmDeactivate(t *testing.T) { } func TestV3CorruptAlarm(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) var wg sync.WaitGroup diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go index 286f2dbe6..5d915a964 100644 --- a/tests/integration/v3_auth_test.go +++ b/tests/integration/v3_auth_test.go @@ -26,18 +26,19 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error. 
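Taken together, the hunks above reduce every test to the same exported surface of the relocated framework/integration package. A minimal sketch of that pattern written from outside the package, using only names visible in this patch (integration.BeforeTest, NewClusterV3, ClusterConfig, ToGRPC, RequestWaitTimeout); the package clause and test name are hypothetical:

package integration_test // hypothetical package name for this sketch

import (
    "context"
    "testing"

    pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestSketchPut(t *testing.T) {
    integration.BeforeTest(t)

    // Cluster construction and teardown go through the exported helpers.
    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer clus.Terminate(t)

    // Raw gRPC access replaces the old package-local toGRPC helper.
    kvc := integration.ToGRPC(clus.RandClient()).KV

    ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout)
    defer cancel()
    if _, err := kvc.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
        t.Fatal(err)
    }
}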
func TestV3AuthEmptyUserGet(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() - api := toGRPC(clus.Client(0)) + api := integration.ToGRPC(clus.Client(0)) authSetupRoot(t, api.Auth) _, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")}) @@ -49,13 +50,13 @@ func TestV3AuthEmptyUserGet(t *testing.T) { // TestV3AuthTokenWithDisable tests that auth won't crash if // given a valid token when authentication is disabled func TestV3AuthTokenWithDisable(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) + c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) if cerr != nil { t.Fatal(cerr) } @@ -81,11 +82,11 @@ func TestV3AuthTokenWithDisable(t *testing.T) { } func TestV3AuthRevision(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - api := toGRPC(clus.Client(0)) + api := integration.ToGRPC(clus.Client(0)) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) presp, perr := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) @@ -109,25 +110,25 @@ func TestV3AuthRevision(t *testing.T) { // TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases // with root user be revoked after TTL. func TestV3AuthWithLeaseRevokeWithRoot(t *testing.T) { - testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1}) + testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1}) } // TestV3AuthWithLeaseRevokeWithRootJWT creates a lease with a JWT-token enabled cluster. // And tests if server is able to revoke expiry lease item. 
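The auth hunks above swap the package-local NewClient for integration.NewClient while keeping the hard-coded root/123 credentials. A sketch of that client construction as a standalone helper; the helper name and package clause are hypothetical, and auth is assumed to have been enabled via the test-local authSetupRoot shown above:

package integration_test // hypothetical package name for this sketch

import (
    "testing"

    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

// newRootClient builds an authenticated clientv3 client the same way the
// rewritten auth tests do once auth is enabled for the root user.
func newRootClient(t *testing.T, clus *integration.ClusterV3) *clientv3.Client {
    rootc, err := integration.NewClient(t, clientv3.Config{
        Endpoints: clus.Client(0).Endpoints(),
        Username:  "root",
        Password:  "123",
    })
    if err != nil {
        t.Fatal(err)
    }
    return rootc
}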
func TestV3AuthWithLeaseRevokeWithRootJWT(t *testing.T) { - testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1, AuthToken: defaultTokenJWT}) + testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1, AuthToken: integration.DefaultTokenJWT}) } -func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg ClusterConfig) { - BeforeTest(t) +func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterConfig) { + integration.BeforeTest(t) - clus := NewClusterV3(t, &ccfg) + clus := integration.NewClusterV3(t, &ccfg) defer clus.Terminate(t) - api := toGRPC(clus.Client(0)) + api := integration.ToGRPC(clus.Client(0)) authSetupRoot(t, api.Auth) - rootc, cerr := NewClient(t, clientv3.Config{ + rootc, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123", @@ -177,8 +178,8 @@ type user struct { } func TestV3AuthWithLeaseRevoke(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) users := []user{ @@ -190,11 +191,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { end: "k2", }, } - authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users) + authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - rootc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) + rootc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) if cerr != nil { t.Fatal(cerr) } @@ -211,7 +212,7 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { t.Fatal(err) } - userc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) + userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) if cerr != nil { t.Fatal(cerr) } @@ -223,8 +224,8 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { } func TestV3AuthWithLeaseAttach(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) users := []user{ @@ -243,17 +244,17 @@ func TestV3AuthWithLeaseAttach(t *testing.T) { end: "k4", }, } - authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users) + authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - user1c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) + user1c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) if cerr != nil { t.Fatal(cerr) } defer user1c.Close() - user2c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"}) + user2c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"}) if cerr != nil { t.Fatal(cerr) } @@ -335,8 +336,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) { } func TestV3AuthNonAuthorizedRPCs(t *testing.T) { - 
BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) nonAuthedKV := clus.Client(0).KV @@ -348,7 +349,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) { t.Fatalf("couldn't put key (%v)", err) } - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) respput, err := nonAuthedKV.Put(context.TODO(), key, val) if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) { @@ -358,13 +359,13 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) { func TestV3AuthOldRevConcurrent(t *testing.T) { t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed. - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - c, cerr := NewClient(t, clientv3.Config{ + c, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: clus.Client(0).Endpoints(), DialTimeout: 5 * time.Second, Username: "root", diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go index a3aed9ba6..b726f26b3 100644 --- a/tests/integration/v3_election_test.go +++ b/tests/integration/v3_election_test.go @@ -22,20 +22,21 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestElectionWait tests if followers can correctly wait for elections. func TestElectionWait(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) leaders := 3 followers := 3 var clients []*clientv3.Client - newClient := MakeMultiNodeClients(t, clus, &clients) + newClient := integration.MakeMultiNodeClients(t, clus, &clients) defer func() { - CloseClients(t, clients) + integration.CloseClients(t, clients) }() electedc := make(chan string) @@ -108,8 +109,8 @@ func TestElectionWait(t *testing.T) { // TestElectionFailover tests that an election will func TestElectionFailover(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) cctx, cancel := context.WithCancel(context.TODO()) @@ -119,7 +120,7 @@ func TestElectionFailover(t *testing.T) { for i := 0; i < 3; i++ { var err error - ss[i], err = concurrency.NewSession(clus.clients[i]) + ss[i], err = concurrency.NewSession(clus.Clients[i]) if err != nil { t.Error(err) } @@ -176,8 +177,8 @@ func TestElectionFailover(t *testing.T) { // TestElectionSessionRelock ensures that campaigning twice on the same election // with the same lock will Proclaim instead of deadlocking. func TestElectionSessionRecampaign(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -209,8 +210,8 @@ func TestElectionSessionRecampaign(t *testing.T) { // of bug #6278. 
https://github.com/etcd-io/etcd/issues/6278 // func TestElectionOnPrefixOfExistingKey(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -236,8 +237,8 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) { // in a new session with the same lease id) does not result in loss of // leadership. func TestElectionOnSessionRestart(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() @@ -283,8 +284,8 @@ func TestElectionOnSessionRestart(t *testing.T) { // TestElectionObserveCompacted checks that observe can tolerate // a leader key with a modrev less than the compaction revision. func TestElectionObserveCompacted(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go index 7432fb46a..b96bac45a 100644 --- a/tests/integration/v3_grpc_inflight_test.go +++ b/tests/integration/v3_grpc_inflight_test.go @@ -22,6 +22,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -30,12 +31,12 @@ import ( // TestV3MaintenanceDefragmentInflightRange ensures inflight range requests // does not panic the mvcc backend while defragment is running. func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.RandClient() - kvc := toGRPC(cli).KV + kvc := integration.ToGRPC(cli).KV if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { t.Fatal(err) } @@ -48,7 +49,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")}) }() - mvc := toGRPC(cli).Maintenance + mvc := integration.ToGRPC(cli).Maintenance mvc.Defragment(context.Background(), &pb.DefragmentRequest{}) cancel() @@ -60,12 +61,12 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { // They are either finished or canceled, but never crash the backend. // See https://github.com/etcd-io/etcd/issues/7322 for more detail. 
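The election hunks above (TestElectionWait, TestElectionFailover) also show the client slice becoming an exported field, clus.Clients, so per-member concurrency sessions can be opened outside the package. A small sketch of that loop under the same assumptions; only the helper name is invented:

package integration_test // hypothetical package name for this sketch

import (
    "testing"

    "go.etcd.io/etcd/client/v3/concurrency"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

// newSessions opens one concurrency session per pre-built cluster client,
// mirroring the TestElectionFailover setup in the hunks above.
func newSessions(t *testing.T, clus *integration.ClusterV3) []*concurrency.Session {
    ss := make([]*concurrency.Session, len(clus.Clients))
    for i := range clus.Clients {
        s, err := concurrency.NewSession(clus.Clients[i])
        if err != nil {
            t.Fatal(err)
        }
        ss[i] = s
    }
    return ss
}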
func TestV3KVInflightRangeRequests(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.RandClient() - kvc := toGRPC(cli).KV + kvc := integration.ToGRPC(cli).KV if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { t.Fatal(err) diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go index ca9e5c8ad..c7b2a681b 100644 --- a/tests/integration/v3_grpc_test.go +++ b/tests/integration/v3_grpc_test.go @@ -30,6 +30,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -37,14 +38,14 @@ import ( "google.golang.org/grpc/status" ) -// TestV3PutOverwrite puts a key with the v3 api to a random cluster member, +// TestV3PutOverwrite puts a key with the v3 api to a random Cluster member, // overwrites it, then checks that the change was applied. func TestV3PutOverwrite(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV key := []byte("foo") reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true} @@ -88,26 +89,26 @@ func TestV3PutOverwrite(t *testing.T) { // TestPutRestart checks if a put after an unrelated member restart succeeds func TestV3PutRestart(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) kvIdx := rand.Intn(3) - kvc := toGRPC(clus.Client(kvIdx)).KV + kvc := integration.ToGRPC(clus.Client(kvIdx)).KV stopIdx := kvIdx for stopIdx == kvIdx { stopIdx = rand.Intn(3) } - clus.clients[stopIdx].Close() + clus.Clients[stopIdx].Close() clus.Members[stopIdx].Stop(t) clus.Members[stopIdx].Restart(t) - c, cerr := NewClientV3(clus.Members[stopIdx]) + c, cerr := integration.NewClientV3(clus.Members[stopIdx]) if cerr != nil { t.Fatalf("cannot create client: %v", cerr) } - clus.clients[stopIdx] = c + clus.Clients[stopIdx] = c ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) defer cancel() @@ -120,11 +121,11 @@ func TestV3PutRestart(t *testing.T) { // TestV3CompactCurrentRev ensures keys are present when compacting on current revision. func TestV3CompactCurrentRev(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} for i := 0; i < 3; i++ { if _, err := kvc.Put(context.Background(), preq); err != nil { @@ -154,12 +155,12 @@ func TestV3CompactCurrentRev(t *testing.T) { // TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev. 
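TestV3PutRestart above spells out the restart dance with the now-exported fields: close the member's cached client, bounce the member, and install a fresh client built with integration.NewClientV3. A sketch of that sequence as a helper; the helper name is hypothetical, the calls are taken from the hunk:

package integration_test // hypothetical package name for this sketch

import (
    "testing"

    "go.etcd.io/etcd/tests/v3/framework/integration"
)

// restartMemberWithFreshClient bounces member i and replaces its cached
// client, following the TestV3PutRestart hunk above.
func restartMemberWithFreshClient(t *testing.T, clus *integration.ClusterV3, i int) {
    clus.Clients[i].Close()
    clus.Members[i].Stop(t)
    clus.Members[i].Restart(t)

    c, err := integration.NewClientV3(clus.Members[i])
    if err != nil {
        t.Fatalf("cannot create client: %v", err)
    }
    clus.Clients[i] = c
}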
func TestV3HashKV(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV - mvc := toGRPC(clus.RandClient()).Maintenance + kvc := integration.ToGRPC(clus.RandClient()).KV + mvc := integration.ToGRPC(clus.RandClient()).Maintenance for i := 0; i < 10; i++ { resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))}) @@ -202,12 +203,12 @@ func TestV3HashKV(t *testing.T) { } func TestV3TxnTooManyOps(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) maxTxnOps := uint(128) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV // unique keys i := new(int) @@ -278,8 +279,8 @@ func TestV3TxnTooManyOps(t *testing.T) { } func TestV3TxnDuplicateKeys(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}} @@ -323,7 +324,7 @@ func TestV3TxnDuplicateKeys(t *testing.T) { }, } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV tests := []struct { txnSuccess []*pb.RequestOp @@ -396,11 +397,11 @@ func TestV3TxnDuplicateKeys(t *testing.T) { // Testv3TxnRevision tests that the transaction header revision is set as expected. func TestV3TxnRevision(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")} presp, err := kvc.Put(context.TODO(), pr) if err != nil { @@ -447,11 +448,11 @@ func TestV3TxnRevision(t *testing.T) { // Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected // when compared to the Succeeded field in the txn response. func TestV3TxnCmpHeaderRev(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV for i := 0; i < 10; i++ { // Concurrently put a key with a txn comparing on it. 
@@ -503,8 +504,8 @@ func TestV3TxnCmpHeaderRev(t *testing.T) { // TestV3TxnRangeCompare tests range comparisons in txns func TestV3TxnRangeCompare(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) // put keys, named by expected revision @@ -598,7 +599,7 @@ func TestV3TxnRangeCompare(t *testing.T) { }, } - kvc := toGRPC(clus.Client(0)).KV + kvc := integration.ToGRPC(clus.Client(0)).KV for i, tt := range tests { txn := &pb.TxnRequest{} txn.Compare = append(txn.Compare, &tt.cmp) @@ -614,11 +615,11 @@ func TestV3TxnRangeCompare(t *testing.T) { // TestV3TxnNested tests nested txns follow paths as expected. func TestV3TxnNestedPath(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV cmpTrue := &pb.Compare{ Result: pb.Compare_EQUAL, @@ -667,17 +668,17 @@ func TestV3TxnNestedPath(t *testing.T) { // TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair. func TestV3PutIgnoreValue(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV key, val := []byte("foo"), []byte("bar") putReq := pb.PutRequest{Key: key, Value: val} // create lease - lc := toGRPC(clus.RandClient()).Lease + lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err != nil { t.Fatal(err) @@ -800,15 +801,15 @@ func TestV3PutIgnoreValue(t *testing.T) { // TestV3PutIgnoreLease ensures that writes with ignore_lease uses previous lease for the key overwrites. func TestV3PutIgnoreLease(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV // create lease - lc := toGRPC(clus.RandClient()).Lease + lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err != nil { t.Fatal(err) @@ -940,11 +941,11 @@ func TestV3PutIgnoreLease(t *testing.T) { // TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails. func TestV3PutMissingLease(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV key := []byte("foo") preq := &pb.PutRequest{Key: key, Lease: 123456} tests := []func(){ @@ -1011,7 +1012,7 @@ func TestV3PutMissingLease(t *testing.T) { // TestV3DeleteRange tests various edge cases in the DeleteRange API. 
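The ignore_value/ignore_lease hunks above rely on the raw Lease client exposed by integration.ToGRPC. A sketch of granting a lease and attaching a key to it with the same request types; the 30-second TTL is copied from those tests, and the helper name is hypothetical:

package integration_test // hypothetical package name for this sketch

import (
    "context"
    "testing"

    pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

// putWithLease grants a 30s lease and writes a key bound to it, as the
// PutIgnoreValue/PutIgnoreLease tests above do before exercising the flags.
func putWithLease(t *testing.T, clus *integration.ClusterV3, key, val []byte) int64 {
    lc := integration.ToGRPC(clus.RandClient()).Lease
    lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
    if err != nil {
        t.Fatal(err)
    }

    kvc := integration.ToGRPC(clus.RandClient()).KV
    if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: val, Lease: lresp.ID}); err != nil {
        t.Fatal(err)
    }
    return lresp.ID
}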
func TestV3DeleteRange(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { name string @@ -1069,8 +1070,8 @@ func TestV3DeleteRange(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) - kvc := toGRPC(clus.RandClient()).KV + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + kvc := integration.ToGRPC(clus.RandClient()).KV defer clus.Terminate(t) ks := tt.keySet @@ -1123,11 +1124,11 @@ func TestV3DeleteRange(t *testing.T) { // TestV3TxnInvalidRange tests that invalid ranges are rejected in txns. func TestV3TxnInvalidRange(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} for i := 0; i < 3; i++ { @@ -1166,12 +1167,12 @@ func TestV3TxnInvalidRange(t *testing.T) { } func TestV3TooLargeRequest(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV // 2MB request value largeV := make([]byte, 2*1024*1024) @@ -1185,13 +1186,13 @@ func TestV3TooLargeRequest(t *testing.T) { // TestV3Hash tests hash. func TestV3Hash(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) cli := clus.RandClient() - kvc := toGRPC(cli).KV - m := toGRPC(cli).Maintenance + kvc := integration.ToGRPC(cli).KV + m := integration.ToGRPC(cli).Maintenance preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} @@ -1210,12 +1211,12 @@ func TestV3Hash(t *testing.T) { // TestV3HashRestart ensures that hash stays the same after restart. 
func TestV3HashRestart(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) cli := clus.RandClient() - resp, err := toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) + resp, err := integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) if err != nil || resp.Hash == 0 { t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) } @@ -1223,12 +1224,12 @@ func TestV3HashRestart(t *testing.T) { clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) - kvc := toGRPC(clus.Client(0)).KV + clus.WaitMembersForLeader(t, clus.Members) + kvc := integration.ToGRPC(clus.Client(0)).KV waitForRestart(t, kvc) cli = clus.RandClient() - resp, err = toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) + resp, err = integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) if err != nil || resp.Hash == 0 { t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) } @@ -1241,10 +1242,10 @@ func TestV3HashRestart(t *testing.T) { // TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer func TestV3StorageQuotaAPI(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) quotasize := int64(16 * os.Getpagesize()) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) // Set a quota on one node clus.Members[0].QuotaBackendBytes = quotasize @@ -1252,7 +1253,7 @@ func TestV3StorageQuotaAPI(t *testing.T) { clus.Members[0].Restart(t) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV + kvc := integration.ToGRPC(clus.Client(0)).KV waitForRestart(t, kvc) key := []byte("abc") @@ -1288,7 +1289,7 @@ func TestV3StorageQuotaAPI(t *testing.T) { } func TestV3RangeRequest(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { name string @@ -1508,10 +1509,10 @@ func TestV3RangeRequest(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) for _, k := range tt.putKeys { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")} if _, err := kvc.Put(context.TODO(), req); err != nil { t.Fatalf("#%d: couldn't put key (%v)", i, err) @@ -1519,7 +1520,7 @@ func TestV3RangeRequest(t *testing.T) { } for j, req := range tt.reqs { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV resp, err := kvc.Range(context.TODO(), &req) if err != nil { t.Errorf("#%d.%d: Range error: %v", i, j, err) @@ -1550,24 +1551,24 @@ func TestV3RangeRequest(t *testing.T) { } } -func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 { +func newClusterV3NoClients(t *testing.T, cfg *integration.ClusterConfig) *integration.ClusterV3 { cfg.UseGRPC = true - clus := &ClusterV3{cluster: NewClusterByConfig(t, cfg)} + clus := &integration.ClusterV3{Cluster: integration.NewClusterByConfig(t, cfg)} clus.Launch(t) return clus } // TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client. 
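newClusterV3NoClients above is the one helper that assembles a ClusterV3 by hand, now through the exported Cluster field and NewClusterByConfig. A sketch of using that shape directly and then attaching a client explicitly with NewClientV3; only the test name is hypothetical:

package integration_test // hypothetical package name for this sketch

import (
    "testing"

    "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestSketchNoAutoClients(t *testing.T) {
    integration.BeforeTest(t)

    // Build the cluster without the framework creating clients for us.
    cfg := &integration.ClusterConfig{Size: 3}
    cfg.UseGRPC = true
    clus := &integration.ClusterV3{Cluster: integration.NewClusterByConfig(t, cfg)}
    clus.Launch(t)
    defer clus.Terminate(t)

    // Dial a single member explicitly when the test needs full control.
    client, err := integration.NewClientV3(clus.Members[0])
    if err != nil {
        t.Fatalf("cannot create client: %v", err)
    }
    defer client.Close()
}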
func TestTLSGRPCRejectInsecureClient(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo} + cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) // nil out TLS field so client will use an insecure connection clus.Members[0].ClientTLSInfo = nil - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil && err != context.DeadlineExceeded { t.Fatalf("unexpected error (%v)", err) } else if client == nil { @@ -1582,7 +1583,7 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) { go func() { ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - _, perr := toGRPC(client).KV.Put(ctx, reqput) + _, perr := integration.ToGRPC(client).KV.Put(ctx, reqput) cancel() donec <- perr }() @@ -1594,16 +1595,16 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) { // TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server. func TestTLSGRPCRejectSecureClient(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3} + cfg := integration.ClusterConfig{Size: 3} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) - clus.Members[0].ClientTLSInfo = &testTLSInfo + clus.Members[0].ClientTLSInfo = &integration.TestTLSInfo clus.Members[0].DialOptions = []grpc.DialOption{grpc.WithBlock()} - clus.Members[0].grpcURL = strings.Replace(clus.Members[0].grpcURL, "http://", "https://", 1) - client, err := NewClientV3(clus.Members[0]) + clus.Members[0].GrpcURL = strings.Replace(clus.Members[0].GrpcURL, "http://", "https://", 1) + client, err := integration.NewClientV3(clus.Members[0]) if client != nil || err == nil { client.Close() t.Fatalf("expected no client") @@ -1614,20 +1615,20 @@ func TestTLSGRPCRejectSecureClient(t *testing.T) { // TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS func TestTLSGRPCAcceptSecureAll(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo} + cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("expected tls client (%v)", err) } defer client.Close() reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := toGRPC(client).KV.Put(context.TODO(), reqput); err != nil { + if _, err := integration.ToGRPC(client).KV.Put(context.TODO(), reqput); err != nil { t.Fatalf("unexpected error on put over tls (%v)", err) } } @@ -1656,11 +1657,11 @@ func TestTLSReloadAtomicReplace(t *testing.T) { defer os.RemoveAll(certsDirExp) cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) + tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir) if terr != nil { t.Fatal(terr) } - if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoExpired, certsDirExp); err != nil { t.Fatal(err) } return tlsInfo @@ -1702,19 +1703,19 @@ func TestTLSReloadCopy(t *testing.T) { defer os.RemoveAll(certsDir) cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) + 
tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir) if terr != nil { t.Fatal(terr) } return tlsInfo } replaceFunc := func() { - if _, err = copyTLSFiles(testTLSInfoExpired, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoExpired, certsDir); err != nil { t.Fatal(err) } } revertFunc := func() { - if _, err = copyTLSFiles(testTLSInfo, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfo, certsDir); err != nil { t.Fatal(err) } } @@ -1732,19 +1733,19 @@ func TestTLSReloadCopyIPOnly(t *testing.T) { defer os.RemoveAll(certsDir) cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(testTLSInfoIP, certsDir) + tlsInfo, terr := copyTLSFiles(integration.TestTLSInfoIP, certsDir) if terr != nil { t.Fatal(terr) } return tlsInfo } replaceFunc := func() { - if _, err = copyTLSFiles(testTLSInfoExpiredIP, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoExpiredIP, certsDir); err != nil { t.Fatal(err) } } revertFunc := func() { - if _, err = copyTLSFiles(testTLSInfoIP, certsDir); err != nil { + if _, err = copyTLSFiles(integration.TestTLSInfoIP, certsDir); err != nil { t.Fatal(err) } } @@ -1757,13 +1758,13 @@ func testTLSReload( replaceFunc func(), revertFunc func(), useIP bool) { - BeforeTest(t) + integration.BeforeTest(t) // 1. separate copies for TLS assets modification tlsInfo := cloneFunc() // 2. start cluster with valid certs - clus := NewClusterV3(t, &ClusterConfig{ + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ Size: 1, PeerTLS: &tlsInfo, ClientTLS: &tlsInfo, @@ -1785,7 +1786,7 @@ func testTLSReload( t.Log(err) continue } - cli, cerr := NewClient(t, clientv3.Config{ + cli, cerr := integration.NewClient(t, clientv3.Config{ DialOptions: []grpc.DialOption{grpc.WithBlock()}, Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: time.Second, @@ -1820,7 +1821,7 @@ func testTLSReload( if terr != nil { t.Fatal(terr) } - cl, cerr := NewClient(t, clientv3.Config{ + cl, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: 5 * time.Second, TLS: tls, @@ -1832,46 +1833,46 @@ func testTLSReload( } func TestGRPCRequireLeader(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3} + cfg := integration.ClusterConfig{Size: 3} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) clus.Members[1].Stop(t) clus.Members[2].Stop(t) - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("cannot create client: %v", err) } defer client.Close() // wait for election timeout, then member[0] will not have a leader. 
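The TLS hunks above export the fixture certificates (integration.TestTLSInfo and friends) so the reload tests can keep copying them around. A sketch of standing up a TLS-enabled single-member cluster from that fixture and dialing it; ClientConfig is the usual transport.TLSInfo helper for deriving a client *tls.Config and is an assumption here, since the hunk elides that line, and the test name is hypothetical:

package integration_test // hypothetical package name for this sketch

import (
    "testing"
    "time"

    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestSketchTLSCluster(t *testing.T) {
    integration.BeforeTest(t)

    tlsInfo := integration.TestTLSInfo
    clus := integration.NewClusterV3(t, &integration.ClusterConfig{
        Size:      1,
        PeerTLS:   &tlsInfo,
        ClientTLS: &tlsInfo,
    })
    defer clus.Terminate(t)

    // Assumption: transport.TLSInfo.ClientConfig builds the client-side TLS config.
    tlscfg, err := tlsInfo.ClientConfig()
    if err != nil {
        t.Fatal(err)
    }
    cli, err := integration.NewClient(t, clientv3.Config{
        Endpoints:   []string{clus.Members[0].GRPCURL()},
        DialTimeout: 5 * time.Second,
        TLS:         tlscfg,
    })
    if err != nil {
        t.Fatal(err)
    }
    defer cli.Close()
}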
- time.Sleep(time.Duration(3*electionTicks) * tickDuration) + time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) ctx := metadata.NewOutgoingContext(context.Background(), md) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := toGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { + if _, err := integration.ToGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) } } func TestGRPCStreamRequireLeader(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - cfg := ClusterConfig{Size: 3, UseBridge: true} + cfg := integration.ClusterConfig{Size: 3, UseBridge: true} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) - client, err := NewClientV3(clus.Members[0]) + client, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("failed to create client (%v)", err) } defer client.Close() - wAPI := toGRPC(client).Watch + wAPI := integration.ToGRPC(client).Watch md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) ctx := metadata.NewOutgoingContext(context.Background(), md) wStream, err := wAPI.Watch(ctx) @@ -1901,8 +1902,8 @@ func TestGRPCStreamRequireLeader(t *testing.T) { clus.Members[1].Restart(t) clus.Members[2].Restart(t) - clus.waitLeader(t, clus.Members) - time.Sleep(time.Duration(2*electionTicks) * tickDuration) + clus.WaitMembersForLeader(t, clus.Members) + time.Sleep(time.Duration(2*integration.ElectionTicks) * integration.TickDuration) // new stream should also be OK now after we restarted the other members wStream, err = wAPI.Watch(ctx) @@ -1922,7 +1923,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) { // TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended. 
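TestGRPCRequireLeader above is also the first user of the exported timing constants: the election-timeout wait becomes arithmetic on integration.ElectionTicks and integration.TickDuration, and the require-leader behaviour rides on gRPC metadata. A compact sketch of that combination; cluster setup is as in the hunks, and the stopped-majority state is assumed to already hold:

package integration_test // hypothetical package name for this sketch

import (
    "context"
    "testing"
    "time"

    pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/tests/v3/framework/integration"
    "google.golang.org/grpc/metadata"
)

// expectNoLeader assumes the client's member has already lost quorum, waits
// out the election timeout, and checks that a require-leader Put is refused.
func expectNoLeader(t *testing.T, client *clientv3.Client) {
    time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)

    md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
    ctx := metadata.NewOutgoingContext(context.Background(), md)

    reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
    if _, err := integration.ToGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
        t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
    }
}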
func TestV3LargeRequests(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { maxRequestBytes uint valueSize int @@ -1936,9 +1937,9 @@ func TestV3LargeRequests(t *testing.T) { } for i, test := range tests { t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) defer clus.Terminate(t) - kvcli := toGRPC(clus.Client(0)).KV + kvcli := integration.ToGRPC(clus.Client(0)).KV reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)} _, err := kvcli.Put(context.TODO(), reqput) if !eqErrGRPC(err, test.expectError) { diff --git a/tests/integration/v3_health_test.go b/tests/integration/v3_health_test.go index fd7257fb9..2bd03588d 100644 --- a/tests/integration/v3_health_test.go +++ b/tests/integration/v3_health_test.go @@ -18,13 +18,14 @@ import ( "context" "testing" + "go.etcd.io/etcd/tests/v3/framework/integration" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) func TestHealthCheck(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := healthpb.NewHealthClient(clus.RandClient().ActiveConnection()) diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go index aca4aeb49..851edd294 100644 --- a/tests/integration/v3_kv_test.go +++ b/tests/integration/v3_kv_test.go @@ -6,13 +6,14 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestKVWithEmptyValue ensures that a get/delete with an empty value, and with WithFromKey/WithPrefix function will return an empty error. 
func TestKVWithEmptyValue(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) client := clus.RandClient() diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go index e530bbdfc..8fba02c86 100644 --- a/tests/integration/v3_leadership_test.go +++ b/tests/integration/v3_leadership_test.go @@ -23,6 +23,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/tests/v3/framework/integration" "golang.org/x/sync/errgroup" ) @@ -30,13 +31,13 @@ func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) } func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) } func testMoveLeader(t *testing.T, auto bool) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) oldLeadIdx := clus.WaitLeader(t) - oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID()) + oldLeadID := uint64(clus.Members[oldLeadIdx].Server.ID()) // ensure followers go through leader transition while leadership transfer idc := make(chan uint64) @@ -45,23 +46,23 @@ func testMoveLeader(t *testing.T, auto bool) { for i := range clus.Members { if oldLeadIdx != i { - go func(m *member) { + go func(m *integration.Member) { select { - case idc <- checkLeaderTransition(m, oldLeadID): + case idc <- integration.CheckLeaderTransition(m, oldLeadID): case <-stopc: } }(clus.Members[i]) } } - target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID()) + target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.ID()) if auto { - err := clus.Members[oldLeadIdx].s.TransferLeadership() + err := clus.Members[oldLeadIdx].Server.TransferLeadership() if err != nil { t.Fatal(err) } } else { - mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance + mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) if err != nil { t.Fatal(err) @@ -98,17 +99,17 @@ func testMoveLeader(t *testing.T, auto bool) { // TestMoveLeaderError ensures that request to non-leader fail. func TestMoveLeaderError(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) oldLeadIdx := clus.WaitLeader(t) followerIdx := (oldLeadIdx + 1) % 3 - target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID()) + target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.ID()) - mvc := toGRPC(clus.Client(followerIdx)).Maintenance + mvc := integration.ToGRPC(clus.Client(followerIdx)).Maintenance _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) { t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader) @@ -117,9 +118,9 @@ func TestMoveLeaderError(t *testing.T) { // TestMoveLeaderToLearnerError ensures that leader transfer to learner member will fail. 
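In the leadership hunks above the unexported member field s becomes Member.Server, which is what tests now use to read server IDs and applied indexes. A sketch of a manual leader move built from those exported pieces; the helper name is hypothetical and the three-member layout matches testMoveLeader:

package integration_test // hypothetical package name for this sketch

import (
    "context"
    "testing"

    pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

// moveLeaderToNext transfers leadership to the next member, reading server
// IDs through the exported Member.Server field as testMoveLeader does above.
func moveLeaderToNext(t *testing.T, clus *integration.ClusterV3) {
    oldLeadIdx := clus.WaitLeader(t)
    target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.ID())

    mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance
    if _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}); err != nil {
        t.Fatal(err)
    }
}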
func TestMoveLeaderToLearnerError(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // we have to add and launch learner member after initial cluster was created, because @@ -128,10 +129,10 @@ func TestMoveLeaderToLearnerError(t *testing.T) { learners, err := clus.GetLearnerMembers() if err != nil { - t.Fatalf("failed to get the learner members in cluster: %v", err) + t.Fatalf("failed to get the learner members in Cluster: %v", err) } if len(learners) != 1 { - t.Fatalf("added 1 learner to cluster, got %d", len(learners)) + t.Fatalf("added 1 learner to Cluster, got %d", len(learners)) } learnerID := learners[0].ID @@ -150,19 +151,19 @@ func TestMoveLeaderToLearnerError(t *testing.T) { // TestTransferLeadershipWithLearner ensures TransferLeadership does not timeout due to learner is // automatically picked by leader as transferee. func TestTransferLeadershipWithLearner(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) clus.AddAndLaunchLearnerMember(t) learners, err := clus.GetLearnerMembers() if err != nil { - t.Fatalf("failed to get the learner members in cluster: %v", err) + t.Fatalf("failed to get the learner members in Cluster: %v", err) } if len(learners) != 1 { - t.Fatalf("added 1 learner to cluster, got %d", len(learners)) + t.Fatalf("added 1 learner to Cluster, got %d", len(learners)) } leaderIdx := clus.WaitLeader(t) @@ -170,7 +171,7 @@ func TestTransferLeadershipWithLearner(t *testing.T) { go func() { // note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil. // Leadership transfer is skipped in cluster with 1 voting member. - errCh <- clus.Members[leaderIdx].s.TransferLeadership() + errCh <- clus.Members[leaderIdx].Server.TransferLeadership() }() select { case err := <-errCh: @@ -183,10 +184,10 @@ func TestTransferLeadershipWithLearner(t *testing.T) { } func TestFirstCommitNotification(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) ctx := context.Background() clusterSize := 3 - cluster := NewClusterV3(t, &ClusterConfig{Size: clusterSize}) + cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize}) defer cluster.Terminate(t) oldLeaderIdx := cluster.WaitLeader(t) @@ -197,7 +198,7 @@ func TestFirstCommitNotification(t *testing.T) { notifiers := make(map[int]<-chan struct{}, clusterSize) for i, clusterMember := range cluster.Members { - notifiers[i] = clusterMember.s.FirstCommitInTermNotify() + notifiers[i] = clusterMember.Server.FirstCommitInTermNotify() } _, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderId) @@ -215,7 +216,7 @@ func TestFirstCommitNotification(t *testing.T) { } // It's guaranteed now that leader contains the 'foo'->'bar' index entry. 
- leaderAppliedIndex := cluster.Members[newLeaderIdx].s.AppliedIndex() + leaderAppliedIndex := cluster.Members[newLeaderIdx].Server.AppliedIndex() ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -238,13 +239,13 @@ func TestFirstCommitNotification(t *testing.T) { func checkFirstCommitNotification( ctx context.Context, t testing.TB, - member *member, + member *integration.Member, leaderAppliedIndex uint64, notifier <-chan struct{}, ) error { // wait until server applies all the changes of leader - for member.s.AppliedIndex() < leaderAppliedIndex { - t.Logf("member.s.AppliedIndex():%v <= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex) + for member.Server.AppliedIndex() < leaderAppliedIndex { + t.Logf("member.Server.AppliedIndex():%v <= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex) select { case <-ctx.Done(): return ctx.Err() @@ -262,7 +263,7 @@ func checkFirstCommitNotification( ) } default: - t.Logf("member.s.AppliedIndex():%v >= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex) + t.Logf("member.Server.AppliedIndex():%v >= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex) return fmt.Errorf( "notification was not triggered, member ID: %d", member.ID(), diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go index 1727da65c..40bced9c3 100644 --- a/tests/integration/v3_lease_test.go +++ b/tests/integration/v3_lease_test.go @@ -24,6 +24,7 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/testutil" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -34,13 +35,13 @@ import ( // to the primary lessor, refresh the leases and start to manage leases. // TODO: use customized clock to make this test go faster? func TestV3LeasePromote(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) defer clus.Terminate(t) // create lease - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) ttl := time.Duration(lresp.TTL) * time.Second afterGrant := time.Now() if err != nil { @@ -54,19 +55,19 @@ func TestV3LeasePromote(t *testing.T) { time.Sleep(time.Until(afterGrant.Add(ttl - time.Second))) // kill the current leader, all leases should be refreshed. - toStop := clus.waitLeader(t, clus.Members) + toStop := clus.WaitMembersForLeader(t, clus.Members) beforeStop := time.Now() clus.Members[toStop].Stop(t) - var toWait []*member + var toWait []*integration.Member for i, m := range clus.Members { if i != toStop { toWait = append(toWait, m) } } - clus.waitLeader(t, toWait) + clus.WaitMembersForLeader(t, toWait) clus.Members[toStop].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) afterReelect := time.Now() // ensure lease is refreshed by waiting for a "long" time. @@ -96,9 +97,9 @@ func TestV3LeasePromote(t *testing.T) { // TestV3LeaseRevoke ensures a key is deleted once its lease is revoked. 
func TestV3LeaseRevoke(t *testing.T) { - BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error { - lc := toGRPC(clus.RandClient()).Lease + integration.BeforeTest(t) + testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error { + lc := integration.ToGRPC(clus.RandClient()).Lease _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID}) return err }) @@ -106,12 +107,12 @@ func TestV3LeaseRevoke(t *testing.T) { // TestV3LeaseGrantById ensures leases may be created by a given id. func TestV3LeaseGrantByID(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // create fixed lease - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{ID: 1, TTL: 1}) if err != nil { @@ -122,7 +123,7 @@ func TestV3LeaseGrantByID(t *testing.T) { } // create duplicate fixed lease - _, err = toGRPC(clus.RandClient()).Lease.LeaseGrant( + _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{ID: 1, TTL: 1}) if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) { @@ -130,7 +131,7 @@ func TestV3LeaseGrantByID(t *testing.T) { } // create fresh fixed lease - lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{ID: 2, TTL: 1}) if err != nil { @@ -143,13 +144,13 @@ func TestV3LeaseGrantByID(t *testing.T) { // TestV3LeaseExpire ensures a key is deleted once a key expires. func TestV3LeaseExpire(t *testing.T) { - BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error { + integration.BeforeTest(t) + testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error { // let lease lapse; wait for deleted key ctx, cancel := context.WithCancel(context.Background()) defer cancel() - wStream, err := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, err := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if err != nil { return err } @@ -195,9 +196,9 @@ func TestV3LeaseExpire(t *testing.T) { // TestV3LeaseKeepAlive ensures keepalive keeps the lease alive. func TestV3LeaseKeepAlive(t *testing.T) { - BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error { - lc := toGRPC(clus.RandClient()).Lease + integration.BeforeTest(t) + testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error { + lc := integration.ToGRPC(clus.RandClient()).Lease lreq := &pb.LeaseKeepAliveRequest{ID: leaseID} ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -229,11 +230,11 @@ func TestV3LeaseKeepAlive(t *testing.T) { // TestV3LeaseCheckpoint ensures a lease checkpoint results in a remaining TTL being persisted // across leader elections. 
func TestV3LeaseCheckpoint(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) var ttl int64 = 300 leaseInterval := 2 * time.Second - clus := NewClusterV3(t, &ClusterConfig{ + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ Size: 3, EnableLeaseCheckpoint: true, LeaseCheckpointInterval: leaseInterval, @@ -244,7 +245,7 @@ func TestV3LeaseCheckpoint(t *testing.T) { // create lease ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c := toGRPC(clus.RandClient()) + c := integration.ToGRPC(clus.RandClient()) lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: ttl}) if err != nil { t.Fatal(err) @@ -257,10 +258,10 @@ func TestV3LeaseCheckpoint(t *testing.T) { leaderId := clus.WaitLeader(t) leader := clus.Members[leaderId] leader.Stop(t) - time.Sleep(time.Duration(3*electionTicks) * tickDuration) + time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration) leader.Restart(t) newLeaderId := clus.WaitLeader(t) - c2 := toGRPC(clus.Client(newLeaderId)) + c2 := integration.ToGRPC(clus.Client(newLeaderId)) time.Sleep(250 * time.Millisecond) @@ -284,14 +285,14 @@ func TestV3LeaseCheckpoint(t *testing.T) { // TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster. func TestV3LeaseExists(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // create lease ctx0, cancel0 := context.WithCancel(context.Background()) defer cancel0() - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) if err != nil { @@ -308,8 +309,8 @@ func TestV3LeaseExists(t *testing.T) { // TestV3LeaseLeases creates leases and confirms list RPC fetches created ones. 
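TestV3LeaseCheckpoint above turns checkpointing on through two ClusterConfig knobs and then inspects the remaining TTL after a leader change. A trimmed sketch of that flow; the checkpoint fields and the election-timeout wait come straight from the hunk, the LeaseTimeToLive request type is the standard etcdserverpb one, and the test name is hypothetical:

package integration_test // hypothetical package name for this sketch

import (
    "context"
    "testing"
    "time"

    pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestSketchLeaseCheckpoint(t *testing.T) {
    integration.BeforeTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{
        Size:                    3,
        EnableLeaseCheckpoint:   true,
        LeaseCheckpointInterval: 2 * time.Second,
    })
    defer clus.Terminate(t)

    // Grant a long lease, then bounce the leader so a checkpointed TTL must survive.
    c := integration.ToGRPC(clus.RandClient())
    lresp, err := c.Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 300})
    if err != nil {
        t.Fatal(err)
    }

    leader := clus.WaitLeader(t)
    clus.Members[leader].Stop(t)
    time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)
    clus.Members[leader].Restart(t)
    newLeader := clus.WaitLeader(t)

    // The real test asserts the remaining TTL stayed below the original grant.
    ttlResp, err := integration.ToGRPC(clus.Client(newLeader)).Lease.LeaseTimeToLive(
        context.TODO(), &pb.LeaseTimeToLiveRequest{ID: lresp.ID})
    if err != nil {
        t.Fatal(err)
    }
    t.Logf("remaining TTL after leader change: %d", ttlResp.TTL)
}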
func TestV3LeaseLeases(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx0, cancel0 := context.WithCancel(context.Background()) @@ -318,7 +319,7 @@ func TestV3LeaseLeases(t *testing.T) { // create leases ids := []int64{} for i := 0; i < 5; i++ { - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) if err != nil { @@ -330,7 +331,7 @@ func TestV3LeaseLeases(t *testing.T) { ids = append(ids, lresp.ID) } - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseLeases( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases( context.Background(), &pb.LeaseLeasesRequest{}) if err != nil { @@ -358,8 +359,8 @@ func TestV3LeaseTimeToLiveStress(t *testing.T) { } func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -368,7 +369,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient for i := 0; i < 30; i++ { for j := 0; j < 3; j++ { - go func(i int) { errc <- stresser(ctx, toGRPC(clus.Client(i)).Lease) }(j) + go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clus.Client(i)).Lease) }(j) } } @@ -429,8 +430,8 @@ func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr erro } func TestV3PutOnNonExistLease(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithCancel(context.Background()) @@ -438,7 +439,7 @@ func TestV3PutOnNonExistLease(t *testing.T) { badLeaseID := int64(0x12345678) putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID} - _, err := toGRPC(clus.RandClient()).KV.Put(ctx, putr) + _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr) if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) { t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound) } @@ -447,13 +448,13 @@ func TestV3PutOnNonExistLease(t *testing.T) { // TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic // related issue https://github.com/etcd-io/etcd/issues/6537 func TestV3GetNonExistLease(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lc := toGRPC(clus.RandClient()).Lease + lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10}) if err != nil { t.Errorf("failed to create lease %v", err) @@ -468,12 +469,12 @@ func TestV3GetNonExistLease(t *testing.T) { Keys: true, } - for _, client := range clus.clients { + for _, client := range clus.Clients { // quorum-read to ensure revoke completes before TimeToLive - if _, err := toGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil { + if _, err := 
integration.ToGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil { t.Fatal(err) } - resp, err := toGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr) + resp, err := integration.ToGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr) if err != nil { t.Fatalf("expected non nil error, but go %v", err) } @@ -485,8 +486,8 @@ func TestV3GetNonExistLease(t *testing.T) { // TestV3LeaseSwitch tests a key can be switched from one lease to another. func TestV3LeaseSwitch(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) key := "foo" @@ -494,34 +495,34 @@ func TestV3LeaseSwitch(t *testing.T) { // create lease ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) + lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) if err1 != nil { t.Fatal(err1) } - lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) + lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) if err2 != nil { t.Fatal(err2) } // attach key on lease1 then switch it to lease2 put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID} - _, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1) + _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1) if err != nil { t.Fatal(err) } put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID} - _, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2) + _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2) if err != nil { t.Fatal(err) } // revoke lease1 should not remove key - _, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) + _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) if err != nil { t.Fatal(err) } rreq := &pb.RangeRequest{Key: []byte("foo")} - rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) + rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) if err != nil { t.Fatal(err) } @@ -530,11 +531,11 @@ func TestV3LeaseSwitch(t *testing.T) { } // revoke lease2 should remove key - _, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) + _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) if err != nil { t.Fatal(err) } - rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) + rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) if err != nil { t.Fatal(err) } @@ -547,14 +548,14 @@ func TestV3LeaseSwitch(t *testing.T) { // election timeout after it loses its quorum. And the new leader extends the TTL of // the lease to at least TTL + election timeout. 
func TestV3LeaseFailover(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - toIsolate := clus.waitLeader(t, clus.Members) + toIsolate := clus.WaitMembersForLeader(t, clus.Members) - lc := toGRPC(clus.Client(toIsolate)).Lease + lc := integration.ToGRPC(clus.Client(toIsolate)).Lease // create lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5}) @@ -595,7 +596,7 @@ func TestV3LeaseFailover(t *testing.T) { } clus.Members[toIsolate].Resume() - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // lease should not expire at the last received expire deadline. time.Sleep(time.Until(expectedExp) - 500*time.Millisecond) @@ -608,12 +609,12 @@ func TestV3LeaseFailover(t *testing.T) { // TestV3LeaseRequireLeader ensures that a Recv will get a leader // loss error if there is no leader. func TestV3LeaseRequireLeader(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - lc := toGRPC(clus.Client(0)).Lease + lc := integration.ToGRPC(clus.Client(0)).Lease clus.Members[1].Stop(t) clus.Members[2].Stop(t) @@ -648,13 +649,13 @@ const fiveMinTTL int64 = 300 // TestV3LeaseRecoverAndRevoke ensures that revoking a lease after restart deletes the attached key. func TestV3LeaseRecoverAndRevoke(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) if err != nil { @@ -671,16 +672,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { // restart server and ensure lease still exists clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV - lsc = toGRPC(nc).Lease + kvc = integration.ToGRPC(nc).KV + lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke should delete the key @@ -699,13 +700,13 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { // TestV3LeaseRevokeAndRecover ensures that revoked key stays deleted after restart. 
func TestV3LeaseRevokeAndRecover(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) if err != nil { @@ -728,15 +729,15 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { // restart server and ensure revoked key doesn't exist clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV + kvc = integration.ToGRPC(nc).KV defer nc.Close() rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) @@ -751,13 +752,13 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { // TestV3LeaseRecoverKeyWithDetachedLease ensures that revoking a detached lease after restart // does not delete the key. func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) if err != nil { @@ -780,16 +781,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { // restart server and ensure lease still exists clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV - lsc = toGRPC(nc).Lease + kvc = integration.ToGRPC(nc).KV + lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the detached lease @@ -807,13 +808,13 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { } func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - kvc := toGRPC(clus.Client(0)).KV - lsc := toGRPC(clus.Client(0)).Lease + kvc := integration.ToGRPC(clus.Client(0)).KV + lsc := integration.ToGRPC(clus.Client(0)).Lease var leaseIDs []int64 for i := 0; i < 2; i++ { @@ -835,7 +836,7 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { // restart server and ensure lease still exists clus.Members[0].Stop(t) clus.Members[0].Restart(t) - clus.waitLeader(t, clus.Members) + clus.WaitMembersForLeader(t, clus.Members) for i, leaseID := range leaseIDs { if !leaseExist(t, clus, leaseID) 
{ t.Errorf("#%d: unexpected lease not exists", i) @@ -844,12 +845,12 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := NewClientV3(clus.Members[0]) + nc, err := integration.NewClientV3(clus.Members[0]) if err != nil { t.Fatal(err) } - kvc = toGRPC(nc).KV - lsc = toGRPC(nc).Lease + kvc = integration.ToGRPC(nc).KV + lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the old lease @@ -881,9 +882,9 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { } // acquireLeaseAndKey creates a new lease and creates an attached key. -func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) { +func acquireLeaseAndKey(clus *integration.ClusterV3, key string) (int64, error) { // create lease - lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant( + lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( context.TODO(), &pb.LeaseGrantRequest{TTL: 1}) if err != nil { @@ -894,7 +895,7 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) { } // attach to key put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID} - if _, err := toGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil { + if _, err := integration.ToGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil { return 0, err } return lresp.ID, nil @@ -902,8 +903,8 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) { // testLeaseRemoveLeasedKey performs some action while holding a lease with an // attached key "foo", then confirms the key is gone. -func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) +func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.ClusterV3, int64) error) { + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) leaseID, err := acquireLeaseAndKey(clus, "foo") @@ -917,7 +918,7 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) { // confirm no key rreq := &pb.RangeRequest{Key: []byte("foo")} - rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) + rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) if err != nil { t.Fatal(err) } @@ -926,8 +927,8 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) { } } -func leaseExist(t *testing.T, clus *ClusterV3, leaseID int64) bool { - l := toGRPC(clus.RandClient()).Lease +func leaseExist(t *testing.T, clus *integration.ClusterV3, leaseID int64) bool { + l := integration.ToGRPC(clus.RandClient()).Lease _, err := l.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{ID: leaseID, TTL: 5}) if err == nil { diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go index 89c666b02..ccd7eea1d 100644 --- a/tests/integration/v3_stm_test.go +++ b/tests/integration/v3_stm_test.go @@ -24,13 +24,14 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" v3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestSTMConflict tests that conflicts are retried. 
func TestSTMConflict(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -96,9 +97,9 @@ func TestSTMConflict(t *testing.T) { // TestSTMPutNewKey confirms a STM put on a new key is visible after commit. func TestSTMPutNewKey(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -123,9 +124,9 @@ func TestSTMPutNewKey(t *testing.T) { // TestSTMAbort tests that an aborted txn does not modify any keys. func TestSTMAbort(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -154,9 +155,9 @@ func TestSTMAbort(t *testing.T) { // TestSTMSerialize tests that serialization is honored when serializable. func TestSTMSerialize(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -217,9 +218,9 @@ func TestSTMSerialize(t *testing.T) { // TestSTMApplyOnConcurrentDeletion ensures that concurrent key deletion // fails the first GET revision comparison within STM; trigger retry. func TestSTMApplyOnConcurrentDeletion(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() @@ -266,9 +267,9 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) { } func TestSTMSerializableSnapshotPut(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) cli := clus.Client(0) diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go index 2437df94e..e50419261 100644 --- a/tests/integration/v3_tls_test.go +++ b/tests/integration/v3_tls_test.go @@ -21,6 +21,7 @@ import ( "time" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/framework/integration" "google.golang.org/grpc" ) @@ -30,7 +31,7 @@ func TestTLSClientCipherSuitesMismatch(t *testing.T) { testTLSCipherSuites(t, fa // testTLSCipherSuites ensures mismatching client-side cipher suite // fail TLS handshake with the server. 
func testTLSCipherSuites(t *testing.T, valid bool) { - BeforeTest(t) + integration.BeforeTest(t) cipherSuites := []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, @@ -40,21 +41,21 @@ func testTLSCipherSuites(t *testing.T, valid bool) { tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, } - srvTLS, cliTLS := testTLSInfo, testTLSInfo + srvTLS, cliTLS := integration.TestTLSInfo, integration.TestTLSInfo if valid { srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites, cipherSuites } else { srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:] } - clus := NewClusterV3(t, &ClusterConfig{Size: 1, ClientTLS: &srvTLS}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS}) defer clus.Terminate(t) cc, err := cliTLS.ClientConfig() if err != nil { t.Fatal(err) } - cli, cerr := NewClient(t, clientv3.Config{ + cli, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL()}, DialTimeout: time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go index a07dd138c..532871bf2 100644 --- a/tests/integration/v3_watch_restore_test.go +++ b/tests/integration/v3_watch_restore_test.go @@ -21,13 +21,14 @@ import ( "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // MustFetchNotEmptyMetric attempts to fetch given 'metric' from 'member', // waiting for not-empty value or 'timeout'. -func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeout <-chan time.Time) string { +func MustFetchNotEmptyMetric(tb testing.TB, member *integration.Member, metric string, timeout <-chan time.Time) string { metricValue := "" - tick := time.Tick(tickDuration) + tick := time.Tick(integration.TickDuration) for metricValue == "" { tb.Logf("Waiting for metric: %v", metric) select { @@ -50,9 +51,9 @@ func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeo // that were created in synced watcher group in the first place. // TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision" func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{ + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ Size: 3, SnapshotCount: 10, SnapshotCatchUpEntries: 5, @@ -62,7 +63,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { // spawn a watcher before shutdown, and put it in synced watcher ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx) + wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx) if errW != nil { t.Fatal(errW) } @@ -79,13 +80,13 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { } clus.Members[0].InjectPartition(t, clus.Members[1:]...) 
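The hunks above all apply the same mechanical migration: tests that live outside the framework now import go.etcd.io/etcd/tests/v3/framework/integration and reach the cluster helpers through exported names (BeforeTest, NewClusterV3, ClusterConfig, ToGRPC, TestTLSInfo, ElectionTicks, TickDuration, and so on). As a rough sketch of the resulting call pattern — the package clause and test name below are invented for illustration; only the integration identifiers come from this patch — a migrated test reads roughly like:

package integration_test // illustrative package name, not part of this patch

import (
	"context"
	"testing"
	"time"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestFrameworkUsageSketch(t *testing.T) {
	integration.BeforeTest(t) // was: BeforeTest(t)

	// Cluster construction goes through the exported framework types now.
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// ToGRPC replaces the previously package-private toGRPC helper.
	kvc := integration.ToGRPC(clus.RandClient()).KV

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if _, err := kvc.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatal(err)
	}

	// Timing constants are exported as well, e.g. ElectionTicks and TickDuration.
	time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)
}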
- initialLead := clus.waitLeader(t, clus.Members[1:]) - t.Logf("elected lead: %v", clus.Members[initialLead].s.ID()) + initialLead := clus.WaitMembersForLeader(t, clus.Members[1:]) + t.Logf("elected lead: %v", clus.Members[initialLead].Server.ID()) t.Logf("sleeping for 2 seconds") time.Sleep(2 * time.Second) t.Logf("sleeping for 2 seconds DONE") - kvc := toGRPC(clus.Client(1)).KV + kvc := integration.ToGRPC(clus.Client(1)).KV // to trigger snapshot from the leader to the stopped follower for i := 0; i < 15; i++ { @@ -98,7 +99,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { // trigger snapshot send from leader to this slow follower // which then calls watchable store Restore clus.Members[0].RecoverPartition(t, clus.Members[1:]...) - // We don't expect leadership change here, just recompute the leader's index + // We don't expect leadership change here, just recompute the leader's index // within clus.Members list. lead := clus.WaitLeader(t) diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go index 323d0d72c..59433e0cc 100644 --- a/tests/integration/v3_watch_test.go +++ b/tests/integration/v3_watch_test.go @@ -27,11 +27,12 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3WatchFromCurrentRevision tests Watch APIs from current revision. func TestV3WatchFromCurrentRevision(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) tests := []struct { name string @@ -206,10 +207,10 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch + wAPI := integration.ToGRPC(clus.RandClient()).Watch ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, err := wAPI.Watch(ctx) @@ -243,7 +244,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { ch := make(chan struct{}, 1) go func() { for _, k := range tt.putKeys { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")} if _, err := kvc.Put(context.TODO(), req); err != nil { t.Errorf("#%d: couldn't put key (%v)", i, err) @@ -291,12 +292,12 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { // TestV3WatchFutureRevision tests Watch APIs from a future revision. func TestV3WatchFutureRevision(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch + wAPI := integration.ToGRPC(clus.RandClient()).Watch ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, err := wAPI.Watch(ctx) @@ -322,7 +323,7 @@ func TestV3WatchFutureRevision(t *testing.T) { t.Fatalf("create %v, want %v", cresp.Created, true) } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV for { req := &pb.PutRequest{Key: wkey, Value: []byte("bar")} resp, rerr := kvc.Put(context.TODO(), req) @@ -352,12 +353,12 @@ func TestV3WatchFutureRevision(t *testing.T) { // TestV3WatchWrongRange tests wrong range does not create watchers.
func TestV3WatchWrongRange(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch + wAPI := integration.ToGRPC(clus.RandClient()).Watch ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, err := wAPI.Watch(ctx) @@ -397,23 +398,23 @@ func TestV3WatchWrongRange(t *testing.T) { // TestV3WatchCancelSynced tests Watch APIs cancellation from synced map. func TestV3WatchCancelSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchCancel(t, 0) } // TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map. func TestV3WatchCancelUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchCancel(t, 1) } func testV3WatchCancel(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if errW != nil { t.Fatalf("wAPI.Watch error: %v", errW) } @@ -448,7 +449,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) { t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled) } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { t.Errorf("couldn't put key (%v)", err) } @@ -463,13 +464,13 @@ func testV3WatchCancel(t *testing.T, startRev int64) { // TestV3WatchCurrentPutOverlap ensures current watchers receive all events with // overlapping puts. 
func TestV3WatchCurrentPutOverlap(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -482,7 +483,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} if _, err := kvc.Put(context.TODO(), req); err != nil { t.Errorf("couldn't put key (%v)", err) @@ -547,15 +548,15 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) { // TestV3WatchEmptyKey ensures synced watchers see empty key PUTs as PUT events func TestV3WatchEmptyKey(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if werr != nil { t.Fatal(werr) } @@ -570,7 +571,7 @@ func TestV3WatchEmptyKey(t *testing.T) { } // put a key with empty value - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} if _, err := kvc.Put(context.TODO(), preq); err != nil { t.Fatal(err) @@ -593,12 +594,12 @@ func TestV3WatchEmptyKey(t *testing.T) { } func TestV3WatchMultipleWatchersSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleWatchers(t, 0) } func TestV3WatchMultipleWatchersUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleWatchers(t, 1) } @@ -607,14 +608,14 @@ func TestV3WatchMultipleWatchersUnsynced(t *testing.T) { // that matches all watchers, and another key that matches only // one watcher to test if it receives expected events. func testV3WatchMultipleWatchers(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if errW != nil { t.Fatalf("wAPI.Watch error: %v", errW) } @@ -698,23 +699,23 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) { } func TestV3WatchMultipleEventsTxnSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleEventsTxn(t, 0) } func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleEventsTxn(t, 1) } // testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events. 
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -729,7 +730,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { t.Fatalf("create response failed: resp=%v, err=%v", resp, err) } - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV txn := pb.TxnRequest{} for i := 0; i < 3; i++ { ru := &pb.RequestOp{} @@ -791,11 +792,11 @@ func (evs eventsSortByKey) Less(i, j int) bool { } func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil { t.Fatalf("couldn't put key (%v)", err) @@ -806,7 +807,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -870,22 +871,22 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { } func TestV3WatchMultipleStreamsSynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleStreams(t, 0) } func TestV3WatchMultipleStreamsUnsynced(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) testV3WatchMultipleStreams(t, 1) } // testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams. func testV3WatchMultipleStreams(t *testing.T, startRev int64) { - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - wAPI := toGRPC(clus.RandClient()).Watch - kvc := toGRPC(clus.RandClient()).KV + wAPI := integration.ToGRPC(clus.RandClient()).Watch + kvc := integration.ToGRPC(clus.RandClient()).KV streams := make([]pb.Watch_WatchClient, 5) for i := range streams { @@ -983,13 +984,13 @@ func TestWatchWithProgressNotify(t *testing.T) { testInterval := 3 * time.Second defer func() { v3rpc.SetProgressReportInterval(oldpi) }() - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if wErr != nil { t.Fatalf("wAPI.Watch error: %v", wErr) } @@ -1033,12 +1034,12 @@ func TestWatchWithProgressNotify(t *testing.T) { // TestV3WatcMultiOpenhClose opens many watchers concurrently on multiple streams. 
func TestV3WatchClose(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) c := clus.Client(0) - wapi := toGRPC(c).Watch + wapi := integration.ToGRPC(c).Watch var wg sync.WaitGroup wg.Add(100) @@ -1068,15 +1069,15 @@ func TestV3WatchClose(t *testing.T) { // TestV3WatchWithFilter ensures watcher filters out the events correctly. func TestV3WatchWithFilter(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx) + ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) if werr != nil { t.Fatal(werr) } @@ -1103,7 +1104,7 @@ func TestV3WatchWithFilter(t *testing.T) { }() // put a key with empty value - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} if _, err := kvc.Put(context.TODO(), preq); err != nil { t.Fatal(err) @@ -1137,8 +1138,8 @@ func TestV3WatchWithFilter(t *testing.T) { } func TestV3WatchWithPrevKV(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) wctx, wcancel := context.WithCancel(context.Background()) @@ -1158,12 +1159,12 @@ func TestV3WatchWithPrevKV(t *testing.T) { vals: []string{"first", "second"}, }} for i, tt := range tests { - kvc := toGRPC(clus.RandClient()).KV + kvc := integration.ToGRPC(clus.RandClient()).KV if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[0])}); err != nil { t.Fatal(err) } - ws, werr := toGRPC(clus.RandClient()).Watch.Watch(wctx) + ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(wctx) if werr != nil { t.Fatal(werr) } @@ -1211,9 +1212,9 @@ func TestV3WatchWithPrevKV(t *testing.T) { // TestV3WatchCancellation ensures that watch cancellation frees up server resources. func TestV3WatchCancellation(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -1239,7 +1240,7 @@ func TestV3WatchCancellation(t *testing.T) { } var expected string - if ThroughProxy { + if integration.ThroughProxy { // grpc proxy has additional 2 watches open expected = "3" } else { @@ -1253,9 +1254,9 @@ func TestV3WatchCancellation(t *testing.T) { // TestV3WatchCloseCancelRace ensures that watch close doesn't decrement the watcher total too far. 
func TestV3WatchCloseCancelRace(t *testing.T) { - BeforeTest(t) + integration.BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -1278,7 +1279,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) { } var expected string - if ThroughProxy { + if integration.ThroughProxy { // grpc proxy has additional 2 watches open expected = "2" } else { diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go index d5f67dd9f..32127e50a 100644 --- a/tests/integration/v3election_grpc_test.go +++ b/tests/integration/v3election_grpc_test.go @@ -22,25 +22,26 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3ElectionCampaign checks that Campaign will not give // simultaneous leadership to multiple campaigners. func TestV3ElectionCampaign(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) + lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err1 != nil { t.Fatal(err1) } - lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) + lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err2 != nil { t.Fatal(err2) } - lc := toGRPC(clus.Client(0)).Election + lc := integration.ToGRPC(clus.Client(0)).Election req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")} l1, lerr1 := lc.Campaign(context.TODO(), req1) if lerr1 != nil { @@ -89,11 +90,11 @@ func TestV3ElectionCampaign(t *testing.T) { // TestV3ElectionObserve checks that an Observe stream receives // proclamations from different leaders uninterrupted. 
func TestV3ElectionObserve(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - lc := toGRPC(clus.Client(0)).Election + lc := integration.ToGRPC(clus.Client(0)).Election // observe leadership events observec := make(chan struct{}, 1) @@ -125,7 +126,7 @@ func TestV3ElectionObserve(t *testing.T) { t.Fatalf("observe stream took too long to start") } - lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) + lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err1 != nil { t.Fatal(err1) } @@ -139,7 +140,7 @@ func TestV3ElectionObserve(t *testing.T) { go func() { defer close(leader2c) - lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) + lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err2 != nil { t.Error(err2) } diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go index 38a36369f..1396fb3d4 100644 --- a/tests/integration/v3lock_grpc_test.go +++ b/tests/integration/v3lock_grpc_test.go @@ -21,25 +21,26 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3LockLockWaiter tests that a client will wait for a lock, then acquire it // once it is unlocked. func TestV3LockLockWaiter(t *testing.T) { - BeforeTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + integration.BeforeTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) + lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err1 != nil { t.Fatal(err1) } - lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) + lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) if err2 != nil { t.Fatal(err2) } - lc := toGRPC(clus.Client(0)).Lock + lc := integration.ToGRPC(clus.Client(0)).Lock l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID}) if lerr1 != nil { t.Fatal(lerr1)
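The same exported-helper pattern covers the election and lock gRPC services touched at the end of this patch. A minimal sketch of the lock path, assuming only the calls visible in the hunks above (the package clause and test name are invented for illustration):

package integration_test // illustrative package name, not part of this patch

import (
	"context"
	"testing"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
	"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestLockSketch(t *testing.T) {
	integration.BeforeTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// A lock is owned through a lease, granted over the exported ToGRPC helper.
	lease, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
		context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}

	// The Lock service is reached the same way as KV, Lease and Election above.
	lc := integration.ToGRPC(clus.Client(0)).Lock
	if _, err := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease.ID}); err != nil {
		t.Fatal(err)
	}
}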