Merge pull request #14344 from clarkfw/common-framework-share-constants
tests: e2e and integration share TickDuration constant
Commit: a1405e9633
@@ -14,12 +14,16 @@
 
 package config
 
+import "time"
+
 type TLSConfig string
 
 const (
 	NoTLS     TLSConfig = ""
 	AutoTLS   TLSConfig = "auto-tls"
 	ManualTLS TLSConfig = "manual-tls"
+
+	TickDuration = 10 * time.Millisecond
 )
 
 type ClusterConfig struct {
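The hunk above is the core of the change: TickDuration now has a single definition in the shared framework config package. Below is a minimal sketch of a consumer, assuming only what the hunk shows; the package name and helper are illustrative, not part of the diff. Later hunks import the package under the alias framecfg wherever the name config is already taken.

package example

import (
	"time"

	framecfg "go.etcd.io/etcd/tests/v3/framework/config"
)

// electionTimeout turns a tick count into a wall-clock duration using the
// shared constant; ticks is a hypothetical parameter for this sketch.
func electionTimeout(ticks int) time.Duration {
	return time.Duration(ticks) * framecfg.TickDuration
}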
@@ -26,8 +26,6 @@ import (
 	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
-const TickDuration = 10 * time.Millisecond
-
 type e2eRunner struct{}
 
 func (e e2eRunner) TestMain(m *testing.M) {
@@ -116,7 +114,7 @@ func (c *e2eCluster) WaitMembersForLeader(ctx context.Context, t testing.TB, mem
 			t.Fatal("WaitMembersForLeader timeout")
 		default:
 		}
-		_, err := cc.Get("0", config.GetOptions{Timeout: 10*TickDuration + time.Second})
+		_, err := cc.Get("0", config.GetOptions{Timeout: 10*config.TickDuration + time.Second})
 		if err == nil || strings.Contains(err.Error(), "Key not found") {
 			break
 		}
@@ -149,7 +147,7 @@ func (c *e2eCluster) WaitMembersForLeader(ctx context.Context, t testing.TB, mem
 		}
 		leaders = make(map[uint64]struct{})
 		members = make(map[uint64]int)
-		time.Sleep(10 * TickDuration)
+		time.Sleep(10 * config.TickDuration)
 	}
 	for l := range leaders {
 		if index, ok := members[l]; ok {
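With TickDuration = 10ms, the probe timeout above is 100ms of tick headroom plus one second, i.e. 1.1s. A runnable check of that arithmetic, with the constant inlined so the snippet stands alone:

package main

import (
	"fmt"
	"time"
)

func main() {
	const tickDuration = 10 * time.Millisecond // mirrors config.TickDuration
	// Same expression as the Get timeout in WaitMembersForLeader.
	timeout := 10*tickDuration + time.Second
	fmt.Println(timeout) // 1.1s
}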
@@ -55,6 +55,7 @@ import (
 	lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
 	"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
 	"go.etcd.io/etcd/server/v3/verify"
+	framecfg "go.etcd.io/etcd/tests/v3/framework/config"
 	"go.uber.org/zap/zapcore"
 	"go.uber.org/zap/zaptest"
@@ -70,7 +71,6 @@ import (
 const (
 	// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
 	RequestWaitTimeout = 5 * time.Second
-	TickDuration       = 10 * time.Millisecond
 	RequestTimeout     = 20 * time.Second
 
 	ClusterName = "etcd"
@@ -363,7 +363,7 @@ func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) er
 	// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
 	// TODO: remove connection write timeout by selecting on http response closeNotifier
 	// blocking on https://github.com/golang/go/issues/9524
-	case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout):
+	case <-time.After(time.Second + time.Duration(ElectionTicks)*framecfg.TickDuration + time.Second + rafthttp.ConnWriteTimeout):
 		t.Fatalf("failed to remove member %s in time", m.Server.MemberId())
 	}
 }
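For a sense of scale in the RemoveMember wait above: the election-timeout term is small next to the fixed delays. A sketch of the budget follows; ElectionTicks and rafthttp.ConnWriteTimeout are not shown in this diff, so the values below (10 ticks and 5s) are assumptions:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		electionTicks    = 10                    // assumed
		tickDuration     = 10 * time.Millisecond // framecfg.TickDuration
		connWriteTimeout = 5 * time.Second       // assumed rafthttp.ConnWriteTimeout
	)
	// 1s stop delay + election timeout + 1s disk/network delay + write timeout.
	wait := time.Second + time.Duration(electionTicks)*tickDuration + time.Second + connWriteTimeout
	fmt.Println(wait) // 7.1s
}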
@@ -394,7 +394,7 @@ func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []*pb.Member) {
 			if isMembersEqual(resp.Members, membs) {
 				break
 			}
-			time.Sleep(TickDuration)
+			time.Sleep(framecfg.TickDuration)
 		}
 	}
 }
@@ -445,7 +445,7 @@ func (c *Cluster) waitMembersForLeader(ctx context.Context, t testing.TB, membs
 	}
 	// ensure leader is up via linearizable get
 	for {
-		ctx, cancel := context.WithTimeout(ctx, 10*TickDuration+time.Second)
+		ctx, cancel := context.WithTimeout(ctx, 10*framecfg.TickDuration+time.Second)
 		_, err := cc.Get(ctx, "0")
 		cancel()
 		if err == nil || strings.Contains(err.Error(), "Key not found") {
@@ -463,7 +463,7 @@ func (c *Cluster) waitMembersForLeader(ctx context.Context, t testing.TB, membs
 		}
 		if lead != 0 && lead != m.Server.Lead() {
 			lead = 0
			time.Sleep(10 * TickDuration)
-			time.Sleep(10 * TickDuration)
+			time.Sleep(10 * framecfg.TickDuration)
 			break
 		}
 		lead = m.Server.Lead()
@@ -496,7 +496,7 @@ func (c *Cluster) WaitMembersNoLeader(membs []*Member) {
 		}
 		if m.Server.Lead() != 0 {
 			noLeader = false
-			time.Sleep(10 * TickDuration)
+			time.Sleep(10 * framecfg.TickDuration)
 			break
 		}
 	}
@@ -509,7 +509,7 @@ func (c *Cluster) waitVersion() {
 			if m.Server.ClusterVersion() != nil {
 				break
 			}
-			time.Sleep(TickDuration)
+			time.Sleep(framecfg.TickDuration)
 		}
 	}
 }
@@ -655,7 +655,7 @@ func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member {
 	}
 	m.ElectionTicks = ElectionTicks
 	m.InitialElectionTickAdvance = true
-	m.TickMs = uint(TickDuration / time.Millisecond)
+	m.TickMs = uint(framecfg.TickDuration / time.Millisecond)
 	m.QuotaBackendBytes = mcfg.QuotaBackendBytes
 	m.MaxTxnOps = mcfg.MaxTxnOps
 	if m.MaxTxnOps == 0 {
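The TickMs line above leans on time.Duration being an integer count of nanoseconds: dividing one duration by another is plain integer division, so TickDuration / time.Millisecond is the tick length in milliseconds. A quick demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	const tickDuration = 10 * time.Millisecond // mirrors framecfg.TickDuration
	tickMs := uint(tickDuration / time.Millisecond)
	fmt.Println(tickMs) // 10
}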
@@ -1079,7 +1079,7 @@ func (m *Member) RecordedRequests() []grpc_testing.RequestInfo {
 func (m *Member) WaitOK(t testutil.TB) {
 	m.WaitStarted(t)
 	for m.Server.Leader() == 0 {
-		time.Sleep(TickDuration)
+		time.Sleep(framecfg.TickDuration)
 	}
 }
 
@@ -1088,7 +1088,7 @@ func (m *Member) WaitStarted(t testutil.TB) {
 		ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
 		_, err := m.Client.Get(ctx, "/", clientv3.WithSerializable())
 		if err != nil {
-			time.Sleep(TickDuration)
+			time.Sleep(framecfg.TickDuration)
 			continue
 		}
 		cancel()
@@ -1106,7 +1106,7 @@ func WaitClientV3(t testutil.TB, kv clientv3.KV) {
 		if err == nil {
 			return
 		}
-		time.Sleep(TickDuration)
+		time.Sleep(framecfg.TickDuration)
 	}
 	if err != nil {
 		t.Fatalf("timed out waiting for client: %v", err)
@@ -1604,7 +1604,7 @@ func (c *Cluster) waitMembersMatch(t testutil.TB) {
 			return
 		}
 
-		time.Sleep(TickDuration)
+		time.Sleep(framecfg.TickDuration)
 	}
 }
 
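The four hunks above all repeat one idiom: poll a condition and sleep one tick between attempts. A generic sketch of that pattern follows; the helper name and deadline handling are invented for illustration, the framework simply inlines the loop:

package main

import (
	"fmt"
	"time"
)

const tickDuration = 10 * time.Millisecond // mirrors framecfg.TickDuration

// pollUntil retries cond once per tick until it succeeds or the deadline passes.
func pollUntil(deadline time.Duration, cond func() bool) bool {
	for end := time.Now().Add(deadline); time.Now().Before(end); {
		if cond() {
			return true
		}
		time.Sleep(tickDuration)
	}
	return false
}

func main() {
	fmt.Println(pollUntil(time.Second, func() bool { return true })) // true
}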
@@ -27,6 +27,7 @@ import (
 
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/etcdserver"
+	"go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
@@ -294,7 +295,7 @@ func TestIssue3699(t *testing.T) {
 	// do not restart the killed member immediately.
 	// the member will advance its election timeout after restart,
 	// so it will have a better chance to become the leader again.
-	time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration)))
+	time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration)))
 	c.Members[leaderID].Restart(t)
 	leaderID = c.WaitMembersForLeader(t, c.Members)
 }
@@ -391,7 +392,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
 	}
 
 	// member stopped after launch; wait for missing heartbeats
-	time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration)))
+	time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration)))
 
 	// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
 	if err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.MemberId())); err != nil {
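The sleeps above multiply a plain int tick count by int(config.TickDuration), i.e. by the tick length in nanoseconds, and convert the product back into a time.Duration. Assuming ElectionTicks is 10 (its value is not shown in this diff), that works out to 100ms:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		electionTicks = 10                    // assumed
		tickDuration  = 10 * time.Millisecond // mirrors config.TickDuration
	)
	sleep := time.Duration(electionTicks * int(tickDuration))
	fmt.Println(sleep) // 100ms
}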
@@ -29,6 +29,7 @@ import (
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
 
 	"google.golang.org/grpc"
@@ -1817,7 +1818,7 @@ func TestGRPCRequireLeader(t *testing.T) {
 	defer client.Close()
 
 	// wait for election timeout, then member[0] will not have a leader.
-	time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)
+	time.Sleep(time.Duration(3*integration.ElectionTicks) * config.TickDuration)
 
 	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
 	ctx := metadata.NewOutgoingContext(context.Background(), md)
@@ -1870,7 +1871,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) {
 	clus.Members[2].Restart(t)
 
 	clus.WaitMembersForLeader(t, clus.Members)
-	time.Sleep(time.Duration(2*integration.ElectionTicks) * integration.TickDuration)
+	time.Sleep(time.Duration(2*integration.ElectionTicks) * config.TickDuration)
 
 	// new stream should also be OK now after we restarted the other members
 	wStream, err = wAPI.Watch(ctx)
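Note the spelling difference from the cluster tests above: here the tick count is converted with time.Duration first and then multiplied by the tick duration. Both forms produce the same value; a quick check under the same assumed ElectionTicks of 10:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		electionTicks = 10                    // assumed
		tickDuration  = 10 * time.Millisecond // mirrors config.TickDuration
	)
	a := time.Duration(3*electionTicks) * tickDuration        // style used here
	b := time.Duration(3 * electionTicks * int(tickDuration)) // style used in the cluster tests
	fmt.Println(a == b, a) // true 300ms
}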
@@ -26,6 +26,7 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/pkg/v3/testutil"
+	framecfg "go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
 
 	"google.golang.org/grpc/codes"
@@ -391,7 +392,7 @@ func TestV3LeaseCheckpoint(t *testing.T) {
 	leaderId := clus.WaitLeader(t)
 	leader := clus.Members[leaderId]
 	leader.Stop(t)
-	time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)
+	time.Sleep(time.Duration(3*integration.ElectionTicks) * framecfg.TickDuration)
 	leader.Restart(t)
 }
 
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
@@ -28,7 +29,7 @@ import (
 // waiting for not-empty value or 'timeout'.
 func MustFetchNotEmptyMetric(tb testing.TB, member *integration.Member, metric string, timeout <-chan time.Time) string {
 	metricValue := ""
-	tick := time.Tick(integration.TickDuration)
+	tick := time.Tick(config.TickDuration)
 	for metricValue == "" {
 		tb.Logf("Waiting for metric: %v", metric)
 		select {
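A hypothetical call site for the helper above, written as if it lived in the helper's own package (where testing, time, and the integration package are already imported); the metric name and five-second budget are assumptions, not taken from this diff:

// waitForLeaderMetric polls MustFetchNotEmptyMetric until the member reports
// the (assumed) etcd_server_has_leader metric or five seconds elapse.
func waitForLeaderMetric(t *testing.T, m *integration.Member) string {
	timeout := time.After(5 * time.Second)
	return MustFetchNotEmptyMetric(t, m, "etcd_server_has_leader", timeout)
}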