Merge pull request #14360 from clarkfw/common-test-of-e2e-disable-strict-reconfig-check-by-default

tests: unify `--strict-reconfig-check` config of common framework
Benjamin Wang 2022-09-05 03:24:07 +08:00 committed by GitHub
commit 4d57eb8d3b
14 changed files with 71 additions and 60 deletions
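
The headline change: the common test framework's config.ClusterConfig gains a DisableStrictReconfigCheck field; the e2e runner translates it into --strict-reconfig-check=false, and the integration runner forwards it to its own ClusterConfig, so strict checking stays on by default in both frameworks. A minimal sketch of a helper a test might use; the struct and field names are the ones introduced in the diff below, while the package path, helper name, and package name are assumptions for illustration:

package sketch // hypothetical package; the import path below is an assumption

import "go.etcd.io/etcd/tests/v3/framework/config"

// churnClusterConfig builds a cluster config for tests that add and remove
// members in quick succession. The zero value keeps etcd's default strict
// reconfiguration check enabled; only churn-heavy tests opt out.
func churnClusterConfig() config.ClusterConfig {
	return config.ClusterConfig{
		ClusterSize:                3,
		DisableStrictReconfigCheck: true,
	}
}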


@@ -41,7 +41,7 @@ func TestCtlV3AuthTxnJWT(t *testing.T) { testCtl(t, authTestTxn, wi
func TestCtlV3AuthPrefixPerm(t *testing.T) { testCtl(t, authTestPrefixPerm) }
func TestCtlV3AuthMemberAdd(t *testing.T) { testCtl(t, authTestMemberAdd) }
func TestCtlV3AuthMemberRemove(t *testing.T) {
-testCtl(t, authTestMemberRemove, withQuorum(), withNoStrictReconfig())
+testCtl(t, authTestMemberRemove, withQuorum(), withDisableStrictReconfig())
}
func TestCtlV3AuthMemberUpdate(t *testing.T) { testCtl(t, authTestMemberUpdate) }
func TestCtlV3AuthRevokeWithDelete(t *testing.T) { testCtl(t, authTestRevokeWithDelete) }


@@ -29,16 +29,16 @@ import (
func TestCtlV3MemberList(t *testing.T) { testCtl(t, memberListTest) }
func TestCtlV3MemberListWithHex(t *testing.T) { testCtl(t, memberListWithHexTest) }
func TestCtlV3MemberRemove(t *testing.T) {
-testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig())
+testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig())
}
func TestCtlV3MemberRemoveNoTLS(t *testing.T) {
-testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigNoTLS()))
+testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(*e2e.NewConfigNoTLS()))
}
func TestCtlV3MemberRemoveClientTLS(t *testing.T) {
-testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigClientTLS()))
+testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(*e2e.NewConfigClientTLS()))
}
func TestCtlV3MemberRemoveClientAutoTLS(t *testing.T) {
-testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(
+testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(
// default ClusterSize is 1
e2e.EtcdProcessClusterConfig{
ClusterSize: 3,
@@ -48,7 +48,7 @@ func TestCtlV3MemberRemoveClientAutoTLS(t *testing.T) {
}))
}
func TestCtlV3MemberRemovePeerTLS(t *testing.T) {
-testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*e2e.NewConfigPeerTLS()))
+testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(*e2e.NewConfigPeerTLS()))
}
func TestCtlV3MemberAdd(t *testing.T) { testCtl(t, memberAddTest) }
func TestCtlV3MemberAddNoTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*e2e.NewConfigNoTLS())) }


@@ -122,11 +122,12 @@ func dialWithSchemeTest(cx ctlCtx) {
}
type ctlCtx struct {
-t *testing.T
-apiPrefix string
-cfg e2e.EtcdProcessClusterConfig
-corruptFunc func(string) error
-noStrictReconfig bool
+t *testing.T
+apiPrefix string
+cfg e2e.EtcdProcessClusterConfig
+corruptFunc func(string) error
+disableStrictReconfigCheck bool
epc *e2e.EtcdProcessCluster
@@ -185,8 +186,8 @@ func withCorruptFunc(f func(string) error) ctlOption {
return func(cx *ctlCtx) { cx.corruptFunc = f }
}
-func withNoStrictReconfig() ctlOption {
-return func(cx *ctlCtx) { cx.noStrictReconfig = true }
+func withDisableStrictReconfig() ctlOption {
+return func(cx *ctlCtx) { cx.disableStrictReconfigCheck = true }
}
func withApiPrefix(p string) ctlOption {
@@ -226,7 +227,7 @@ func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc fun
if !ret.quorum {
ret.cfg = *e2e.ConfigStandalone(ret.cfg)
}
-ret.cfg.NoStrictReconfig = ret.noStrictReconfig
+ret.cfg.DisableStrictReconfigCheck = ret.disableStrictReconfigCheck
if ret.initialCorruptCheck {
ret.cfg.InitialCorruptCheck = ret.initialCorruptCheck
}


@@ -27,8 +27,9 @@ const (
)
type ClusterConfig struct {
-ClusterSize int
-PeerTLS TLSConfig
-ClientTLS TLSConfig
-QuotaBackendBytes int64
+ClusterSize int
+PeerTLS TLSConfig
+ClientTLS TLSConfig
+QuotaBackendBytes int64
+DisableStrictReconfigCheck bool
}


@@ -43,9 +43,10 @@ func (e e2eRunner) BeforeTest(t testing.TB) {
func (e e2eRunner) NewCluster(ctx context.Context, t testing.TB, cfg config.ClusterConfig) Cluster {
e2eConfig := e2e.EtcdProcessClusterConfig{
-InitialToken: "new",
-ClusterSize: cfg.ClusterSize,
-QuotaBackendBytes: cfg.QuotaBackendBytes,
+InitialToken: "new",
+ClusterSize: cfg.ClusterSize,
+QuotaBackendBytes: cfg.QuotaBackendBytes,
+DisableStrictReconfigCheck: cfg.DisableStrictReconfigCheck,
}
switch cfg.ClientTLS {
case config.NoTLS:


@@ -166,14 +166,14 @@ type EtcdProcessClusterConfig struct {
CipherSuites []string
-ForceNewCluster bool
-InitialToken string
-QuotaBackendBytes int64
-NoStrictReconfig bool
-EnableV2 bool
-InitialCorruptCheck bool
-AuthTokenOpts string
-V2deprecation string
+ForceNewCluster bool
+InitialToken string
+QuotaBackendBytes int64
+DisableStrictReconfigCheck bool
+EnableV2 bool
+InitialCorruptCheck bool
+AuthTokenOpts string
+V2deprecation string
RollingStart bool
@@ -317,7 +317,7 @@ func (cfg *EtcdProcessClusterConfig) EtcdServerProcessConfigs(tb testing.TB) []*
"--quota-backend-bytes", fmt.Sprintf("%d", cfg.QuotaBackendBytes),
)
}
-if cfg.NoStrictReconfig {
+if cfg.DisableStrictReconfigCheck {
args = append(args, "--strict-reconfig-check=false")
}
if cfg.EnableV2 {


@@ -48,6 +48,7 @@ func (e integrationRunner) NewCluster(ctx context.Context, t testing.TB, cfg con
integrationCfg.Size = cfg.ClusterSize
integrationCfg.ClientTLS, err = tlsInfo(t, cfg.ClientTLS)
integrationCfg.QuotaBackendBytes = cfg.QuotaBackendBytes
+integrationCfg.DisableStrictReconfigCheck = cfg.DisableStrictReconfigCheck
if err != nil {
t.Fatalf("ClientTLS: %s", err)
}


@@ -169,7 +169,7 @@ type ClusterConfig struct {
WatchProgressNotifyInterval time.Duration
ExperimentalMaxLearners int
-StrictReconfigCheck bool
+DisableStrictReconfigCheck bool
CorruptCheckTime time.Duration
}
@@ -283,7 +283,7 @@ func (c *Cluster) mustNewMember(t testutil.TB) *Member {
LeaseCheckpointPersist: c.Cfg.LeaseCheckpointPersist,
WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval,
ExperimentalMaxLearners: c.Cfg.ExperimentalMaxLearners,
-StrictReconfigCheck: c.Cfg.StrictReconfigCheck,
+DisableStrictReconfigCheck: c.Cfg.DisableStrictReconfigCheck,
CorruptCheckTime: c.Cfg.CorruptCheckTime,
})
m.DiscoveryURL = c.Cfg.DiscoveryURL
@@ -604,7 +604,7 @@ type MemberConfig struct {
LeaseCheckpointPersist bool
WatchProgressNotifyInterval time.Duration
ExperimentalMaxLearners int
-StrictReconfigCheck bool
+DisableStrictReconfigCheck bool
CorruptCheckTime time.Duration
}
@@ -720,7 +720,7 @@ func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member {
m.V2Deprecation = config.V2_DEPR_DEFAULT
m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{}
m.Logger = memberLogger(t, mcfg.Name)
-m.StrictReconfigCheck = mcfg.StrictReconfigCheck
+m.StrictReconfigCheck = !mcfg.DisableStrictReconfigCheck
if err := m.listenGRPC(); err != nil {
t.Fatalf("listenGRPC FAILED: %v", err)
}
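
Note the inversion in the hunk above: the integration framework now exposes DisableStrictReconfigCheck on ClusterConfig and MemberConfig, and the member applies it as m.StrictReconfigCheck = !mcfg.DisableStrictReconfigCheck, so a zero-value config keeps strict checking on, matching the server default. A hedged sketch of an integration test relying on that flip; the identifiers and call signatures come from this diff, while the test name, body, package name, and import path are assumptions:

package sketch // hypothetical package; the import path is an assumption

import (
	"testing"

	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestMemberChurnSketch: strict reconfig checking now defaults to on, so
// only tests that reconfigure members faster than the health check allows
// set DisableStrictReconfigCheck explicitly, as the tests below do.
func TestMemberChurnSketch(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewCluster(t, &integration2.ClusterConfig{
		Size:                       3,
		DisableStrictReconfigCheck: true,
	})
	defer clus.Terminate(t)
	// member add/remove calls would go here.
}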


@@ -48,7 +48,7 @@ func TestMemberList(t *testing.T) {
func TestMemberAdd(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -67,7 +67,7 @@ func TestMemberAdd(t *testing.T) {
func TestMemberAddWithExistingURLs(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -91,7 +91,7 @@ func TestMemberAddWithExistingURLs(t *testing.T) {
func TestMemberRemove(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.Client(1)
@@ -190,7 +190,7 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) {
func TestMemberAddForLearner(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -219,7 +219,7 @@ func TestMemberAddForLearner(t *testing.T) {
func TestMemberPromote(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// member promote request can be sent to any server in cluster,
@@ -382,7 +382,7 @@ func TestMaxLearnerInCluster(t *testing.T) {
integration2.BeforeTest(t)
// 1. start with a cluster with 3 voting member and max learner 2
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, ExperimentalMaxLearners: 2})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, ExperimentalMaxLearners: 2, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// 2. adding 2 learner members should succeed


@@ -33,7 +33,8 @@ const (
var lazyCluster = integration.NewLazyClusterWithConfig(
integration2.ClusterConfig{
Size: 3,
-WatchProgressNotifyInterval: 200 * time.Millisecond})
+WatchProgressNotifyInterval: 200 * time.Millisecond,
+DisableStrictReconfigCheck: true})
func exampleEndpoints() []string { return lazyCluster.EndpointsV3() }


@@ -776,7 +776,7 @@ func TestKVLargeRequests(t *testing.T) {
func TestKVForLearner(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// we have to add and launch learner member after initial cluster was created, because
@@ -855,7 +855,7 @@ func TestKVForLearner(t *testing.T) {
func TestBalancerSupportLearner(t *testing.T) {
integration2.BeforeTest(t)
-clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// we have to add and launch learner member after initial cluster was created, because


@@ -72,7 +72,7 @@ func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }
func testDoubleClusterSize(t *testing.T, size int) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: size})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: size, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
for i := 0; i < size; i++ {
@@ -83,7 +83,12 @@ func testDoubleClusterSize(t *testing.T, size int) {
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, PeerTLS: &integration.TestTLSInfo})
+cfg := &integration.ClusterConfig{
+Size: 1,
+PeerTLS: &integration.TestTLSInfo,
+DisableStrictReconfigCheck: true,
+}
+c := integration.NewCluster(t, cfg)
defer c.Terminate(t)
for i := 0; i < 3; i++ {
@@ -97,7 +102,7 @@ func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }
func testDecreaseClusterSize(t *testing.T, size int) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: size})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: size, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// TODO: remove the last but one member
@@ -175,7 +180,7 @@ func TestForceNewCluster(t *testing.T) {
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// remove all the previous three members and add in three new members.
@@ -198,7 +203,7 @@ func TestAddMemberAfterClusterFullRotation(t *testing.T) {
// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberId())); err != nil {
@@ -219,7 +224,7 @@ func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }
func testIssue2746(t *testing.T, members int) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: members, SnapshotCount: 10})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: members, SnapshotCount: 10, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// force a snapshot
@@ -241,7 +246,7 @@ func testIssue2746(t *testing.T, members int) {
func TestIssue2904(t *testing.T) {
integration.BeforeTest(t)
// start 1-member Cluster to ensure member 0 is the leader of the Cluster.
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 2, UseBridge: true, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
c.WaitLeader(t)
@@ -276,7 +281,7 @@ func TestIssue2904(t *testing.T) {
func TestIssue3699(t *testing.T) {
// start a Cluster of 3 nodes a, b, c
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// make node a unavailable
@@ -333,7 +338,7 @@ func TestIssue3699(t *testing.T) {
// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true, StrictReconfigCheck: true})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer c.Terminate(t)
// make Cluster unhealthy and wait for downed peer
@@ -373,7 +378,7 @@ func TestRejectUnhealthyAdd(t *testing.T) {
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
integration.BeforeTest(t)
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, UseBridge: true, StrictReconfigCheck: true})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, UseBridge: true})
defer c.Terminate(t)
// make cluster unhealthy and wait for downed peer; (3 up, 2 down)
@@ -418,11 +423,11 @@ func TestRestartRemoved(t *testing.T) {
integration.BeforeTest(t)
// 1. start single-member Cluster
-c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, StrictReconfigCheck: true})
+c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer c.Terminate(t)
// 2. add a new member
-c.Cfg.StrictReconfigCheck = false
+c.Cfg.DisableStrictReconfigCheck = true
c.AddMember(t)
c.WaitLeader(t)


@@ -261,10 +261,11 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
integration.BeforeTest(t)
lg := zaptest.NewLogger(t)
clus := integration.NewCluster(t, &integration.ClusterConfig{
-CorruptCheckTime: time.Second,
-Size: 3,
-SnapshotCount: 10,
-SnapshotCatchUpEntries: 5,
+CorruptCheckTime: time.Second,
+Size: 3,
+SnapshotCount: 10,
+SnapshotCatchUpEntries: 5,
+DisableStrictReconfigCheck: true,
})
defer clus.Terminate(t)


@@ -120,7 +120,7 @@ func TestMoveLeaderError(t *testing.T) {
func TestMoveLeaderToLearnerError(t *testing.T) {
integration.BeforeTest(t)
-clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// we have to add and launch learner member after initial cluster was created, because