Merge pull request #12286 from ptabor/20200911-short-test-separated

*: 'go test --short ./...' runs all UNIT tests in <25s
This commit is contained in:
Jingyi Hu 2020-09-16 05:16:18 -07:00 committed by GitHub
commit 3e1a64913a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 79 additions and 24 deletions

View File

@ -73,8 +73,8 @@ func TestMirrorSync(t *testing.T) {
}
func TestMirrorSyncBase(t *testing.T) {
cluster := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(nil)
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
cli := cluster.Client(0)
ctx := context.TODO()

View File

@ -33,6 +33,8 @@ import (
"go.uber.org/zap"
)
// TODO(ptabor): This is integration test. Skip it in --short and move to integration tests directory.
// TestSnapshotV3RestoreSingle tests single node cluster restoring
// from a snapshot file.
func TestSnapshotV3RestoreSingle(t *testing.T) {

View File

@ -31,8 +31,9 @@ func TestPeriodicHourly(t *testing.T) {
retentionDuration := time.Duration(retentionHours) * time.Hour
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
compactable := &fakeCompactable{testutil.NewRecorderStream()}
// TODO: Do not depend on real time (Recorder.Wait) in unit tests.
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
tb.Run()
@ -82,8 +83,8 @@ func TestPeriodicMinutes(t *testing.T) {
retentionDuration := time.Duration(retentionMinutes) * time.Minute
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
compactable := &fakeCompactable{testutil.NewRecorderStream()}
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
tb.Run()
@ -130,8 +131,8 @@ func TestPeriodicMinutes(t *testing.T) {
func TestPeriodicPause(t *testing.T) {
fc := clockwork.NewFakeClock()
retentionDuration := time.Hour
rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
compactable := &fakeCompactable{testutil.NewRecorderStream()}
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
tb.Run()

View File

@ -28,8 +28,8 @@ import (
func TestRevision(t *testing.T) {
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
compactable := &fakeCompactable{testutil.NewRecorderStream()}
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newRevision(zap.NewExample(), fc, 10, rg, compactable)
tb.Run()

View File

@ -193,6 +193,8 @@ func (c *cluster) fillClusterForMembers() error {
}
func newCluster(t testing.TB, cfg *ClusterConfig) *cluster {
testutil.SkipTestIfShortMode(t, "Cannot start etcd cluster in --short tests")
c := &cluster{cfg: cfg}
ms := make([]*member, cfg.Size)
for i := 0; i < cfg.Size; i++ {
@ -209,6 +211,7 @@ func newCluster(t testing.TB, cfg *ClusterConfig) *cluster {
// NewCluster returns an unlaunched cluster of the given size which has been
// set to use static bootstrap.
func NewCluster(t testing.TB, size int) *cluster {
t.Helper()
return newCluster(t, &ClusterConfig{Size: size})
}
@ -1245,6 +1248,10 @@ type ClusterV3 struct {
// NewClusterV3 returns a launched cluster with a grpc client connection
// for each cluster member.
func NewClusterV3(t testing.TB, cfg *ClusterConfig) *ClusterV3 {
if t != nil {
t.Helper()
}
cfg.UseGRPC = true
if os.Getenv("CLIENT_DEBUG") != "" {
clientv3.SetLogger(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4))

View File

@ -31,6 +31,7 @@ import (
"go.etcd.io/etcd/v3/clientv3"
"go.etcd.io/etcd/v3/embed"
"go.etcd.io/etcd/v3/pkg/testutil"
"go.etcd.io/etcd/v3/pkg/transport"
)
@ -44,6 +45,8 @@ var (
)
func TestEmbedEtcd(t *testing.T) {
testutil.SkipTestIfShortMode(t, "Cannot start embedded cluster in --short tests")
tests := []struct {
cfg embed.Config
@ -128,6 +131,8 @@ func TestEmbedEtcdGracefulStopInsecure(t *testing.T) { testEmbedEtcdGracefulStop
// testEmbedEtcdGracefulStop ensures embedded server stops
// cutting existing transports.
func testEmbedEtcdGracefulStop(t *testing.T, secure bool) {
testutil.SkipTestIfShortMode(t, "Cannot start embedded cluster in --short tests")
cfg := embed.NewConfig()
if secure {
cfg.ClientTLSInfo = testTLSInfo

View File

@ -34,6 +34,7 @@ func TestMain(m *testing.M) {
}
func TestSample(t *testing.T) {
SkipTestIfShortMode(t, "Counting leaked routines is disabled in --short tests")
defer AfterTest(t)
ranSample = true
for range make([]struct{}, 100) {

View File

@ -82,11 +82,16 @@ func (r *RecorderBuffered) Chan() <-chan Action {
// RecorderStream writes all Actions to an unbuffered channel
type recorderStream struct {
ch chan Action
ch chan Action
waitTimeout time.Duration
}
func NewRecorderStream() Recorder {
return &recorderStream{ch: make(chan Action)}
return NewRecorderStreamWithWaitTimout(time.Duration(5 * time.Second))
}
func NewRecorderStreamWithWaitTimout(waitTimeout time.Duration) Recorder {
return &recorderStream{ch: make(chan Action), waitTimeout: waitTimeout}
}
func (r *recorderStream) Record(a Action) {
@ -110,7 +115,7 @@ func (r *recorderStream) Chan() <-chan Action {
func (r *recorderStream) Wait(n int) ([]Action, error) {
acts := make([]Action, n)
timeoutC := time.After(5 * time.Second)
timeoutC := time.After(r.waitTimeout)
for i := 0; i < n; i++ {
select {
case acts[i] = <-r.ch:

View File

@ -82,3 +82,12 @@ func Poll(interval time.Duration, timeout time.Duration, condition ConditionFunc
}
}
}
// SkipTestIfShortMode skips the current test with the supplied reason
// whenever `go test -short` is in effect. A nil t is tolerated as a
// no-op so that call sites without a testing handle (e.g. clusters
// created with a nil TB) remain safe.
func SkipTestIfShortMode(t testing.TB, reason string) {
	if t == nil {
		return
	}
	t.Helper()
	if testing.Short() {
		t.Skip(reason)
	}
}

View File

@ -28,7 +28,7 @@ type InteractionOpts struct {
OnConfig func(*raft.Config)
}
// A Node is a member of a raft group tested via an InteractionEnv.
// Node is a member of a raft group tested via an InteractionEnv.
type Node struct {
*raft.RawNode
Storage

View File

@ -20,6 +20,7 @@ import (
"net/url"
"os"
"strings"
"testing"
"time"
"go.etcd.io/etcd/v3/etcdserver"
@ -141,7 +142,9 @@ type etcdProcessClusterConfig struct {
// newEtcdProcessCluster launches a new cluster from etcd processes, returning
// a new etcdProcessCluster once all nodes are ready to accept client requests.
func newEtcdProcessCluster(cfg *etcdProcessClusterConfig) (*etcdProcessCluster, error) {
func newEtcdProcessCluster(t testing.TB, cfg *etcdProcessClusterConfig) (*etcdProcessCluster, error) {
skipInShortMode(t)
etcdCfgs := cfg.etcdServerProcessConfigs()
epc := &etcdProcessCluster{
cfg: cfg,

View File

@ -498,10 +498,12 @@ func etcdctlBackup(clus *etcdProcessCluster, dataDir, backupDir string, v3 bool)
}
func setupEtcdctlTest(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) *etcdProcessCluster {
skipInShortMode(t)
if !quorum {
cfg = configStandalone(*cfg)
}
epc, err := newEtcdProcessCluster(cfg)
epc, err := newEtcdProcessCluster(t, cfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}

View File

@ -68,7 +68,7 @@ func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvEx
dialTimeout: 7 * time.Second,
}
mirrorepc, err := newEtcdProcessCluster(&mirrorctx.cfg)
mirrorepc, err := newEtcdProcessCluster(cx.t, &mirrorctx.cfg)
if err != nil {
cx.t.Fatalf("could not start etcd process cluster (%v)", err)
}

View File

@ -158,7 +158,7 @@ func TestIssue6361(t *testing.T) {
os.Setenv("ETCDCTL_API", "3")
defer os.Unsetenv("ETCDCTL_API")
epc, err := newEtcdProcessCluster(&etcdProcessClusterConfig{
epc, err := newEtcdProcessCluster(t, &etcdProcessClusterConfig{
clusterSize: 1,
initialToken: "new",
keepDataDir: true,

View File

@ -30,6 +30,8 @@ import (
func TestCtlV3Version(t *testing.T) { testCtl(t, versionTest) }
func TestClusterVersion(t *testing.T) {
skipInShortMode(t)
tests := []struct {
name string
rollingStart bool
@ -57,7 +59,7 @@ func TestClusterVersion(t *testing.T) {
cfg.baseScheme = "unix" // to avoid port conflict
cfg.rollingStart = tt.rollingStart
epc, err := newEtcdProcessCluster(&cfg)
epc, err := newEtcdProcessCluster(t, &cfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
@ -216,7 +218,7 @@ func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) {
ret.cfg.initialCorruptCheck = ret.initialCorruptCheck
}
epc, err := newEtcdProcessCluster(&ret.cfg)
epc, err := newEtcdProcessCluster(t, &ret.cfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}

View File

@ -27,6 +27,8 @@ import (
const exampleConfigFile = "../../etcd.conf.yml.sample"
func TestEtcdExampleConfig(t *testing.T) {
skipInShortMode(t)
proc, err := spawnCmd([]string{binDir + "/etcd", "--config-file", exampleConfigFile})
if err != nil {
t.Fatal(err)
@ -40,6 +42,8 @@ func TestEtcdExampleConfig(t *testing.T) {
}
func TestEtcdMultiPeer(t *testing.T) {
skipInShortMode(t)
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, etcdProcessBasePort+i)
@ -87,6 +91,8 @@ func TestEtcdMultiPeer(t *testing.T) {
// TestEtcdUnixPeers checks that etcd will boot with unix socket peers.
func TestEtcdUnixPeers(t *testing.T) {
skipInShortMode(t)
d, err := ioutil.TempDir("", "e1.etcd")
if err != nil {
t.Fatal(err)
@ -116,6 +122,8 @@ func TestEtcdUnixPeers(t *testing.T) {
// TestEtcdPeerCNAuth checks that the inter peer auth based on CN of cert is working correctly.
func TestEtcdPeerCNAuth(t *testing.T) {
skipInShortMode(t)
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i)
@ -193,6 +201,8 @@ func TestEtcdPeerCNAuth(t *testing.T) {
// TestEtcdPeerNameAuth checks that the inter peer auth based on cert name validation is working correctly.
func TestEtcdPeerNameAuth(t *testing.T) {
skipInShortMode(t)
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i)
@ -269,6 +279,8 @@ func TestEtcdPeerNameAuth(t *testing.T) {
}
func TestGrpcproxyAndCommonName(t *testing.T) {
skipInShortMode(t)
argsWithNonEmptyCN := []string{
binDir + "/etcd",
"grpc-proxy",

View File

@ -41,7 +41,7 @@ func TestReleaseUpgrade(t *testing.T) {
copiedCfg.snapshotCount = 3
copiedCfg.baseScheme = "unix" // to avoid port conflict
epc, err := newEtcdProcessCluster(&copiedCfg)
epc, err := newEtcdProcessCluster(t, &copiedCfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
@ -132,7 +132,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
copiedCfg.snapshotCount = 10
copiedCfg.baseScheme = "unix"
epc, err := newEtcdProcessCluster(&copiedCfg)
epc, err := newEtcdProcessCluster(t, &copiedCfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}

View File

@ -27,7 +27,7 @@ var (
)
func TestGateway(t *testing.T) {
ec, err := newEtcdProcessCluster(&configNoTLS)
ec, err := newEtcdProcessCluster(t, &configNoTLS)
if err != nil {
t.Fatal(err)
}

View File

@ -19,9 +19,11 @@ import (
"fmt"
"math/rand"
"strings"
"testing"
"time"
"go.etcd.io/etcd/v3/pkg/expect"
"go.etcd.io/etcd/v3/pkg/testutil"
)
func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
@ -109,3 +111,7 @@ func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error {
func toTLS(s string) string {
return strings.Replace(s, "http://", "https://", 1)
}
// skipInShortMode skips the calling e2e test when `go test -short` is in
// effect; e2e tests spawn real etcd processes and are too slow for short runs.
func skipInShortMode(t testing.TB) {
	const reason = "e2e tests are not running in --short mode"
	testutil.SkipTestIfShortMode(t, reason)
}

View File

@ -38,7 +38,7 @@ func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
cfg = configStandalone(*cfg)
cfg.enableV2 = true
epc, err := newEtcdProcessCluster(cfg)
epc, err := newEtcdProcessCluster(t, cfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}