Integration tests: Use BeforeTest(t) instead of defer AfterTest().

Thanks to this change, a single call to BeforeTest(t) handles both the
before-test setup logic and the registration of after-test cleanup code
(via t.Cleanup(func)), replacing the previous defer AfterTest(t) pattern.
This commit is contained in:
Piotr Tabor 2021-03-06 15:08:16 +01:00
parent 87258efd90
commit fb1d48e98e
56 changed files with 357 additions and 384 deletions

View File

@ -28,7 +28,7 @@ import (
)
func TestDialCancel(t *testing.T) {
defer testutil.AfterTest(t)
testutil.BeforeTest(t)
// accept first connection so client is created with dial timeout
ln, err := net.Listen("unix", "dialcancel:12345")

View File

@ -22,7 +22,7 @@ import (
)
func TestTxnPanics(t *testing.T) {
defer testutil.AfterTest(t)
testutil.BeforeTest(t)
kv := &kv{}

View File

@ -28,7 +28,7 @@ running(leaking) after all tests.
}
func TestSample(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
...
}
@ -94,10 +94,18 @@ func CheckAfterTest(d time.Duration) error {
return fmt.Errorf("appears to have leaked %s:\n%s", bad, stacks)
}
// BeforeTest is a convenient way to register before-and-after code to a test.
// If you execute BeforeTest, you don't need to explicitly register AfterTest.
func BeforeTest(t testing.TB) {
t.Cleanup(func() {
AfterTest(t)
})
}
// AfterTest is meant to run in a defer that executes after a test completes.
// It will detect common goroutine leaks, retrying in case there are goroutines
// not synchronously torn down, and fail the test if any goroutines are stuck.
func AfterTest(t *testing.T) {
func AfterTest(t testing.TB) {
if err := CheckAfterTest(1 * time.Second); err != nil {
t.Errorf("Test %v", err)
}

View File

@ -187,7 +187,7 @@ func TestStreamReaderDialResult(t *testing.T) {
// TestStreamReaderStopOnDial tests a stream reader closes the connection on stop.
func TestStreamReaderStopOnDial(t *testing.T) {
defer testutil.AfterTest(t)
testutil.BeforeTest(t)
h := http.Header{}
h.Add("X-Server-Version", version.Version)
tr := &respWaitRoundTripper{rrt: &respRoundTripper{code: http.StatusOK, header: h}}

View File

@ -24,15 +24,22 @@ import (
"go.etcd.io/etcd/pkg/v3/testutil"
)
func BeforeTestV2(t testing.TB) {
skipInShortMode(t)
os.Setenv("ETCDCTL_API", "2")
t.Cleanup(func() {
os.Unsetenv("ETCDCTL_API")
})
testutil.BeforeTest(t)
}
func TestCtlV2Set(t *testing.T) { testCtlV2Set(t, newConfigNoTLS(), false) }
func TestCtlV2SetQuorum(t *testing.T) { testCtlV2Set(t, newConfigNoTLS(), true) }
func TestCtlV2SetClientTLS(t *testing.T) { testCtlV2Set(t, newConfigClientTLS(), false) }
func TestCtlV2SetPeerTLS(t *testing.T) { testCtlV2Set(t, newConfigPeerTLS(), false) }
func TestCtlV2SetTLS(t *testing.T) { testCtlV2Set(t, newConfigTLS(), false) }
func testCtlV2Set(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
cfg.enableV2 = true
epc := setupEtcdctlTest(t, cfg, quorum)
@ -53,9 +60,7 @@ func TestCtlV2Mk(t *testing.T) { testCtlV2Mk(t, newConfigNoTLS(), false) }
func TestCtlV2MkQuorum(t *testing.T) { testCtlV2Mk(t, newConfigNoTLS(), true) }
func TestCtlV2MkTLS(t *testing.T) { testCtlV2Mk(t, newConfigTLS(), false) }
func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
cfg.enableV2 = true
epc := setupEtcdctlTest(t, cfg, quorum)
@ -78,9 +83,7 @@ func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
func TestCtlV2Rm(t *testing.T) { testCtlV2Rm(t, newConfigNoTLS()) }
func TestCtlV2RmTLS(t *testing.T) { testCtlV2Rm(t, newConfigTLS()) }
func testCtlV2Rm(t *testing.T, cfg *etcdProcessClusterConfig) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
cfg.enableV2 = true
epc := setupEtcdctlTest(t, cfg, true)
@ -104,9 +107,7 @@ func TestCtlV2Ls(t *testing.T) { testCtlV2Ls(t, newConfigNoTLS(), false) }
func TestCtlV2LsQuorum(t *testing.T) { testCtlV2Ls(t, newConfigNoTLS(), true) }
func TestCtlV2LsTLS(t *testing.T) { testCtlV2Ls(t, newConfigTLS(), false) }
func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
cfg.enableV2 = true
epc := setupEtcdctlTest(t, cfg, quorum)
@ -127,9 +128,7 @@ func TestCtlV2Watch(t *testing.T) { testCtlV2Watch(t, newConfigNoTLS(), false
func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, newConfigTLS(), false) }
func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
cfg.enableV2 = true
epc := setupEtcdctlTest(t, cfg, true)
@ -152,9 +151,7 @@ func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
}
func TestCtlV2GetRoleUser(t *testing.T) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
copied := newConfigNoTLS()
copied.enableV2 = true
@ -185,9 +182,7 @@ func TestCtlV2GetRoleUser(t *testing.T) {
func TestCtlV2UserListUsername(t *testing.T) { testCtlV2UserList(t, "username") }
func TestCtlV2UserListRoot(t *testing.T) { testCtlV2UserList(t, "root") }
func testCtlV2UserList(t *testing.T, username string) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
copied := newConfigNoTLS()
copied.enableV2 = true
@ -203,9 +198,7 @@ func testCtlV2UserList(t *testing.T, username string) {
}
func TestCtlV2RoleList(t *testing.T) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
copied := newConfigNoTLS()
copied.enableV2 = true
@ -227,9 +220,7 @@ func TestCtlV2BackupV3(t *testing.T) { testCtlV2Backup(t, 0, true) }
func TestCtlV2BackupV3Snapshot(t *testing.T) { testCtlV2Backup(t, 1, true) }
func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
backupDir, err := ioutil.TempDir("", "testbackup0.etcd")
if err != nil {
@ -309,9 +300,7 @@ func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
}
func TestCtlV2AuthWithCommonName(t *testing.T) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
copiedCfg := newConfigClientTLS()
copiedCfg.clientCertAuthEnabled = true
@ -343,9 +332,7 @@ func TestCtlV2AuthWithCommonName(t *testing.T) {
}
func TestCtlV2ClusterHealth(t *testing.T) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
copied := newConfigNoTLS()
copied.enableV2 = true
@ -498,8 +485,6 @@ func etcdctlBackup(clus *etcdProcessCluster, dataDir, backupDir string, v3 bool)
}
func setupEtcdctlTest(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) *etcdProcessCluster {
skipInShortMode(t)
if !quorum {
cfg = configStandalone(*cfg)
}

View File

@ -25,8 +25,12 @@ import (
"go.etcd.io/etcd/pkg/v3/testutil"
)
func BeforeTest(t testing.TB) {
testutil.BeforeTest(t)
}
func TestCtlV3Migrate(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := newConfigNoTLS()
cfg.enableV2 = true

View File

@ -23,7 +23,6 @@ import (
"time"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/transport"
"go.etcd.io/etcd/pkg/v3/types"
)
@ -37,7 +36,7 @@ func TestCtlV3MoveLeaderInsecure(t *testing.T) {
}
func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
defer testutil.AfterTest(t)
BeforeTest(t)
epc := setupEtcdctlTest(t, &cfg, true)
defer func() {

View File

@ -26,7 +26,6 @@ import (
"go.etcd.io/etcd/etcdctl/v3/snapshot"
"go.etcd.io/etcd/pkg/v3/expect"
"go.etcd.io/etcd/pkg/v3/testutil"
)
func TestCtlV3Snapshot(t *testing.T) { testCtl(t, snapshotTest) }
@ -164,7 +163,7 @@ func TestIssue6361(t *testing.T) {
os.Setenv("EXPECT_DEBUG", "1")
}
defer testutil.AfterTest(t)
BeforeTest(t)
os.Setenv("ETCDCTL_API", "3")
defer os.Unsetenv("ETCDCTL_API")

View File

@ -52,7 +52,7 @@ func TestClusterVersion(t *testing.T) {
if !fileutil.Exist(binary) {
t.Skipf("%q does not exist", binary)
}
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := newConfigNoTLS()
cfg.execPath = binary
cfg.snapshotCount = 3
@ -198,7 +198,7 @@ func withFlagByEnv() ctlOption {
}
func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) {
defer testutil.AfterTest(t)
BeforeTest(t)
ret := ctlCtx{
t: t,

View File

@ -23,7 +23,6 @@ import (
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/testutil"
)
// TestReleaseUpgrade ensures that changes to master branch does not affect
@ -34,7 +33,7 @@ func TestReleaseUpgrade(t *testing.T) {
t.Skipf("%q does not exist", lastReleaseBinary)
}
defer testutil.AfterTest(t)
BeforeTest(t)
copiedCfg := newConfigNoTLS()
copiedCfg.execPath = lastReleaseBinary
@ -125,7 +124,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
t.Skipf("%q does not exist", lastReleaseBinary)
}
defer testutil.AfterTest(t)
BeforeTest(t)
copiedCfg := newConfigNoTLS()
copiedCfg.execPath = lastReleaseBinary

View File

@ -17,11 +17,8 @@ package e2e
import (
"fmt"
"math/rand"
"os"
"strings"
"testing"
"go.etcd.io/etcd/pkg/v3/testutil"
)
func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, newConfigNoTLS()) }
@ -31,7 +28,7 @@ func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, newConfigPeerTLS())
func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, newConfigClientTLS()) }
func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, newConfigClientBoth()) }
func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
defer testutil.AfterTest(t)
BeforeTestV2(t)
// test doesn't use quorum gets, so ensure there are no followers to avoid
// stale reads that will break the test
@ -66,9 +63,7 @@ func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
}
func TestV2CurlIssue5182(t *testing.T) {
os.Setenv("ETCDCTL_API", "2")
defer os.Unsetenv("ETCDCTL_API")
defer testutil.AfterTest(t)
BeforeTestV2(t)
copied := newConfigNoTLS()
copied.enableV2 = true

View File

@ -25,13 +25,12 @@ import (
"testing"
"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
// TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
func TestV2NoRetryEOF(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
// generate an EOF response; specify address so appears first in sorted ep list
lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
defer lEOF.Close()
@ -63,7 +62,7 @@ func TestV2NoRetryEOF(t *testing.T) {
// TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
func TestV2NoRetryNoLeader(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
eh := &errHandler{errCode: http.StatusServiceUnavailable}
srv := httptest.NewUnstartedServer(eh)
@ -89,7 +88,7 @@ func TestV2NoRetryNoLeader(t *testing.T) {
// TestV2RetryRefuse tests destructive api calls will retry if a connection is refused.
func TestV2RetryRefuse(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

View File

@ -22,13 +22,12 @@ import (
"testing"
"time"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/types"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestMemberList(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -46,7 +45,7 @@ func TestMemberList(t *testing.T) {
}
func TestMemberAdd(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -65,7 +64,7 @@ func TestMemberAdd(t *testing.T) {
}
func TestMemberAddWithExistingURLs(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -89,7 +88,7 @@ func TestMemberAddWithExistingURLs(t *testing.T) {
}
func TestMemberRemove(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -127,7 +126,7 @@ func TestMemberRemove(t *testing.T) {
}
func TestMemberUpdate(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -155,7 +154,7 @@ func TestMemberUpdate(t *testing.T) {
}
func TestMemberAddUpdateWrongURLs(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -188,7 +187,7 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) {
}
func TestMemberAddForLearner(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -217,7 +216,7 @@ func TestMemberAddForLearner(t *testing.T) {
}
func TestMemberPromote(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -294,7 +293,7 @@ func TestMemberPromote(t *testing.T) {
// TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails.
func TestMemberPromoteMemberNotLearner(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -330,7 +329,7 @@ func TestMemberPromoteMemberNotLearner(t *testing.T) {
// TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails.
func TestMemberPromoteMemberNotExist(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -379,7 +378,7 @@ func TestMemberPromoteMemberNotExist(t *testing.T) {
// TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is 1
func TestMaxLearnerInCluster(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
// 1. start with a cluster with 3 voting member and 0 learner member
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})

View File

@ -24,7 +24,6 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
"go.etcd.io/etcd/tests/v3/integration/clientv3"
"google.golang.org/grpc"
@ -34,7 +33,7 @@ import (
// blackholed endpoint, client balancer switches to healthy one.
// TODO: test server-to-client keepalive ping
func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 2,
@ -166,7 +165,7 @@ func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
// testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint
// fails due to context timeout, but succeeds on next try, with endpoint switch.
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 2,

View File

@ -23,7 +23,6 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/transport"
"go.etcd.io/etcd/tests/v3/integration"
clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
@ -48,7 +47,7 @@ var (
// TestDialTLSExpired tests client with expired certs fails to dial.
func TestDialTLSExpired(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
defer clus.Terminate(t)
@ -71,7 +70,7 @@ func TestDialTLSExpired(t *testing.T) {
// TestDialTLSNoConfig ensures the client fails to dial / times out
// when TLS endpoints (https, unixs) are given but no tls config.
func TestDialTLSNoConfig(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
defer clus.Terminate(t)
// expect "signed by unknown authority"
@ -102,7 +101,7 @@ func TestDialSetEndpointsAfterFail(t *testing.T) {
// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func testDialSetEndpoints(t *testing.T, setBefore bool) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
defer clus.Terminate(t)
@ -145,7 +144,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
// TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
// with a new one that doesn't include original endpoint.
func TestSwitchSetEndpoints(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -165,7 +164,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
}
func TestRejectOldCluster(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
// 2 endpoints to test multi-endpoint Status
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true})
defer clus.Terminate(t)
@ -186,7 +185,7 @@ func TestRejectOldCluster(t *testing.T) {
// TestDialForeignEndpoint checks an endpoint that is not registered
// with the balancer can be dialed.
func TestDialForeignEndpoint(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
@ -209,7 +208,7 @@ func TestDialForeignEndpoint(t *testing.T) {
// TestSetEndpointAndPut checks that a Put following a SetEndpoints
// to a working endpoint will always succeed.
func TestSetEndpointAndPut(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)

View File

@ -26,7 +26,6 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
"go.etcd.io/etcd/tests/v3/integration/clientv3"
"google.golang.org/grpc"
@ -104,7 +103,7 @@ func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) {
}
func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
@ -159,7 +158,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
// switches endpoint when leader fails and linearizable get requests returns
// "etcdserver: request timed out".
func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
@ -214,7 +213,7 @@ func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
// testBalancerUnderNetworkPartitionWatch ensures watch stream
// to a partitioned node be closed when context requires leader.
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
@ -274,7 +273,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
}
func TestDropReadUnderNetworkPartition(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,

View File

@ -22,7 +22,6 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
"go.etcd.io/etcd/tests/v3/integration/clientv3"
)
@ -30,7 +29,7 @@ import (
// TestBalancerUnderServerShutdownWatch expects that watch client
// switch its endpoints when the member of the pinned endpoint fails.
func TestBalancerUnderServerShutdownWatch(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
@ -142,7 +141,7 @@ func TestBalancerUnderServerShutdownTxn(t *testing.T) {
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent put/delete/txn requests succeed with new endpoints.
func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
@ -200,7 +199,7 @@ func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent range requests succeed with new endpoints.
func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
@ -269,7 +268,7 @@ type pinTestOpt struct {
// testBalancerUnderServerStopInflightRangeOnRestart expects
// inflight range request reconnects on server restart.
func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
cfg := &integration.ClusterConfig{
Size: 2,

View File

@ -16,9 +16,10 @@ package clientv3_test
import (
"context"
"log"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/transport"
"log"
)
func mockConfig_insecure() {}

View File

@ -20,19 +20,18 @@ import (
"go.etcd.io/etcd/client/v3"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestBarrierSingleNode(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) })
}
func TestBarrierMultiNode(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })

View File

@ -24,7 +24,6 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
@ -178,7 +177,7 @@ func TestMutexSessionRelock(t *testing.T) {
// waiters older than the new owner are gone by testing the case where
// the waiter prior to the acquirer expires before the current holder.
func TestMutexWaitsOnCurrentHolder(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -29,14 +29,13 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func TestKVPutError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
var (
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
@ -72,7 +71,7 @@ func TestKVPutError(t *testing.T) {
}
func TestKVPut(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -117,7 +116,7 @@ func TestKVPut(t *testing.T) {
// TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
func TestKVPutWithIgnoreValue(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -150,7 +149,7 @@ func TestKVPutWithIgnoreValue(t *testing.T) {
// TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
func TestKVPutWithIgnoreLease(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -189,7 +188,7 @@ func TestKVPutWithIgnoreLease(t *testing.T) {
}
func TestKVPutWithRequireLeader(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -235,7 +234,7 @@ func TestKVPutWithRequireLeader(t *testing.T) {
}
func TestKVRange(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -464,7 +463,7 @@ func TestKVRange(t *testing.T) {
}
func TestKVGetErrConnClosed(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -493,7 +492,7 @@ func TestKVGetErrConnClosed(t *testing.T) {
}
func TestKVNewAfterClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -520,7 +519,7 @@ func TestKVNewAfterClose(t *testing.T) {
}
func TestKVDeleteRange(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -592,7 +591,7 @@ func TestKVDeleteRange(t *testing.T) {
}
func TestKVDelete(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -624,7 +623,7 @@ func TestKVDelete(t *testing.T) {
}
func TestKVCompactError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -654,7 +653,7 @@ func TestKVCompactError(t *testing.T) {
}
func TestKVCompact(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -709,7 +708,7 @@ func TestKVCompact(t *testing.T) {
// TestKVGetRetry ensures get will retry on disconnect.
func TestKVGetRetry(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clusterSize := 3
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
@ -763,7 +762,7 @@ func TestKVGetRetry(t *testing.T) {
// TestKVPutFailGetRetry ensures a get will retry following a failed put.
func TestKVPutFailGetRetry(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -803,7 +802,7 @@ func TestKVPutFailGetRetry(t *testing.T) {
// TestKVGetCancel tests that a context cancel on a Get terminates as expected.
func TestKVGetCancel(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -826,7 +825,7 @@ func TestKVGetCancel(t *testing.T) {
// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
func TestKVGetStoppedServerAndClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -844,7 +843,7 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -875,7 +874,7 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
// TestKVPutAtMostOnce ensures that a Put will only occur at most once
// in the presence of network errors.
func TestKVPutAtMostOnce(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -911,7 +910,7 @@ func TestKVPutAtMostOnce(t *testing.T) {
// TestKVLargeRequests tests various client/server side request limits.
func TestKVLargeRequests(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
tests := []struct {
// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
maxRequestBytesServer uint
@ -1003,7 +1002,7 @@ func TestKVLargeRequests(t *testing.T) {
// TestKVForLearner ensures learner member only accepts serializable read request.
func TestKVForLearner(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1082,7 +1081,7 @@ func TestKVForLearner(t *testing.T) {
// TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member
func TestBalancerSupportLearner(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -26,12 +26,11 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestLeaseNotFoundError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -45,7 +44,7 @@ func TestLeaseNotFoundError(t *testing.T) {
}
func TestLeaseGrant(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -71,7 +70,7 @@ func TestLeaseGrant(t *testing.T) {
}
func TestLeaseRevoke(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -97,7 +96,7 @@ func TestLeaseRevoke(t *testing.T) {
}
func TestLeaseKeepAliveOnce(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -121,7 +120,7 @@ func TestLeaseKeepAliveOnce(t *testing.T) {
}
func TestLeaseKeepAlive(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -161,7 +160,7 @@ func TestLeaseKeepAlive(t *testing.T) {
}
func TestLeaseKeepAliveOneSecond(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -189,7 +188,7 @@ func TestLeaseKeepAliveOneSecond(t *testing.T) {
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
t.Skip("test it when we have a cluster client")
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -244,7 +243,7 @@ type leaseCh struct {
// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases.
func TestLeaseKeepAliveNotFound(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -277,7 +276,7 @@ func TestLeaseKeepAliveNotFound(t *testing.T) {
}
func TestLeaseGrantErrConnClosed(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -309,7 +308,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
// queue is full thus dropping keepalive response sends,
// keepalive request is sent with the same rate of TTL / 3.
func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -349,7 +348,7 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
}
func TestLeaseGrantNewAfterClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -376,7 +375,7 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
}
func TestLeaseRevokeNewAfterClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -415,7 +414,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
// following a disconnection, lease revoke, then reconnect.
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -461,7 +460,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
// the initial keep alive request never gets a response.
func TestLeaseKeepAliveInitTimeout(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -494,7 +493,7 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -529,7 +528,7 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
}
func TestLeaseTimeToLive(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -587,7 +586,7 @@ func TestLeaseTimeToLive(t *testing.T) {
}
func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -622,7 +621,7 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
}
func TestLeaseLeases(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -655,7 +654,7 @@ func TestLeaseLeases(t *testing.T) {
// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -703,7 +702,7 @@ func TestLeaseRenewLostQuorum(t *testing.T) {
}
func TestLeaseKeepAliveLoopExit(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -728,7 +727,7 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
@ -781,7 +780,7 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
// TestLeaseWithRequireLeader checks keep-alive channel close when no leader.
func TestLeaseWithRequireLeader(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)

View File

@ -31,7 +31,7 @@ import (
)
func TestLeasingPutGet(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -90,7 +90,7 @@ func TestLeasingPutGet(t *testing.T) {
// TestLeasingInterval checks the leasing KV fetches key intervals.
func TestLeasingInterval(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -129,7 +129,7 @@ func TestLeasingInterval(t *testing.T) {
// TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key.
func TestLeasingPutInvalidateNew(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -159,7 +159,7 @@ func TestLeasingPutInvalidateNew(t *testing.T) {
// TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key.
func TestLeasingPutInvalidateExisting(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -193,7 +193,7 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
// TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased.
func TestLeasingGetNoLeaseTTL(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -222,7 +222,7 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) {
// TestLeasingGetSerializable checks the leasing KV can make serialized requests
// when the etcd cluster is partitioned.
func TestLeasingGetSerializable(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
@ -262,7 +262,7 @@ func TestLeasingGetSerializable(t *testing.T) {
// TestLeasingPrevKey checks the cache respects WithPrevKV on puts.
func TestLeasingPrevKey(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
@ -288,7 +288,7 @@ func TestLeasingPrevKey(t *testing.T) {
// TestLeasingRevGet checks the cache respects Get by Revision.
func TestLeasingRevGet(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -324,7 +324,7 @@ func TestLeasingRevGet(t *testing.T) {
// TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server.
func TestLeasingGetWithOpts(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -369,7 +369,7 @@ func TestLeasingGetWithOpts(t *testing.T) {
// TestLeasingConcurrentPut ensures that a get after concurrent puts returns
// the recently put data.
func TestLeasingConcurrentPut(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -416,7 +416,7 @@ func TestLeasingConcurrentPut(t *testing.T) {
}
func TestLeasingDisconnectedGet(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -445,7 +445,7 @@ func TestLeasingDisconnectedGet(t *testing.T) {
}
func TestLeasingDeleteOwner(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -479,7 +479,7 @@ func TestLeasingDeleteOwner(t *testing.T) {
}
func TestLeasingDeleteNonOwner(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -514,7 +514,7 @@ func TestLeasingDeleteNonOwner(t *testing.T) {
}
func TestLeasingOverwriteResponse(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -548,7 +548,7 @@ func TestLeasingOverwriteResponse(t *testing.T) {
}
func TestLeasingOwnerPutResponse(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -586,7 +586,7 @@ func TestLeasingOwnerPutResponse(t *testing.T) {
}
func TestLeasingTxnOwnerGetRange(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -615,7 +615,7 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
}
func TestLeasingTxnOwnerGet(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -701,7 +701,7 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
}
func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -740,7 +740,7 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
}
func TestLeasingTxnOwnerDelete(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -771,7 +771,7 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
}
func TestLeasingTxnOwnerIf(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -865,7 +865,7 @@ func TestLeasingTxnOwnerIf(t *testing.T) {
}
func TestLeasingTxnCancel(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -899,7 +899,7 @@ func TestLeasingTxnCancel(t *testing.T) {
}
func TestLeasingTxnNonOwnerPut(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -977,7 +977,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
// TestLeasingTxnRandIfThenOrElse randomly leases keys two separate clients, then
// issues a random If/{Then,Else} transaction on those keys to one client.
func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1083,7 +1083,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
}
func TestLeasingOwnerPutError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1104,7 +1104,7 @@ func TestLeasingOwnerPutError(t *testing.T) {
}
func TestLeasingOwnerDeleteError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1125,7 +1125,7 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
}
func TestLeasingNonOwnerPutError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1150,7 +1150,7 @@ func TestLeasingOwnerDeleteFrom(t *testing.T) {
}
func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1199,7 +1199,7 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
}
func TestLeasingDeleteRangeBounds(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1257,7 +1257,7 @@ func TestLeaseDeleteRangeContendDel(t *testing.T) {
}
func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1315,7 +1315,7 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
}
func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1374,7 +1374,7 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
// TestLeasingReconnectOwnerRevoke checks that revocation works if
// disconnected when trying to submit revoke txn.
func TestLeasingReconnectOwnerRevoke(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1435,7 +1435,7 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
// TestLeasingReconnectOwnerRevokeCompact checks that revocation works if
// disconnected and the watch is compacted.
func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1488,7 +1488,7 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
// TestLeasingReconnectOwnerConsistency checks a write error on an owner will
// not cause inconsistency between the server and the client.
func TestLeasingReconnectOwnerConsistency(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1562,7 +1562,7 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) {
}
func TestLeasingTxnAtomicCache(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1648,7 +1648,7 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
// TestLeasingReconnectTxn checks that Txn is resilient to disconnects.
func TestLeasingReconnectTxn(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1684,7 +1684,7 @@ func TestLeasingReconnectTxn(t *testing.T) {
// TestLeasingReconnectNonOwnerGet checks a get error on an owner will
// not cause inconsistency between the server and the client.
func TestLeasingReconnectNonOwnerGet(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1735,7 +1735,7 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
}
func TestLeasingTxnRangeCmp(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1770,7 +1770,7 @@ func TestLeasingTxnRangeCmp(t *testing.T) {
}
func TestLeasingDo(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1812,7 +1812,7 @@ func TestLeasingDo(t *testing.T) {
}
func TestLeasingTxnOwnerPutBranch(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1906,7 +1906,7 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
}
func TestLeasingSessionExpire(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1982,7 +1982,7 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
}
for i := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -30,7 +30,6 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc"
"go.etcd.io/etcd/server/v3/mvcc/backend"
@ -38,7 +37,7 @@ import (
)
func TestMaintenanceHashKV(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -71,7 +70,7 @@ func TestMaintenanceHashKV(t *testing.T) {
}
func TestMaintenanceMoveLeader(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -102,7 +101,7 @@ func TestMaintenanceMoveLeader(t *testing.T) {
// TestMaintenanceSnapshotError ensures that context cancel/timeout
// before snapshot reading returns corresponding context errors.
func TestMaintenanceSnapshotError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -141,7 +140,7 @@ func TestMaintenanceSnapshotError(t *testing.T) {
// TestMaintenanceSnapshotErrorInflight ensures that inflight context cancel/timeout
// fails snapshot reading with corresponding context errors.
func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -198,7 +197,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
}
func TestMaintenanceStatus(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -26,7 +26,6 @@ import (
"time"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/transport"
"go.etcd.io/etcd/tests/v3/integration"
@ -36,7 +35,7 @@ import (
)
func TestV3ClientMetrics(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
var (
addr = "localhost:27989"

View File

@ -24,12 +24,11 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3/mirror"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestMirrorSync(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -22,12 +22,11 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestNamespacePutGet(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -56,7 +55,7 @@ func TestNamespacePutGet(t *testing.T) {
}
func TestNamespaceWatch(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -22,12 +22,11 @@ import (
etcd "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestEndpointManager(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -89,7 +88,7 @@ func TestEndpointManager(t *testing.T) {
// correctly with multiple hosts and correctly receive multiple
// updates in a single revision.
func TestEndpointManagerAtomicity(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -131,7 +130,7 @@ func TestEndpointManagerAtomicity(t *testing.T) {
}
func TestEndpointManagerCRUD(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -23,7 +23,6 @@ import (
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/client/v3/naming/resolver"
grpctest "go.etcd.io/etcd/pkg/v3/grpc_testing"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
"google.golang.org/grpc"
@ -33,7 +32,7 @@ import (
// This test mimics scenario described in grpc_naming.md doc.
func TestEtcdGrpcResolver(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
s1PayloadBody := []byte{'1'}
s1 := newDummyStubServer(s1PayloadBody)

View File

@ -22,14 +22,13 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/ordering"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestDetectKvOrderViolation(t *testing.T) {
var errOrderViolation = errors.New("Detected Order Violation")
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -92,7 +91,7 @@ func TestDetectKvOrderViolation(t *testing.T) {
func TestDetectTxnOrderViolation(t *testing.T) {
var errOrderViolation = errors.New("Detected Order Violation")
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -21,12 +21,11 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/ordering"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestEndpointSwitchResolvesViolation(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
eps := []string{
@ -80,7 +79,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
}
func TestUnresolvableOrderViolation(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true})
defer clus.Terminate(t)
cfg := clientv3.Config{

View File

@ -19,12 +19,11 @@ import (
"testing"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestRoleError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -22,13 +22,12 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/tests/v3/integration"
)
func TestTxnError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -52,7 +51,7 @@ func TestTxnError(t *testing.T) {
}
func TestTxnWriteFail(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -102,7 +101,7 @@ func TestTxnWriteFail(t *testing.T) {
func TestTxnReadRetry(t *testing.T) {
t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request")
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -141,7 +140,7 @@ func TestTxnReadRetry(t *testing.T) {
}
func TestTxnSuccess(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -164,7 +163,7 @@ func TestTxnSuccess(t *testing.T) {
}
func TestTxnCompareRange(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -191,7 +190,7 @@ func TestTxnCompareRange(t *testing.T) {
}
func TestTxnNested(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -21,13 +21,12 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/integration"
"google.golang.org/grpc"
)
func TestUserError(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -56,7 +55,7 @@ func TestUserError(t *testing.T) {
}
func TestUserErrorAuth(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -113,7 +112,7 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {
// Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode.
func TestGetTokenWithoutAuth(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)

View File

@ -28,7 +28,6 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
"go.etcd.io/etcd/tests/v3/integration"
"google.golang.org/grpc/metadata"
@ -46,7 +45,7 @@ type watchctx struct {
}
func runWatchTest(t *testing.T, f watcherTest) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -346,7 +345,7 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
}
func TestWatchResumeInitRev(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -401,7 +400,7 @@ func TestWatchResumeInitRev(t *testing.T) {
// either a compaction error or all keys by staying in sync before the compaction
// is finally applied.
func TestWatchResumeCompacted(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -488,7 +487,7 @@ func TestWatchResumeCompacted(t *testing.T) {
// TestWatchCompactRevision ensures the CompactRevision error is given on a
// compaction event ahead of a watcher.
func TestWatchCompactRevision(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -530,7 +529,7 @@ func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNot
func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) }
func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
// accelerate report interval so test terminates quickly
oldpi := v3rpc.GetProgressReportInterval()
@ -622,7 +621,7 @@ func TestWatchRequestProgress(t *testing.T) {
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
watchTimeout := 3 * time.Second
@ -755,7 +754,7 @@ func TestWatchEventType(t *testing.T) {
}
func TestWatchErrConnClosed(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -785,7 +784,7 @@ func TestWatchErrConnClosed(t *testing.T) {
}
func TestWatchAfterClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -813,7 +812,7 @@ func TestWatchAfterClose(t *testing.T) {
// TestWatchWithRequireLeader checks the watch channel closes when no leader.
func TestWatchWithRequireLeader(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1049,7 +1048,7 @@ func TestWatchOverlapDropConnContextCancel(t *testing.T) {
}
func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1110,7 +1109,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -1140,7 +1139,7 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
// to put them in resuming mode, cancels them so some resumes by cancel fail,
// then closes the watcher interface to ensure correct clean up.
func TestWatchStressResumeClose(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -1162,7 +1161,7 @@ func TestWatchStressResumeClose(t *testing.T) {
// TestWatchCancelDisconnected ensures canceling a watcher works when
// its grpc stream is disconnected / reconnecting.
func TestWatchCancelDisconnected(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)

View File

@ -26,7 +26,6 @@ import (
"time"
"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/etcdserver"
)
@ -44,7 +43,7 @@ func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }
func testCluster(t *testing.T, size int) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
@ -52,7 +51,7 @@ func testCluster(t *testing.T, size int) {
}
func TestTLSClusterOf3(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
c.Launch(t)
defer c.Terminate(t)
@ -62,7 +61,7 @@ func TestTLSClusterOf3(t *testing.T) {
// Test that a cluster can progress when using separate client and server certs when peering. This supports certificate
// authorities that don't issue dual-usage certificates.
func TestTLSClusterOf3WithSpecificUsage(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfoWithSpecificUsage})
c.Launch(t)
defer c.Terminate(t)
@ -73,7 +72,7 @@ func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }
func testClusterUsingDiscovery(t *testing.T, size int) {
defer testutil.AfterTest(t)
BeforeTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
@ -96,7 +95,7 @@ func testClusterUsingDiscovery(t *testing.T, size int) {
}
func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
@ -124,7 +123,7 @@ func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }
func testDoubleClusterSize(t *testing.T, size int) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
@ -136,7 +135,7 @@ func testDoubleClusterSize(t *testing.T, size int) {
}
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
c.Launch(t)
defer c.Terminate(t)
@ -151,7 +150,7 @@ func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }
func testDecreaseClusterSize(t *testing.T, size int) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
@ -215,7 +214,7 @@ func TestForceNewCluster(t *testing.T) {
}
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
@ -237,7 +236,7 @@ func TestAddMemberAfterClusterFullRotation(t *testing.T) {
// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)
@ -257,7 +256,7 @@ func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }
func testIssue2746(t *testing.T, members int) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, members)
for _, m := range c.Members {
@ -282,7 +281,7 @@ func testIssue2746(t *testing.T, members int) {
// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
// start 1-member cluster to ensure member 0 is the leader of the cluster.
c := NewCluster(t, 1)
c.Launch(t)
@ -319,7 +318,7 @@ func TestIssue2904(t *testing.T) {
// deadlocking.
func TestIssue3699(t *testing.T) {
// start a cluster of 3 nodes a, b, c
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
@ -371,7 +370,7 @@ func TestIssue3699(t *testing.T) {
// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 3)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
@ -415,7 +414,7 @@ func TestRejectUnhealthyAdd(t *testing.T) {
// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 5)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
@ -462,7 +461,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
// if 'initial-cluster-state' is set 'new' and old data directory still exists
// (see https://github.com/etcd-io/etcd/issues/7512 for more).
func TestRestartRemoved(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
// 1. start single-member cluster
c := NewCluster(t, 1)
@ -540,7 +539,7 @@ func clusterMustProgress(t *testing.T, membs []*member) {
}
func TestSpeedyTerminate(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
// Stop/Restart so requests will time out on lost leaders
for i := 0; i < 3; i++ {

View File

@ -23,11 +23,10 @@ import (
"testing"
"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/pkg/v3/testutil"
)
func TestPauseMember(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 5)
c.Launch(t)
@ -46,7 +45,7 @@ func TestPauseMember(t *testing.T) {
}
func TestRestartMember(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
@ -84,7 +83,7 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
}
func TestSnapshotAndRestartMember(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
m := mustNewMember(t, memberConfig{name: "snapAndRestartTest"})
m.SnapshotCount = 100
m.Launch()

View File

@ -23,14 +23,13 @@ import (
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/transport"
"go.etcd.io/etcd/server/v3/etcdserver"
)
// TestMetricDbSizeBoot checks that the db size metric is set on boot.
func TestMetricDbSizeBoot(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -54,7 +53,7 @@ func TestMetricDbSizeDefragDebugging(t *testing.T) {
// testMetricDbSizeDefrag checks that the db size metric is set after defrag.
func testMetricDbSizeDefrag(t *testing.T, name string) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -168,7 +167,7 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
}
func TestMetricQuotaBackendBytes(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -186,7 +185,7 @@ func TestMetricQuotaBackendBytes(t *testing.T) {
}
func TestMetricsHealth(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -18,12 +18,10 @@ import (
"fmt"
"testing"
"time"
"go.etcd.io/etcd/pkg/v3/testutil"
)
func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 5})
defer clus.Terminate(t)
@ -71,7 +69,7 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) {
}
func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 5})
defer clus.Terminate(t)
@ -110,7 +108,7 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
}
func TestNetworkPartition4Members(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 4})
defer clus.Terminate(t)

View File

@ -22,7 +22,6 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
"go.etcd.io/etcd/tests/v3/integration"
@ -31,7 +30,7 @@ import (
)
func TestClusterProxyMemberList(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -22,7 +22,6 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
"go.etcd.io/etcd/tests/v3/integration"
@ -30,7 +29,7 @@ import (
)
func TestKVProxyRange(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -20,7 +20,6 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
"go.etcd.io/etcd/tests/v3/integration"
@ -28,7 +27,7 @@ import (
)
func TestRegister(t *testing.T) {
defer testutil.AfterTest(t)
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -0,0 +1,25 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"go.etcd.io/etcd/pkg/v3/testutil"
)
// BeforeTest registers the standard before-test logic for integration
// tests by delegating to testutil.BeforeTest, which also schedules the
// after-test leak checks via t.Cleanup, so callers no longer need a
// separate `defer testutil.AfterTest(t)`.
func BeforeTest(t testing.TB) {
testutil.BeforeTest(t)
}

View File

@ -26,12 +26,11 @@ import (
"testing"
"time"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/transport"
)
func TestV2Set(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -93,7 +92,7 @@ func TestV2Set(t *testing.T) {
}
func TestV2CreateUpdate(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -229,7 +228,7 @@ func TestV2CreateUpdate(t *testing.T) {
}
func TestV2CAS(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -377,7 +376,7 @@ func TestV2CAS(t *testing.T) {
}
func TestV2Delete(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -477,7 +476,7 @@ func TestV2Delete(t *testing.T) {
}
func TestV2CAD(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -577,7 +576,7 @@ func TestV2CAD(t *testing.T) {
}
func TestV2Unique(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -644,7 +643,7 @@ func TestV2Unique(t *testing.T) {
}
func TestV2Get(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -742,7 +741,7 @@ func TestV2Get(t *testing.T) {
}
func TestV2QuorumGet(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -840,7 +839,7 @@ func TestV2QuorumGet(t *testing.T) {
}
func TestV2Watch(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -878,7 +877,7 @@ func TestV2Watch(t *testing.T) {
}
func TestV2WatchWithIndex(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -945,7 +944,7 @@ func TestV2WatchWithIndex(t *testing.T) {
}
func TestV2WatchKeyInDir(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
@ -1006,7 +1005,7 @@ func TestV2WatchKeyInDir(t *testing.T) {
}
func TestV2Head(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cl := NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

View File

@ -147,7 +147,7 @@ func TestV3AlarmDeactivate(t *testing.T) {
}
func TestV3CorruptAlarm(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -30,7 +30,7 @@ import (
// TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error.
func TestV3AuthEmptyUserGet(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -49,7 +49,7 @@ func TestV3AuthEmptyUserGet(t *testing.T) {
// TestV3AuthTokenWithDisable tests that auth won't crash if
// given a valid token when authentication is disabled
func TestV3AuthTokenWithDisable(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -81,7 +81,7 @@ func TestV3AuthTokenWithDisable(t *testing.T) {
}
func TestV3AuthRevision(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -119,7 +119,7 @@ func TestV3AuthWithLeaseRevokeWithRootJWT(t *testing.T) {
}
func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg ClusterConfig) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ccfg)
defer clus.Terminate(t)
@ -177,7 +177,7 @@ type user struct {
}
func TestV3AuthWithLeaseRevoke(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -223,7 +223,7 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
}
func TestV3AuthWithLeaseAttach(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -335,7 +335,7 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) {
}
func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -358,7 +358,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
func TestV3AuthOldRevConcurrent(t *testing.T) {
t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed.
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -22,8 +22,6 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/testutil"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -32,7 +30,7 @@ import (
// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
// does not panic the mvcc backend while defragment is running.
func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -62,7 +60,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
// They are either finished or canceled, but never crash the backend.
// See https://github.com/etcd-io/etcd/issues/7322 for more detail.
func TestV3KVInflightRangeRequests(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -28,7 +28,6 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/transport"
"google.golang.org/grpc"
@ -40,7 +39,7 @@ import (
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -88,7 +87,7 @@ func TestV3PutOverwrite(t *testing.T) {
// TestPutRestart checks if a put after an unrelated member restart succeeds
func TestV3PutRestart(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -120,7 +119,7 @@ func TestV3PutRestart(t *testing.T) {
// TestV3CompactCurrentRev ensures keys are present when compacting on current revision.
func TestV3CompactCurrentRev(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -154,7 +153,7 @@ func TestV3CompactCurrentRev(t *testing.T) {
// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev.
func TestV3HashKV(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -202,7 +201,7 @@ func TestV3HashKV(t *testing.T) {
}
func TestV3TxnTooManyOps(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
maxTxnOps := uint(128)
clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
defer clus.Terminate(t)
@ -278,7 +277,7 @@ func TestV3TxnTooManyOps(t *testing.T) {
}
func TestV3TxnDuplicateKeys(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -396,7 +395,7 @@ func TestV3TxnDuplicateKeys(t *testing.T) {
// Testv3TxnRevision tests that the transaction header revision is set as expected.
func TestV3TxnRevision(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -447,7 +446,7 @@ func TestV3TxnRevision(t *testing.T) {
// Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected
// when compared to the Succeeded field in the txn response.
func TestV3TxnCmpHeaderRev(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -503,7 +502,7 @@ func TestV3TxnCmpHeaderRev(t *testing.T) {
// TestV3TxnRangeCompare tests range comparisons in txns
func TestV3TxnRangeCompare(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -614,7 +613,7 @@ func TestV3TxnRangeCompare(t *testing.T) {
// TestV3TxnNested tests nested txns follow paths as expected.
func TestV3TxnNestedPath(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -667,7 +666,7 @@ func TestV3TxnNestedPath(t *testing.T) {
// TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair.
func TestV3PutIgnoreValue(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -800,7 +799,7 @@ func TestV3PutIgnoreValue(t *testing.T) {
// TestV3PutIgnoreLease ensures that writes with ignore_lease uses previous lease for the key overwrites.
func TestV3PutIgnoreLease(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -940,7 +939,7 @@ func TestV3PutIgnoreLease(t *testing.T) {
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1011,7 +1010,7 @@ func TestV3PutMissingLease(t *testing.T) {
// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
tests := []struct {
keySet []string
begin string
@ -1120,7 +1119,7 @@ func TestV3DeleteRange(t *testing.T) {
// TestV3TxnInvalidRange tests that invalid ranges are rejected in txns.
func TestV3TxnInvalidRange(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1163,7 +1162,7 @@ func TestV3TxnInvalidRange(t *testing.T) {
}
func TestV3TooLargeRequest(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1182,7 +1181,7 @@ func TestV3TooLargeRequest(t *testing.T) {
// TestV3Hash tests hash.
func TestV3Hash(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1207,7 +1206,7 @@ func TestV3Hash(t *testing.T) {
// TestV3HashRestart ensures that hash stays the same after restart.
func TestV3HashRestart(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1238,7 +1237,7 @@ func TestV3HashRestart(t *testing.T) {
// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer
func TestV3StorageQuotaAPI(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
quotasize := int64(16 * os.Getpagesize())
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
@ -1285,7 +1284,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
}
func TestV3RangeRequest(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
tests := []struct {
putKeys []string
reqs []pb.RangeRequest
@ -1535,7 +1534,7 @@ func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
// TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client.
func TestTLSGRPCRejectInsecureClient(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
clus := newClusterV3NoClients(t, &cfg)
@ -1570,7 +1569,7 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) {
// TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server.
func TestTLSGRPCRejectSecureClient(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := ClusterConfig{Size: 3}
clus := newClusterV3NoClients(t, &cfg)
@ -1588,7 +1587,7 @@ func TestTLSGRPCRejectSecureClient(t *testing.T) {
// TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS
func TestTLSGRPCAcceptSecureAll(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
clus := newClusterV3NoClients(t, &cfg)
@ -1731,7 +1730,7 @@ func testTLSReload(
replaceFunc func(),
revertFunc func(),
useIP bool) {
defer testutil.AfterTest(t)
BeforeTest(t)
// 1. separate copies for TLS assets modification
tlsInfo := cloneFunc()
@ -1806,7 +1805,7 @@ func testTLSReload(
}
func TestGRPCRequireLeader(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := ClusterConfig{Size: 3}
clus := newClusterV3NoClients(t, &cfg)
@ -1833,7 +1832,7 @@ func TestGRPCRequireLeader(t *testing.T) {
}
func TestGRPCStreamRequireLeader(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := ClusterConfig{Size: 3}
clus := newClusterV3NoClients(t, &cfg)
@ -1896,7 +1895,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) {
// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended.
func TestV3LargeRequests(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
tests := []struct {
maxRequestBytes uint
valueSize int

View File

@ -18,13 +18,11 @@ import (
"context"
"testing"
"go.etcd.io/etcd/pkg/v3/testutil"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)
func TestHealthCheck(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -2,19 +2,19 @@ package integration
import (
"context"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
"io/ioutil"
"os"
"testing"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
)
// TestKVWithEmptyValue ensures that a get/delete with an empty value, and with WithFromKey/WithPrefix function will return an empty error.
func TestKVWithEmptyValue(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
cfg := embed.NewConfig()

View File

@ -22,14 +22,13 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/testutil"
)
func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) }
func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) }
func testMoveLeader(t *testing.T, auto bool) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -97,7 +96,7 @@ func testMoveLeader(t *testing.T, auto bool) {
// TestMoveLeaderError ensures that request to non-leader fail.
func TestMoveLeaderError(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -116,7 +115,7 @@ func TestMoveLeaderError(t *testing.T) {
// TestMoveLeaderToLearnerError ensures that leader transfer to learner member will fail.
func TestMoveLeaderToLearnerError(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -149,7 +148,7 @@ func TestMoveLeaderToLearnerError(t *testing.T) {
// TestTransferLeadershipWithLearner ensures TransferLeadership does not timeout due to learner is
// automatically picked by leader as transferee.
func TestTransferLeadershipWithLearner(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -33,7 +33,7 @@ import (
// TestV3LeasePrmote ensures the newly elected leader can promote itself
// to the primary lessor, refresh the leases and start to manage leases.
// TODO: use customized clock to make this test go faster?
func TestV3LeasePrmote(t *testing.T) {
func TestV3LeasePromote(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -94,7 +94,7 @@ func TestV3LeasePrmote(t *testing.T) {
// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
lc := toGRPC(clus.RandClient()).Lease
_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
@ -104,7 +104,7 @@ func TestV3LeaseRevoke(t *testing.T) {
// TestV3LeaseGrantById ensures leases may be created by a given id.
func TestV3LeaseGrantByID(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -141,7 +141,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
// TestV3LeaseExpire ensures a key is deleted once a key expires.
func TestV3LeaseExpire(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
// let lease lapse; wait for deleted key
@ -193,7 +193,7 @@ func TestV3LeaseExpire(t *testing.T) {
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
lc := toGRPC(clus.RandClient()).Lease
lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
@ -229,7 +229,7 @@ func TestV3LeaseKeepAlive(t *testing.T) {
func TestV3LeaseCheckpoint(t *testing.T) {
var ttl int64 = 300
leaseInterval := 2 * time.Second
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{
Size: 3,
EnableLeaseCheckpoint: true,
@ -280,7 +280,7 @@ func TestV3LeaseCheckpoint(t *testing.T) {
// TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster.
func TestV3LeaseExists(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -304,7 +304,7 @@ func TestV3LeaseExists(t *testing.T) {
// TestV3LeaseLeases creates leases and confirms list RPC fetches created ones.
func TestV3LeaseLeases(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -354,7 +354,7 @@ func TestV3LeaseTimeToLiveStress(t *testing.T) {
}
func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -425,7 +425,7 @@ func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr erro
}
func TestV3PutOnNonExistLease(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -443,7 +443,7 @@ func TestV3PutOnNonExistLease(t *testing.T) {
// TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic
// related issue https://github.com/etcd-io/etcd/issues/6537
func TestV3GetNonExistLease(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -481,7 +481,7 @@ func TestV3GetNonExistLease(t *testing.T) {
// TestV3LeaseSwitch tests a key can be switched from one lease to another.
func TestV3LeaseSwitch(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -543,7 +543,7 @@ func TestV3LeaseSwitch(t *testing.T) {
// election timeout after it loses its quorum. And the new leader extends the TTL of
// the lease to at least TTL + election timeout.
func TestV3LeaseFailover(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -604,7 +604,7 @@ func TestV3LeaseFailover(t *testing.T) {
// TestV3LeaseRequireLeader ensures that a Recv will get a leader
// loss error if there is no leader.
func TestV3LeaseRequireLeader(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)

View File

@ -21,8 +21,6 @@ import (
"time"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/testutil"
"google.golang.org/grpc"
)
@ -32,7 +30,7 @@ func TestTLSClientCipherSuitesMismatch(t *testing.T) { testTLSCipherSuites(t, fa
// testTLSCipherSuites ensures mismatching client-side cipher suite
// fail TLS handshake with the server.
func testTLSCipherSuites(t *testing.T, valid bool) {
defer testutil.AfterTest(t)
BeforeTest(t)
cipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,

View File

@ -26,13 +26,12 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
)
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
func TestV3WatchFromCurrentRevision(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
tests := []struct {
putKeys []string
watchRequest *pb.WatchRequest
@ -298,7 +297,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
// TestV3WatchFutureRevision tests Watch APIs from a future revision.
func TestV3WatchFutureRevision(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -359,7 +358,7 @@ func TestV3WatchFutureRevision(t *testing.T) {
// TestV3WatchWrongRange tests wrong range does not create watchers.
func TestV3WatchWrongRange(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -404,13 +403,13 @@ func TestV3WatchWrongRange(t *testing.T) {
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
func TestV3WatchCancelSynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
func TestV3WatchCancelUnsynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchCancel(t, 1)
}
@ -470,7 +469,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
// TestV3WatchCurrentPutOverlap ensures current watchers receive all events with
// overlapping puts.
func TestV3WatchCurrentPutOverlap(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -554,7 +553,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
// TestV3WatchEmptyKey ensures synced watchers see empty key PUTs as PUT events
func TestV3WatchEmptyKey(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -600,12 +599,12 @@ func TestV3WatchEmptyKey(t *testing.T) {
}
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchMultipleWatchers(t, 0)
}
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchMultipleWatchers(t, 1)
}
@ -705,12 +704,12 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
}
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchMultipleEventsTxn(t, 0)
}
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchMultipleEventsTxn(t, 1)
}
@ -798,7 +797,7 @@ func (evs eventsSortByKey) Less(i, j int) bool {
}
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -877,12 +876,12 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
}
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchMultipleStreams(t, 0)
}
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
testV3WatchMultipleStreams(t, 1)
}
@ -990,7 +989,7 @@ func TestWatchWithProgressNotify(t *testing.T) {
testInterval := 3 * time.Second
defer func() { v3rpc.SetProgressReportInterval(oldpi) }()
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
@ -1040,7 +1039,7 @@ func TestWatchWithProgressNotify(t *testing.T) {
// TestV3WatchClose opens many watchers concurrently on multiple streams.
func TestV3WatchClose(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -1142,7 +1141,7 @@ func TestV3WatchWithFilter(t *testing.T) {
}
func TestV3WatchWithPrevKV(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -21,14 +21,13 @@ import (
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/pkg/v3/testutil"
epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
)
// TestV3ElectionCampaign checks that Campaign will not give
// simultaneous leadership to multiple campaigners.
func TestV3ElectionCampaign(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
@ -90,7 +89,7 @@ func TestV3ElectionCampaign(t *testing.T) {
// TestV3ElectionObserve checks that an Observe stream receives
// proclamations from different leaders uninterrupted.
func TestV3ElectionObserve(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -20,14 +20,13 @@ import (
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/pkg/v3/testutil"
lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
)
// TestV3LockLockWaiter tests that a client will wait for a lock, then acquire it
// once it is unlocked.
func TestV3LockLockWaiter(t *testing.T) {
defer testutil.AfterTest(t)
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)