Mirror of https://github.com/etcd-io/etcd.git, synced 2024-09-27 06:25:44 +00:00
tests: Move integration setup to separate framework package
parent dcd0d3fc9c
commit ca9b720c1d
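The change is mechanical across the test suite: every test file that imported the old go.etcd.io/etcd/tests/v3/integration package now imports the relocated go.etcd.io/etcd/tests/v3/framework/integration package, usually under the alias integration2 so call sites only swap their package qualifier. A minimal sketch of the resulting pattern in a hypothetical test file (TestExample is illustrative, not part of this diff):

package example_test

import (
	"testing"

	// Alias keeps call sites short while the old package path is still in use elsewhere.
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestExample(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
}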
@@ -26,7 +26,7 @@ import (

 	"go.etcd.io/etcd/client/pkg/v3/fileutil"
 	"go.etcd.io/etcd/pkg/v3/expect"
-	"go.etcd.io/etcd/tests/v3/integration"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.uber.org/zap"
 )

@@ -19,7 +19,7 @@ import (
 	"os"
 	"runtime"

-	"go.etcd.io/etcd/tests/v3/integration"
+	"go.etcd.io/etcd/tests/v3/framework/integration"
 )

 var (
File diff suppressed because it is too large
@@ -26,8 +26,8 @@ import (

 const ThroughProxy = false

-func toGRPC(c *clientv3.Client) grpcAPI {
-	return grpcAPI{
+func ToGRPC(c *clientv3.Client) GrpcAPI {
+	return GrpcAPI{
 		pb.NewClusterClient(c.ActiveConnection()),
 		pb.NewKVClient(c.ActiveConnection()),
 		pb.NewLeaseClient(c.ActiveConnection()),
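Go exports an identifier only when it begins with an upper-case letter, so the toGRPC → ToGRPC and grpcAPI → GrpcAPI renames above are exactly what make these helpers reachable from other test packages after the move. A tiny illustration of the visibility rule, with a hypothetical package name:

package framework

type GrpcAPI struct{} // exported: usable as framework.GrpcAPI from other packages
type grpcAPI struct{} // unexported: visible only inside this package

func ToGRPC() GrpcAPI { return GrpcAPI{} } // callable from other packages
func toGRPC() grpcAPI { return grpcAPI{} } // package-private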
@@ -39,13 +39,13 @@ const proxyNamespace = "proxy-namespace"
 type grpcClientProxy struct {
 	ctx       context.Context
 	ctxCancel func()
-	grpc      grpcAPI
+	grpc      GrpcAPI
 	wdonec    <-chan struct{}
 	kvdonec   <-chan struct{}
 	lpdonec   <-chan struct{}
 }

-func toGRPC(c *clientv3.Client) grpcAPI {
+func ToGRPC(c *clientv3.Client) GrpcAPI {
 	pmu.Lock()
 	defer pmu.Unlock()

@@ -74,7 +74,7 @@ func toGRPC(c *clientv3.Client) grpcAPI {
 	lockp := grpcproxy.NewLockProxy(c)
 	electp := grpcproxy.NewElectionProxy(c)

-	grpc := grpcAPI{
+	grpc := GrpcAPI{
 		adapter.ClusterServerToClusterClient(clp),
 		adapter.KvServerToKvClient(kvp),
 		adapter.LeaseServerToLeaseClient(lp),

@@ -112,7 +112,7 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
 	if err != nil {
 		return nil, err
 	}
-	rpc := toGRPC(c)
+	rpc := ToGRPC(c)
 	c.KV = clientv3.NewKVFromKVClient(rpc.KV, c)
 	pmu.Lock()
 	lc := c.Lease
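With the export in place, a caller outside the defining package can pull the raw gRPC stubs off a clientv3 client, much as newClientV3 does above. A hedged sketch — it assumes the helper ends up importable under the integration2 alias used throughout this commit (the file placement is not visible in this diff), and that the usual pb (etcdserverpb) and clientv3 imports are in scope:

// rawKV is a hypothetical helper, not part of this commit.
func rawKV(c *clientv3.Client) pb.KVClient {
	rpc := integration2.ToGRPC(c) // GrpcAPI bundles the pb.*Client stubs
	return rpc.KV                 // hand to code that speaks etcdserverpb directly
}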
@@ -25,14 +25,14 @@ import (
 	"testing"

 	"go.etcd.io/etcd/client/v2"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 // TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
 func TestV2NoRetryEOF(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 	// generate an EOF response; specify address so appears first in sorted ep list
-	lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
+	lEOF := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	defer lEOF.Close()
 	tries := uint32(0)
 	go func() {

@@ -45,8 +45,8 @@ func TestV2NoRetryEOF(t *testing.T) {
 			conn.Close()
 		}
 	}()
-	eofURL := integration.URLScheme + "://" + lEOF.Addr().String()
-	cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
+	eofURL := integration2.URLScheme + "://" + lEOF.Addr().String()
+	cli := integration2.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
 	kapi := client.NewKeysAPI(cli)
 	for i, f := range noRetryList(kapi) {
 		startTries := atomic.LoadUint32(&tries)

@@ -62,17 +62,17 @@ func TestV2NoRetryEOF(t *testing.T) {

 // TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
 func TestV2NoRetryNoLeader(t *testing.T) {
-	integration.BeforeTest(t)
-	lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
+	integration2.BeforeTest(t)
+	lHTTP := integration2.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	eh := &errHandler{errCode: http.StatusServiceUnavailable}
 	srv := httptest.NewUnstartedServer(eh)
 	defer lHTTP.Close()
 	defer srv.Close()
 	srv.Listener = lHTTP
 	go srv.Start()
-	lHTTPURL := integration.URLScheme + "://" + lHTTP.Addr().String()
+	lHTTPURL := integration2.URLScheme + "://" + lHTTP.Addr().String()

-	cli := integration.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil)
+	cli := integration2.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil)
 	kapi := client.NewKeysAPI(cli)
 	// test error code
 	for i, f := range noRetryList(kapi) {

@@ -88,12 +88,12 @@ func TestV2NoRetryNoLeader(t *testing.T) {

 // TestV2RetryRefuse tests destructive api calls will retry if a connection is refused.
 func TestV2RetryRefuse(t *testing.T) {
-	integration.BeforeTest(t)
-	cl := integration.NewCluster(t, 1)
+	integration2.BeforeTest(t)
+	cl := integration2.NewCluster(t, 1)
 	cl.Launch(t)
 	defer cl.Terminate(t)
 	// test connection refused; expect no error failover
-	cli := integration.MustNewHTTPClient(t, []string{integration.URLScheme + "://refuseconn:123", cl.URL(0)}, nil)
+	cli := integration2.MustNewHTTPClient(t, []string{integration2.URLScheme + "://refuseconn:123", cl.URL(0)}, nil)
 	kapi := client.NewKeysAPI(cli)
 	if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil {
 		t.Fatal(err)
@@ -23,13 +23,13 @@ import (
 	"time"

 	"go.etcd.io/etcd/client/pkg/v3/types"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestMemberList(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	capi := clus.RandClient()

@@ -45,9 +45,9 @@ func TestMemberList(t *testing.T) {
 }

 func TestMemberAdd(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	capi := clus.RandClient()

@@ -64,9 +64,9 @@ func TestMemberAdd(t *testing.T) {
 }

 func TestMemberAddWithExistingURLs(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	capi := clus.RandClient()

@@ -88,9 +88,9 @@ func TestMemberAddWithExistingURLs(t *testing.T) {
 }

 func TestMemberRemove(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	capi := clus.Client(1)

@@ -126,9 +126,9 @@ func TestMemberRemove(t *testing.T) {
 }

 func TestMemberUpdate(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	capi := clus.RandClient()

@@ -154,9 +154,9 @@ func TestMemberUpdate(t *testing.T) {
 }

 func TestMemberAddUpdateWrongURLs(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	capi := clus.RandClient()

@@ -187,9 +187,9 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) {
 }

 func TestMemberAddForLearner(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	capi := clus.RandClient()

@@ -216,9 +216,9 @@ func TestMemberAddForLearner(t *testing.T) {
 }

 func TestMemberPromote(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	// member promote request can be sent to any server in cluster,

@@ -293,9 +293,9 @@ func TestMemberPromote(t *testing.T) {

 // TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails.
 func TestMemberPromoteMemberNotLearner(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	// member promote request can be sent to any server in cluster,

@@ -329,9 +329,9 @@ func TestMemberPromoteMemberNotLearner(t *testing.T) {

 // TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails.
 func TestMemberPromoteMemberNotExist(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	// member promote request can be sent to any server in cluster,

@@ -378,10 +378,10 @@ func TestMemberPromoteMemberNotExist(t *testing.T) {

 // TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is 1
 func TestMaxLearnerInCluster(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

 	// 1. start with a cluster with 3 voting member and 0 learner member
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	// 2. adding a learner member should succeed
@@ -23,13 +23,13 @@ import (

 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestResumeElection(t *testing.T) {
 	const prefix = "/resume-election/"

-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -20,11 +20,11 @@ import (

 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestMutexLockSessionExpired(t *testing.T) {
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -24,7 +24,7 @@ import (

 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration/clientv3"
 	"google.golang.org/grpc"
 )

@@ -33,9 +33,9 @@ import (
 // blackholed endpoint, client balancer switches to healthy one.
 // TODO: test server-to-client keepalive ping
 func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 2,
 		GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
 		UseBridge: true,

@@ -58,9 +58,9 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
 	// TODO: only send healthy endpoint to gRPC so gRPC wont waste time to
 	// dial for unhealthy endpoint.
 	// then we can reduce 3s to 1s.
-	timeout := pingInterval + integration.RequestWaitTimeout
+	timeout := pingInterval + integration2.RequestWaitTimeout

-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -166,9 +166,9 @@ func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
 // testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint
 // fails due to context timeout, but succeeds on next try, with endpoint switch.
 func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 2,
 		SkipCreatingClient: true,
 		UseBridge: true,

@@ -182,7 +182,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
 		DialTimeout: 1 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -24,31 +24,31 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
 	"google.golang.org/grpc"
 )

 var (
 	testTLSInfo = transport.TLSInfo{
-		KeyFile: integration.MustAbsPath("../../../fixtures/server.key.insecure"),
-		CertFile: integration.MustAbsPath("../../../fixtures/server.crt"),
-		TrustedCAFile: integration.MustAbsPath("../../../fixtures/ca.crt"),
+		KeyFile: integration2.MustAbsPath("../../../fixtures/server.key.insecure"),
+		CertFile: integration2.MustAbsPath("../../../fixtures/server.crt"),
+		TrustedCAFile: integration2.MustAbsPath("../../../fixtures/ca.crt"),
 		ClientCertAuth: true,
 	}

 	testTLSInfoExpired = transport.TLSInfo{
-		KeyFile: integration.MustAbsPath("../../fixtures-expired/server.key.insecure"),
-		CertFile: integration.MustAbsPath("../../fixtures-expired/server.crt"),
-		TrustedCAFile: integration.MustAbsPath("../../fixtures-expired/ca.crt"),
+		KeyFile: integration2.MustAbsPath("../../fixtures-expired/server.key.insecure"),
+		CertFile: integration2.MustAbsPath("../../fixtures-expired/server.crt"),
+		TrustedCAFile: integration2.MustAbsPath("../../fixtures-expired/ca.crt"),
 		ClientCertAuth: true,
 	}
 )

 // TestDialTLSExpired tests client with expired certs fails to dial.
 func TestDialTLSExpired(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	tls, err := testTLSInfoExpired.ClientConfig()

@@ -56,7 +56,7 @@ func TestDialTLSExpired(t *testing.T) {
 		t.Fatal(err)
 	}
 	// expect remote errors "tls: bad certificate"
-	_, err = integration.NewClient(t, clientv3.Config{
+	_, err = integration2.NewClient(t, clientv3.Config{
 		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: 3 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},

@@ -70,11 +70,11 @@ func TestDialTLSExpired(t *testing.T) {
 // TestDialTLSNoConfig ensures the client fails to dial / times out
 // when TLS endpoints (https, unixs) are given but no tls config.
 func TestDialTLSNoConfig(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
 	defer clus.Terminate(t)
 	// expect "signed by unknown authority"
-	c, err := integration.NewClient(t, clientv3.Config{
+	c, err := integration2.NewClient(t, clientv3.Config{
 		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},

@@ -101,8 +101,8 @@ func TestDialSetEndpointsAfterFail(t *testing.T) {

 // testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
 func testDialSetEndpoints(t *testing.T, setBefore bool) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	// get endpoint list

@@ -117,7 +117,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 		DialTimeout: 1 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -134,7 +134,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
 	}
 	time.Sleep(time.Second * 2)
-	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout)
 	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
 		t.Fatal(err)
 	}

@@ -144,8 +144,8 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 // TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
 // with a new one that doesn't include original endpoint.
 func TestSwitchSetEndpoints(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	// get non partitioned members endpoints

@@ -164,9 +164,9 @@ func TestSwitchSetEndpoints(t *testing.T) {
 }

 func TestRejectOldCluster(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)
 	// 2 endpoints to test multi-endpoint Status
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	cfg := clientv3.Config{

@@ -175,7 +175,7 @@ func TestRejectOldCluster(t *testing.T) {
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		RejectOldCluster: true,
 	}
-	cli, err := integration.NewClient(t, cfg)
+	cli, err := integration2.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -185,8 +185,8 @@ func TestRejectOldCluster(t *testing.T) {
 // TestDialForeignEndpoint checks an endpoint that is not registered
 // with the balancer can be dialed.
 func TestDialForeignEndpoint(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)

 	conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0])

@@ -208,8 +208,8 @@ func TestDialForeignEndpoint(t *testing.T) {
 // TestSetEndpointAndPut checks that a Put following a SetEndpoints
 // to a working endpoint will always succeed.
 func TestSetEndpointAndPut(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)

 	clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL())
@@ -26,7 +26,7 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration/clientv3"
 	"google.golang.org/grpc"
 )

@@ -103,9 +103,9 @@ func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) {
 }

 func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 	})

@@ -119,7 +119,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 		DialTimeout: 3 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -159,9 +159,9 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 // switches endpoint when leader fails and linearizable get requests returns
 // "etcdserver: request timed out".
 func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 	})

@@ -172,7 +172,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T

 	timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()

-	cli, err := integration.NewClient(t, clientv3.Config{
+	cli, err := integration2.NewClient(t, clientv3.Config{
 		Endpoints: []string{eps[(lead+1)%2]},
 		DialTimeout: 2 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},

@@ -214,9 +214,9 @@ func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
 // testBalancerUnderNetworkPartitionWatch ensures watch stream
 // to a partitioned node be closed when context requires leader.
 func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 	})

@@ -230,7 +230,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	}

 	// pin eps[target]
-	watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
+	watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -248,7 +248,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 	}

@@ -268,15 +268,15 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 		if err = ev.Err(); err != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
 		}
-	case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost
+	case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost
 		t.Fatal("took too long to detect leader lost")
 	}
 }

 func TestDropReadUnderNetworkPartition(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 	})

@@ -289,7 +289,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
 		DialTimeout: 10 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	}
-	cli, err := integration.NewClient(t, ccfg)
+	cli, err := integration2.NewClient(t, ccfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -23,16 +23,16 @@ import (

 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration/clientv3"
 )

 // TestBalancerUnderServerShutdownWatch expects that watch client
 // switch its endpoints when the member of the pinned endpoint fails.
 func TestBalancerUnderServerShutdownWatch(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 		UseBridge: true,

@@ -44,7 +44,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	lead := clus.WaitLeader(t)

 	// pin eps[lead]
-	watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}})
+	watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}})
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -61,7 +61,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(integration.RequestWaitTimeout):
+	case <-time.After(integration2.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 	}

@@ -90,7 +90,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	clus.Members[lead].Terminate(t)

 	// writes to eps[lead+1]
-	putCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
+	putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -143,9 +143,9 @@ func TestBalancerUnderServerShutdownTxn(t *testing.T) {
 // the pinned endpoint is shut down, the balancer switches its endpoints
 // and all subsequent put/delete/txn requests succeed with new endpoints.
 func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 	})

@@ -154,7 +154,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie
 	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	// pin eps[0]
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -201,9 +201,9 @@ func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
 // the pinned endpoint is shut down, the balancer switches its endpoints
 // and all subsequent range requests succeed with new endpoints.
 func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
 	})

@@ -212,7 +212,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl
 	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	// pin eps[0]
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
 	if err != nil {
 		t.Errorf("failed to create client: %v", err)
 	}

@@ -274,9 +274,9 @@ type pinTestOpt struct {
 // testBalancerUnderServerStopInflightRangeOnRestart expects
 // inflight range request reconnects on server restart.
 func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	cfg := &integration.ClusterConfig{
+	cfg := &integration2.ClusterConfig{
 		Size: 2,
 		SkipCreatingClient: true,
 		UseBridge: true,

@@ -285,7 +285,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 		cfg.Size = 3
 	}

-	clus := integration.NewClusterV3(t, cfg)
+	clus := integration2.NewClusterV3(t, cfg)
 	defer clus.Terminate(t)
 	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
 	if linearizable {

@@ -300,7 +300,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	}

 	// pin eps[target]
-	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
+	cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
 	if err != nil {
 		t.Errorf("failed to create client: %v", err)
 	}

@@ -361,7 +361,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	clus.Members[target].Restart(t)

 	select {
-	case <-time.After(clientTimeout + integration.RequestWaitTimeout):
+	case <-time.After(clientTimeout + integration2.RequestWaitTimeout):
 		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
 	case <-donec:
 	}
@@ -20,6 +20,7 @@ import (
 	"time"

 	"go.etcd.io/etcd/client/pkg/v3/testutil"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"go.etcd.io/etcd/tests/v3/integration"
 )

@@ -29,7 +30,7 @@ const (
 )

 var lazyCluster = integration.NewLazyClusterWithConfig(
-	integration.ClusterConfig{
+	integration2.ClusterConfig{
 		Size: 3,
 		WatchProgressNotifyInterval: 200 * time.Millisecond})
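Note the transitional state in this file: the old tests/v3/integration package, which still hosts NewLazyClusterWithConfig, is imported alongside the relocated framework package under the integration2 alias, and only the ClusterConfig type has moved so far. Presumably this keeps the tree compiling while the setup helpers migrate piecemeal.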
@@ -20,19 +20,19 @@ import (

 	"go.etcd.io/etcd/client/v3"
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestBarrierSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }

 func TestBarrierMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
@@ -20,13 +20,13 @@ import (

 	"go.etcd.io/etcd/client/v3/concurrency"
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestDoubleBarrier(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	waiters := 10

@@ -98,9 +98,9 @@ func TestDoubleBarrier(t *testing.T) {
 }

 func TestDoubleBarrierFailover(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	waiters := 10
@@ -24,29 +24,29 @@ import (
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 func TestMutexLockSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	var clients []*clientv3.Client
-	testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }

 func TestMutexLockMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	var clients []*clientv3.Client
-	testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }

 func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {

@@ -93,27 +93,27 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
 }

 func TestMutexTryLockSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }

 func TestMutexTryLockMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
-	integration.CloseClients(t, clients)
+	testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
+	integration2.CloseClients(t, clients)
 }

 func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

 	lockedC := make(chan *concurrency.Mutex)
 	notlockedC := make(chan *concurrency.Mutex)

@@ -163,9 +163,9 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C
 // TestMutexSessionRelock ensures that acquiring the same lock with the same
 // session will not result in deadlock.
 func TestMutexSessionRelock(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	session, err := concurrency.NewSession(clus.RandClient())
 	if err != nil {

@@ -187,9 +187,9 @@ func TestMutexSessionRelock(t *testing.T) {
 // waiters older than the new owner are gone by testing the case where
 // the waiter prior to the acquirer expires before the current holder.
 func TestMutexWaitsOnCurrentHolder(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	cctx := context.Background()

@@ -295,9 +295,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
 }

 func BenchmarkMutex4Waiters(b *testing.B) {
-	integration.BeforeTest(b)
+	integration2.BeforeTest(b)
 	// XXX switch tests to use TB interface
-	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })

@@ -305,15 +305,15 @@ func BenchmarkMutex4Waiters(b *testing.B) {
 }

 func TestRWMutexSingleNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }

 func TestRWMutexMultiNode(t *testing.T) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
@@ -21,7 +21,7 @@ import (
 	"testing"

 	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
-	"go.etcd.io/etcd/tests/v3/integration"
+	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )

 const (

@@ -31,9 +31,9 @@ const (

 // TestQueueOneReaderOneWriter confirms the queue is FIFO
 func TestQueueOneReaderOneWriter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	done := make(chan struct{})

@@ -78,10 +78,10 @@ func TestQueueManyReaderManyWriter(t *testing.T) {

 // BenchmarkQueue benchmarks Queues using many/many readers/writers
 func BenchmarkQueue(b *testing.B) {
-	integration.BeforeTest(b)
+	integration2.BeforeTest(b)

 	// XXX switch tests to use TB interface
-	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients)

@@ -90,9 +90,9 @@ func BenchmarkQueue(b *testing.B) {

 // TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities.
 func TestPrQueueOneReaderOneWriter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

 	// write out five items with random priority

@@ -124,9 +124,9 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) {
 }

 func TestPrQueueManyReaderManyWriter(t *testing.T) {
-	integration.BeforeTest(t)
+	integration2.BeforeTest(t)

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	rqs := newPriorityQueues(clus, manyQueueClients)
 	wqs := newPriorityQueues(clus, manyQueueClients)

@@ -135,10 +135,10 @@ func TestPrQueueManyReaderManyWriter(t *testing.T) {

 // BenchmarkQueue benchmarks Queues using n/n readers/writers
 func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
-	integration.BeforeTest(b)
+	integration2.BeforeTest(b)

 	// XXX switch tests to use TB interface
-	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+	clus := integration2.NewClusterV3(nil, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	rqs := newPriorityQueues(clus, 1)
 	wqs := newPriorityQueues(clus, 1)

@@ -148,13 +148,13 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
 }

 func testQueueNReaderMWriter(t *testing.T, n int, m int) {
-	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	integration2.BeforeTest(t)
+	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testReadersWriters(t, newQueues(clus, n), newQueues(clus, m))
 }

-func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
+func newQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		qs = append(qs, recipe.NewQueue(etcdc, "q"))

@@ -162,7 +162,7 @@ func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
 	return qs
 }

-func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
+func newPriorityQueues(clus *integration2.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")}
@ -29,20 +29,20 @@ import (
|
||||
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
|
||||
"go.etcd.io/etcd/api/v3/version"
|
||||
"go.etcd.io/etcd/client/v3"
|
||||
"go.etcd.io/etcd/tests/v3/integration"
|
||||
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func TestKVPutError(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
var (
|
||||
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
|
||||
quota = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486.
|
||||
)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -72,9 +72,9 @@ func TestKVPutError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVPut(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
lapi := clus.RandClient()
|
||||
@ -117,9 +117,9 @@ func TestKVPut(t *testing.T) {
|
||||
|
||||
// TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
|
||||
func TestKVPutWithIgnoreValue(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -150,9 +150,9 @@ func TestKVPutWithIgnoreValue(t *testing.T) {
|
||||
|
||||
// TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
|
||||
func TestKVPutWithIgnoreLease(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -189,9 +189,9 @@ func TestKVPutWithIgnoreLease(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVPutWithRequireLeader(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
clus.Members[1].Stop(t)
|
||||
@ -235,9 +235,9 @@ func TestKVPutWithRequireLeader(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVRange(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -464,9 +464,9 @@ func TestKVRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVGetErrConnClosed(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
@ -486,16 +486,16 @@ func TestKVGetErrConnClosed(t *testing.T) {
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(integration.RequestWaitTimeout):
|
||||
case <-time.After(integration2.RequestWaitTimeout):
|
||||
t.Fatal("kv.Get took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVNewAfterClose(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
@ -513,16 +513,16 @@ func TestKVNewAfterClose(t *testing.T) {
|
||||
close(donec)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(integration.RequestWaitTimeout):
|
||||
case <-time.After(integration2.RequestWaitTimeout):
|
||||
t.Fatal("kv.Get took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVDeleteRange(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -592,9 +592,9 @@ func TestKVDeleteRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVDelete(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -624,9 +624,9 @@ func TestKVDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVCompactError(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -654,9 +654,9 @@ func TestKVCompactError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKVCompact(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
@ -709,10 +709,10 @@ func TestKVCompact(t *testing.T) {
|
||||
|
||||
// TestKVGetRetry ensures get will retry on disconnect.
|
||||
func TestKVGetRetry(t *testing.T) {
|
||||
integration.BeforeTest(t)
|
||||
integration2.BeforeTest(t)
|
||||
|
||||
clusterSize := 3
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize, UseBridge: true})
|
||||
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// because killing leader and following election
|
||||
@ -763,9 +763,9 @@ func TestKVGetRetry(t *testing.T) {
|
||||
|
||||
// TestKVPutFailGetRetry ensures a get will retry following a failed put.
|
||||
func TestKVPutFailGetRetry(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

kv := clus.Client(0)
@ -803,9 +803,9 @@ func TestKVPutFailGetRetry(t *testing.T) {

// TestKVGetCancel tests that a context cancel on a Get terminates as expected.
func TestKVGetCancel(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

oldconn := clus.Client(0).ActiveConnection()
@ -826,9 +826,9 @@ func TestKVGetCancel(t *testing.T) {

// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
func TestKVGetStoppedServerAndClose(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -844,9 +844,9 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {

// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -875,8 +875,8 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
// TestKVPutAtMostOnce ensures that a Put will only occur at most once
// in the presence of network errors.
func TestKVPutAtMostOnce(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
@ -911,7 +911,7 @@ func TestKVPutAtMostOnce(t *testing.T) {

// TestKVLargeRequests tests various client/server side request limits.
func TestKVLargeRequests(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)
tests := []struct {
// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
maxRequestBytesServer uint
@ -970,8 +970,8 @@ func TestKVLargeRequests(t *testing.T) {
},
}
for i, test := range tests {
clus := integration.NewClusterV3(t,
&integration.ClusterConfig{
clus := integration2.NewClusterV3(t,
&integration2.ClusterConfig{
Size: 1,
MaxRequestBytes: test.maxRequestBytesServer,
ClientMaxCallSendMsgSize: test.maxCallSendBytesClient,
@ -1003,9 +1003,9 @@ func TestKVLargeRequests(t *testing.T) {

// TestKVForLearner ensures learner member only accepts serializable read request.
func TestKVForLearner(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

// we have to add and launch learner member after initial cluster was created, because
@ -1034,7 +1034,7 @@ func TestKVForLearner(t *testing.T) {
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
// this client only has endpoint of the learner member
cli, err := integration.NewClient(t, cfg)
cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("failed to create clientv3: %v", err)
}
@ -1082,9 +1082,9 @@ func TestKVForLearner(t *testing.T) {

// TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member
func TestBalancerSupportLearner(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

// we have to add and launch learner member after initial cluster was created, because
@ -1106,7 +1106,7 @@ func TestBalancerSupportLearner(t *testing.T) {
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
cli, err := integration.NewClient(t, cfg)
cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("failed to create clientv3: %v", err)
}
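The hunks above apply one mechanical substitution: each test file drops the old "go.etcd.io/etcd/tests/v3/integration" import in favor of the relocated "go.etcd.io/etcd/tests/v3/framework/integration" package, aliased as integration2. A minimal sketch of the resulting test shape, using only helpers that appear in this diff (the test name and body are illustrative, not part of the commit):

package clientv3test

import (
	"context"
	"testing"

	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestFrameworkImportSketch(t *testing.T) {
	// BeforeTest and NewClusterV3 now come from the framework package.
	integration2.BeforeTest(t)

	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// Illustrative write against the one-member cluster.
	if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
		t.Fatal(err)
	}
}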
@ -26,13 +26,13 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestLeaseNotFoundError(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kv := clus.RandClient()
@ -44,9 +44,9 @@ func TestLeaseNotFoundError(t *testing.T) {
}

func TestLeaseGrant(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

lapi := clus.RandClient()
@ -70,9 +70,9 @@ func TestLeaseGrant(t *testing.T) {
}

func TestLeaseRevoke(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

lapi := clus.RandClient()
@ -96,9 +96,9 @@ func TestLeaseRevoke(t *testing.T) {
}

func TestLeaseKeepAliveOnce(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

lapi := clus.RandClient()
@ -120,9 +120,9 @@ func TestLeaseKeepAliveOnce(t *testing.T) {
}

func TestLeaseKeepAlive(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

lapi := clus.Client(0)
@ -160,9 +160,9 @@ func TestLeaseKeepAlive(t *testing.T) {
}

func TestLeaseKeepAliveOneSecond(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -188,9 +188,9 @@ func TestLeaseKeepAliveOneSecond(t *testing.T) {
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
t.Skip("test it when we have a cluster client")

integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

// TODO: change this line to get a cluster client
@ -243,9 +243,9 @@ type leaseCh struct {

// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases.
func TestLeaseKeepAliveNotFound(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.RandClient()
@ -276,9 +276,9 @@ func TestLeaseKeepAliveNotFound(t *testing.T) {
}

func TestLeaseGrantErrConnClosed(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -298,7 +298,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
}()

select {
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("le.Grant took too long")
case <-donec:
}
@ -308,9 +308,9 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
// queue is full thus dropping keepalive response sends,
// keepalive request is sent with the same rate of TTL / 3.
func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lapi := clus.Client(0)
@ -348,9 +348,9 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
}

func TestLeaseGrantNewAfterClose(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -368,16 +368,16 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
close(donec)
}()
select {
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("le.Grant took too long")
case <-donec:
}
}

func TestLeaseRevokeNewAfterClose(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -402,7 +402,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
}
}()
select {
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("le.Revoke took too long")
case errMsg := <-errMsgCh:
if errMsg != "" {
@ -414,9 +414,9 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
// following a disconnection, lease revoke, then reconnect.
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -460,9 +460,9 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
// the initial keep alive request never gets a response.
func TestLeaseKeepAliveInitTimeout(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -493,9 +493,9 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -528,9 +528,9 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
}

func TestLeaseTimeToLive(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

c := clus.RandClient()
@ -586,9 +586,9 @@ func TestLeaseTimeToLive(t *testing.T) {
}

func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.RandClient()
@ -621,9 +621,9 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
}

func TestLeaseLeases(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.RandClient()
@ -654,9 +654,9 @@ func TestLeaseLeases(t *testing.T) {
// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -702,9 +702,9 @@ func TestLeaseRenewLostQuorum(t *testing.T) {
}

func TestLeaseKeepAliveLoopExit(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx := context.Background()
@ -727,8 +727,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)

numReqs := 5
@ -780,9 +780,9 @@ func TestV3LeaseFailureOverlap(t *testing.T) {

// TestLeaseWithRequireLeader checks keep-alive channel close when no leader.
func TestLeaseWithRequireLeader(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)

c := clus.Client(0)
@ -28,13 +28,13 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/client/v3/leasing"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestLeasingPutGet(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

lKV1, closeLKV1, err := leasing.NewKV(clus.Client(0), "foo/")
@ -91,8 +91,8 @@ func TestLeasingPutGet(t *testing.T) {

// TestLeasingInterval checks the leasing KV fetches key intervals.
func TestLeasingInterval(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -130,8 +130,8 @@ func TestLeasingInterval(t *testing.T) {

// TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key.
func TestLeasingPutInvalidateNew(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -160,8 +160,8 @@ func TestLeasingPutInvalidateNew(t *testing.T) {

// TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key.
func TestLeasingPutInvalidateExisting(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@ -194,8 +194,8 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {

// TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased.
func TestLeasingGetNoLeaseTTL(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -223,8 +223,8 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) {
// TestLeasingGetSerializable checks the leasing KV can make serialized requests
// when the etcd cluster is partitioned.
func TestLeasingGetSerializable(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -263,8 +263,8 @@ func TestLeasingGetSerializable(t *testing.T) {

// TestLeasingPrevKey checks the cache respects WithPrevKV on puts.
func TestLeasingPrevKey(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -289,8 +289,8 @@ func TestLeasingPrevKey(t *testing.T) {

// TestLeasingRevGet checks the cache respects Get by Revision.
func TestLeasingRevGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -325,8 +325,8 @@ func TestLeasingRevGet(t *testing.T) {

// TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server.
func TestLeasingGetWithOpts(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -370,8 +370,8 @@ func TestLeasingGetWithOpts(t *testing.T) {
// TestLeasingConcurrentPut ensures that a get after concurrent puts returns
// the recently put data.
func TestLeasingConcurrentPut(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -417,8 +417,8 @@ func TestLeasingConcurrentPut(t *testing.T) {
}

func TestLeasingDisconnectedGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -446,8 +446,8 @@ func TestLeasingDisconnectedGet(t *testing.T) {
}

func TestLeasingDeleteOwner(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -480,8 +480,8 @@ func TestLeasingDeleteOwner(t *testing.T) {
}

func TestLeasingDeleteNonOwner(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -515,8 +515,8 @@ func TestLeasingDeleteNonOwner(t *testing.T) {
}

func TestLeasingOverwriteResponse(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -549,8 +549,8 @@ func TestLeasingOverwriteResponse(t *testing.T) {
}

func TestLeasingOwnerPutResponse(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -587,8 +587,8 @@ func TestLeasingOwnerPutResponse(t *testing.T) {
}

func TestLeasingTxnOwnerGetRange(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -616,8 +616,8 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
}

func TestLeasingTxnOwnerGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

client := clus.Client(0)
@ -702,8 +702,8 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
}

func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -741,8 +741,8 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
}

func TestLeasingTxnOwnerDelete(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -772,8 +772,8 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
}

func TestLeasingTxnOwnerIf(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -866,8 +866,8 @@ func TestLeasingTxnOwnerIf(t *testing.T) {
}

func TestLeasingTxnCancel(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -900,8 +900,8 @@ func TestLeasingTxnCancel(t *testing.T) {
}

func TestLeasingTxnNonOwnerPut(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -978,8 +978,8 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
// TestLeasingTxnRandIfThenOrElse randomly leases keys to two separate clients, then
// issues a random If/{Then,Else} transaction on those keys to one client.
func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "pfx/")
@ -1084,8 +1084,8 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
}

func TestLeasingOwnerPutError(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1105,8 +1105,8 @@ func TestLeasingOwnerPutError(t *testing.T) {
}

func TestLeasingOwnerDeleteError(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1126,8 +1126,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
}

func TestLeasingNonOwnerPutError(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1151,8 +1151,8 @@ func TestLeasingOwnerDeleteFrom(t *testing.T) {
}

func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "0/")
@ -1200,8 +1200,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
}

func TestLeasingDeleteRangeBounds(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
@ -1258,8 +1258,8 @@ func TestLeaseDeleteRangeContendDel(t *testing.T) {
}

func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
@ -1316,8 +1316,8 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
}

func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkvs := make([]clientv3.KV, 16)
@ -1375,8 +1375,8 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
// TestLeasingReconnectOwnerRevoke checks that revocation works if
// disconnected when trying to submit revoke txn.
func TestLeasingReconnectOwnerRevoke(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@ -1436,8 +1436,8 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
// TestLeasingReconnectOwnerRevokeCompact checks that revocation works if
// disconnected and the watch is compacted.
func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@ -1489,8 +1489,8 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
// TestLeasingReconnectOwnerConsistency checks a write error on an owner will
// not cause inconsistency between the server and the client.
func TestLeasingReconnectOwnerConsistency(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1563,8 +1563,8 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) {
}

func TestLeasingTxnAtomicCache(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1649,8 +1649,8 @@ func TestLeasingTxnAtomicCache(t *testing.T) {

// TestLeasingReconnectTxn checks that Txn is resilient to disconnects.
func TestLeasingReconnectTxn(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1685,8 +1685,8 @@ func TestLeasingReconnectTxn(t *testing.T) {
// TestLeasingReconnectNonOwnerGet checks a get error on an owner will
// not cause inconsistency between the server and the client.
func TestLeasingReconnectNonOwnerGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1736,8 +1736,8 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
}

func TestLeasingTxnRangeCmp(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1771,8 +1771,8 @@ func TestLeasingTxnRangeCmp(t *testing.T) {
}

func TestLeasingDo(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1813,8 +1813,8 @@ func TestLeasingDo(t *testing.T) {
}

func TestLeasingTxnOwnerPutBranch(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1907,8 +1907,8 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
}

func TestLeasingSessionExpire(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
@ -1931,7 +1931,7 @@ func TestLeasingSessionExpire(t *testing.T) {
}
waitForExpireAck(t, lkv)
clus.Members[0].Restart(t)
integration.WaitClientV3(t, lkv2)
integration2.WaitClientV3(t, lkv2)
if _, err = lkv2.Put(context.TODO(), "abc", "def"); err != nil {
t.Fatal(err)
}
@ -1983,8 +1983,8 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
}
for i := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
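All of the leasing tests above share the same scaffolding against the relocated framework package; a compact, self-contained sketch of that setup, assuming the leasing.NewKV signature used in these hunks (the test name is illustrative):

package clientv3test

import (
	"testing"

	"go.etcd.io/etcd/client/v3/leasing"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestLeasingSetupSketch(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// leasing.NewKV returns a caching KV, a close function, and an error.
	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
	if err != nil {
		t.Fatal(err)
	}
	defer closeLKV()
	_ = lkv // the tests above exercise lkv's Get/Put/Txn/Delete paths
}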
@ -25,6 +25,7 @@ import (
"testing"
"time"

integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap/zaptest"
"google.golang.org/grpc"

@ -34,13 +35,12 @@ import (
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/tests/v3/integration"
)

func TestMaintenanceHashKV(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

for i := 0; i < 3; i++ {
@ -71,9 +71,9 @@ func TestMaintenanceHashKV(t *testing.T) {
}

func TestMaintenanceMoveLeader(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

oldLeadIdx := clus.WaitLeader(t)
@ -102,9 +102,9 @@ func TestMaintenanceMoveLeader(t *testing.T) {
// TestMaintenanceSnapshotCancel ensures that context cancel
// before snapshot reading returns corresponding context errors.
func TestMaintenanceSnapshotCancel(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

// reading snapshot with canceled context should error out
@ -145,9 +145,9 @@ func TestMaintenanceSnapshotTimeout(t *testing.T) {
// testMaintenanceSnapshotTimeout, given a snapshot function, ensures that it
// returns corresponding context errors when the context times out before snapshot reading
func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

// reading snapshot with deadline exceeded should error out
@ -190,9 +190,9 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
// testMaintenanceSnapshotErrorInflight, given a snapshot function, ensures that the ReadCloser returned by it
// will fail to read with corresponding context errors on inflight context cancel or timeout.
func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

// take about 1-second to read snapshot
@ -247,10 +247,10 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co

// TestMaintenanceSnapshotWithVersionVersion ensures that SnapshotWithVersion returns correct version value.
func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

// Set SnapshotCount to 1 to force raft snapshot to ensure that storage version is set
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SnapshotCount: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SnapshotCount: 1})
defer clus.Terminate(t)

// Put some keys to ensure that wal snapshot is triggered
@ -270,9 +270,9 @@ func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) {
}

func TestMaintenanceStatus(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

clus.WaitLeader(t)
@ -282,7 +282,7 @@ func TestMaintenanceStatus(t *testing.T) {
eps[i] = clus.Members[i].GRPCURL()
}

cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
if err != nil {
t.Fatal(err)
}
@ -25,17 +25,16 @@ import (
"testing"
"time"

"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/integration"

grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
)

func TestV3ClientMetrics(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

var (
addr = "localhost:27989"
@ -71,7 +70,7 @@ func TestV3ClientMetrics(t *testing.T) {

url := "unix://" + addr + "/metrics"

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, SkipCreatingClient: true})
defer clus.Terminate(t)

cfg := clientv3.Config{
@ -81,7 +80,7 @@ func TestV3ClientMetrics(t *testing.T) {
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
},
}
cli, cerr := integration.NewClient(t, cfg)
cli, cerr := integration2.NewClient(t, cfg)
if cerr != nil {
t.Fatal(cerr)
}
@ -24,13 +24,13 @@ import (

"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3/mirror"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestMirrorSync(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

c := clus.Client(0)
@ -72,9 +72,9 @@ func TestMirrorSync(t *testing.T) {
}

func TestMirrorSyncBase(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)

cli := cluster.Client(0)
@ -22,13 +22,13 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestNamespacePutGet(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

c := clus.Client(0)
@ -55,9 +55,9 @@ func TestNamespacePutGet(t *testing.T) {
}

func TestNamespaceWatch(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

c := clus.Client(0)
@ -21,14 +21,13 @@ import (

etcd "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/naming/endpoints"

"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestEndpointManager(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

em, err := endpoints.NewManager(clus.RandClient(), "foo")
@ -88,9 +87,9 @@ func TestEndpointManager(t *testing.T) {
// correctly with multiple hosts and correctly receive multiple
// updates in a single revision.
func TestEndpointManagerAtomicity(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

c := clus.RandClient()
@ -130,9 +129,9 @@ func TestEndpointManagerAtomicity(t *testing.T) {
}

func TestEndpointManagerCRUD(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

em, err := endpoints.NewManager(clus.RandClient(), "foo")
@ -23,8 +23,7 @@ import (
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/client/v3/naming/resolver"
"go.etcd.io/etcd/pkg/v3/grpc_testing"
"go.etcd.io/etcd/tests/v3/integration"

integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
testpb "google.golang.org/grpc/test/grpc_testing"
)
@ -32,7 +31,7 @@ import (
// This test mimics scenario described in grpc_naming.md doc.

func TestEtcdGrpcResolver(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

s1PayloadBody := []byte{'1'}
s1 := grpc_testing.NewDummyStubServer(s1PayloadBody)
@ -48,7 +47,7 @@ func TestEtcdGrpcResolver(t *testing.T) {
}
defer s2.Stop()

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

em, err := endpoints.NewManager(clus.Client(0), "foo")
@ -23,14 +23,14 @@ import (
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/ordering"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestDetectKvOrderViolation(t *testing.T) {
var errOrderViolation = errors.New("DetectedOrderViolation")

integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

cfg := clientv3.Config{
@ -40,7 +40,7 @@ func TestDetectKvOrderViolation(t *testing.T) {
clus.Members[2].GRPCURL(),
},
}
cli, err := integration.NewClient(t, cfg)
cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@ -96,8 +96,8 @@ func TestDetectKvOrderViolation(t *testing.T) {
func TestDetectTxnOrderViolation(t *testing.T) {
var errOrderViolation = errors.New("DetectedOrderViolation")

integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

cfg := clientv3.Config{
@ -107,7 +107,7 @@ func TestDetectTxnOrderViolation(t *testing.T) {
clus.Members[2].GRPCURL(),
},
}
cli, err := integration.NewClient(t, cfg)
cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@ -21,12 +21,12 @@ import (

"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/ordering"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestEndpointSwitchResolvesViolation(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
eps := []string{
clus.Members[0].GRPCURL(),
@ -34,7 +34,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
clus.Members[2].GRPCURL(),
}
cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}}
cli, err := integration.NewClient(t, cfg)
cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@ -79,8 +79,8 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
}

func TestUnresolvableOrderViolation(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
@ -91,7 +91,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
clus.Members[4].GRPCURL(),
},
}
cli, err := integration.NewClient(t, cfg)
cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@ -19,13 +19,13 @@ import (
"testing"

"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestRoleError(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

authapi := clus.RandClient()
@ -29,7 +29,7 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/snapshot"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap/zaptest"
)

@ -78,7 +78,7 @@ func newEmbedConfig(t *testing.T) *embed.Config {
clusterN := 1
urls := newEmbedURLs(clusterN * 2)
cURLs, pURLs := urls[:clusterN], urls[clusterN:]
cfg := integration.NewEmbedConfig(t, "default")
cfg := integration2.NewEmbedConfig(t, "default")
cfg.ClusterState = "new"
cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
cfg.LPUrls, cfg.APUrls = pURLs, pURLs
@ -105,7 +105,7 @@ func createSnapshotFile(t *testing.T, cfg *embed.Config, kvs []kv) (version stri
}

ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
cli, err := integration.NewClient(t, ccfg)
cli, err := integration2.NewClient(t, ccfg)
if err != nil {
t.Fatal(err)
}
@ -23,13 +23,13 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestTxnError(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kv := clus.RandClient()
@ -51,9 +51,9 @@ func TestTxnError(t *testing.T) {
}

func TestTxnWriteFail(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

kv := clus.Client(0)
@ -101,9 +101,9 @@ func TestTxnWriteFail(t *testing.T) {
func TestTxnReadRetry(t *testing.T) {
t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request")

integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

kv := clus.Client(0)
@ -140,9 +140,9 @@ func TestTxnReadRetry(t *testing.T) {
}

func TestTxnSuccess(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kv := clus.Client(0)
@ -163,9 +163,9 @@ func TestTxnSuccess(t *testing.T) {
}

func TestTxnCompareRange(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kv := clus.Client(0)
@ -190,9 +190,9 @@ func TestTxnCompareRange(t *testing.T) {
}

func TestTxnNested(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kv := clus.Client(0)
@ -21,14 +21,14 @@ import (

"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
)

func TestUserError(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

authapi := clus.RandClient()
@ -55,9 +55,9 @@ func TestUserError(t *testing.T) {
}

func TestUserErrorAuth(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

authapi := clus.RandClient()
@ -75,16 +75,16 @@ func TestUserErrorAuth(t *testing.T) {
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
cfg.Username, cfg.Password = "wrong-id", "123"
if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
}
cfg.Username, cfg.Password = "root", "wrong-pass"
if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
}

cfg.Username, cfg.Password = "root", "123"
authed, err := integration.NewClient(t, cfg)
authed, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@ -112,9 +112,9 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {

// Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode.
func TestGetTokenWithoutAuth(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)

authapi := clus.RandClient()
@ -135,7 +135,7 @@ func TestGetTokenWithoutAuth(t *testing.T) {
Password: "123",
}

client, err = integration.NewClient(t, cfg)
client, err = integration2.NewClient(t, cfg)
if err == nil {
defer client.Close()
}
@ -26,7 +26,7 @@ import (

"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestWatchFragmentDisable ensures that large watch
@ -64,16 +64,16 @@ func TestWatchFragmentEnableWithGRPCLimit(t *testing.T) {
// testWatchFragment triggers watch response that spans over multiple
// revisions exceeding server request limits when combined.
func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cfg := &integration.ClusterConfig{
cfg := &integration2.ClusterConfig{
Size: 1,
MaxRequestBytes: 1.5 * 1024 * 1024,
}
if exceedRecvLimit {
cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024
}
clus := integration.NewClusterV3(t, cfg)
clus := integration2.NewClusterV3(t, cfg)
defer clus.Terminate(t)

cli := clus.Client(0)

@ -29,14 +29,14 @@ import (
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc/metadata"
)

type watcherTest func(*testing.T, *watchctx)

type watchctx struct {
clus *integration.ClusterV3
clus *integration2.ClusterV3
w clientv3.Watcher
kv clientv3.KV
wclientMember int
@ -45,9 +45,9 @@ type watchctx struct {
}

func runWatchTest(t *testing.T, f watcherTest) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

wclientMember := rand.Intn(3)
@ -299,7 +299,7 @@ func TestWatchCancelRunning(t *testing.T) {
}

func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

ctx, cancel := context.WithCancel(context.Background())
if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
@ -347,8 +347,8 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
}

func TestWatchResumeInitRev(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -402,9 +402,9 @@ func TestWatchResumeInitRev(t *testing.T) {
// either a compaction error or all keys by staying in sync before the compaction
// is finally applied.
func TestWatchResumeCompacted(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

// create a waiting watcher at rev 1
@ -489,9 +489,9 @@ func TestWatchResumeCompacted(t *testing.T) {
// TestWatchCompactRevision ensures the CompactRevision error is given on a
// compaction event ahead of a watcher.
func TestWatchCompactRevision(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

// set some keys
@ -531,7 +531,7 @@ func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNot
func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) }

func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

// accelerate report interval so test terminates quickly
oldpi := v3rpc.GetProgressReportInterval()
@ -540,7 +540,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
pi := 3 * time.Second
defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

wc := clus.RandClient()
@ -585,11 +585,11 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
}

func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

progressInterval := 200 * time.Millisecond
clus := integration.NewClusterV3(t,
&integration.ClusterConfig{
clus := integration2.NewClusterV3(t,
&integration2.ClusterConfig{
Size: 3,
WatchProgressNotifyInterval: progressInterval,
})
@ -611,7 +611,7 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
}

func TestWatchRequestProgress(t *testing.T) {
if integration.ThroughProxy {
if integration2.ThroughProxy {
t.Skipf("grpc-proxy does not support WatchProgress yet")
}
testCases := []struct {
@ -625,11 +625,11 @@ func TestWatchRequestProgress(t *testing.T) {

for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

watchTimeout := 3 * time.Second

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

wc := clus.RandClient()
@ -686,9 +686,9 @@ func TestWatchRequestProgress(t *testing.T) {
}

func TestWatchEventType(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)

client := cluster.RandClient()
@ -760,9 +760,9 @@ func TestWatchEventType(t *testing.T) {
}

func TestWatchErrConnClosed(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -783,16 +783,16 @@ func TestWatchErrConnClosed(t *testing.T) {
clus.TakeClient(0)

select {
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("wc.Watch took too long")
case <-donec:
}
}

func TestWatchAfterClose(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
@ -810,7 +810,7 @@ func TestWatchAfterClose(t *testing.T) {
close(donec)
}()
select {
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("wc.Watch took too long")
case <-donec:
}
@ -818,9 +818,9 @@ func TestWatchAfterClose(t *testing.T) {

// TestWatchWithRequireLeader checks the watch channel closes when no leader.
func TestWatchWithRequireLeader(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)

// Put a key for the non-require leader watch to read as an event.
@ -856,7 +856,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
if resp.Err() != rpctypes.ErrNoLeader {
t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
}
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("watch without leader took too long to close")
}

@ -865,7 +865,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
if ok {
t.Fatalf("expected closed channel, got response %v", resp)
}
case <-time.After(integration.RequestWaitTimeout):
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("waited too long for channel to close")
}

@ -892,9 +892,9 @@ func TestWatchWithRequireLeader(t *testing.T) {

// TestWatchWithFilter checks that watch filtering works.
func TestWatchWithFilter(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)

client := cluster.RandClient()
@ -931,9 +931,9 @@ func TestWatchWithFilter(t *testing.T) {
// TestWatchWithCreatedNotification checks that WithCreatedNotify returns a
// Created watch response.
func TestWatchWithCreatedNotification(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)

client := cluster.RandClient()
@ -953,9 +953,9 @@ func TestWatchWithCreatedNotification(t *testing.T) {
// a watcher with created notify does not post duplicate
// created events from disconnect.
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer cluster.Terminate(t)

client := cluster.RandClient()
@ -982,9 +982,9 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {

// TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
func TestWatchCancelOnServer(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)

client := cluster.RandClient()
@ -1050,20 +1050,20 @@ func TestWatchCancelOnServer(t *testing.T) {
// 4. watcher client finishes tearing down stream on "ctx"
// 5. w2 comes back canceled
func TestWatchOverlapContextCancel(t *testing.T) {
f := func(clus *integration.ClusterV3) {}
f := func(clus *integration2.ClusterV3) {}
testWatchOverlapContextCancel(t, f)
}

func TestWatchOverlapDropConnContextCancel(t *testing.T) {
f := func(clus *integration.ClusterV3) {
f := func(clus *integration2.ClusterV3) {
clus.Members[0].Bridge().DropConnections()
}
testWatchOverlapContextCancel(t, f)
}

func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.ClusterV3)) {
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

n := 100
@ -1123,8 +1123,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())
@ -1153,8 +1153,8 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
// to put them in resuming mode, cancels them so some resumes by cancel fail,
// then closes the watcher interface to ensure correct clean up.
func TestWatchStressResumeClose(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)

@ -1175,8 +1175,8 @@ func TestWatchStressResumeClose(t *testing.T) {
// TestWatchCancelDisconnected ensures canceling a watcher works when
// its grpc stream is disconnected / reconnecting.
func TestWatchCancelDisconnected(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())

@ -27,6 +27,7 @@ import (

"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

func init() {
@ -34,7 +35,7 @@ func init() {
log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
if i, err := strconv.ParseInt(t, 10, 64); err == nil {
electionTicks = int(i)
integration.ElectionTicks = int(i)
}
}
}
@ -43,16 +44,16 @@ func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }

func testCluster(t *testing.T, size int) {
BeforeTest(t)
c := NewCluster(t, size)
integration.BeforeTest(t)
c := integration.NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}

func TestTLSClusterOf3(t *testing.T) {
BeforeTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
integration.BeforeTest(t)
c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo})
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
@ -61,8 +62,8 @@ func TestTLSClusterOf3(t *testing.T) {
// Test that a cluster can progress when using separate client and server certs when peering. This supports certificate
// authorities that don't issue dual-usage certificates.
func TestTLSClusterOf3WithSpecificUsage(t *testing.T) {
BeforeTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfoWithSpecificUsage})
integration.BeforeTest(t)
c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage})
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
@ -72,22 +73,22 @@ func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }

func testClusterUsingDiscovery(t *testing.T, size int) {
BeforeTest(t)
dc := NewCluster(t, 1)
integration.BeforeTest(t)
dc := integration.NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// init discovery token space
dcc := MustNewHTTPClient(t, dc.URLs(), nil)
dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
t.Fatal(err)
}
cancel()

c := NewClusterByConfig(
c := integration.NewClusterByConfig(
t,
&ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
&integration.ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
)
c.Launch(t)
defer c.Terminate(t)
@ -95,23 +96,23 @@ func testClusterUsingDiscovery(t *testing.T, size int) {
}

func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
BeforeTest(t)
dc := NewCluster(t, 1)
integration.BeforeTest(t)
dc := integration.NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// init discovery token space
dcc := MustNewHTTPClient(t, dc.URLs(), nil)
dcc := integration.MustNewHTTPClient(t, dc.URLs(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
t.Fatal(err)
}
cancel()

c := NewClusterByConfig(t,
&ClusterConfig{
c := integration.NewClusterByConfig(t,
&integration.ClusterConfig{
Size: 3,
PeerTLS: &testTLSInfo,
PeerTLS: &integration.TestTLSInfo,
DiscoveryURL: dc.URL(0) + "/v2/keys"},
)
c.Launch(t)
@ -123,8 +124,8 @@ func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }

func testDoubleClusterSize(t *testing.T, size int) {
BeforeTest(t)
c := NewCluster(t, size)
integration.BeforeTest(t)
c := integration.NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)

@ -135,8 +136,8 @@ func testDoubleClusterSize(t *testing.T, size int) {
}

func TestDoubleTLSClusterSizeOf3(t *testing.T) {
BeforeTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
integration.BeforeTest(t)
c := integration.NewClusterByConfig(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo})
c.Launch(t)
defer c.Terminate(t)

@ -150,16 +151,16 @@ func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }

func testDecreaseClusterSize(t *testing.T, size int) {
BeforeTest(t)
c := NewCluster(t, size)
integration.BeforeTest(t)
c := integration.NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)

// TODO: remove the last but one member
for i := 0; i < size-1; i++ {
id := c.Members[len(c.Members)-1].s.ID()
id := c.Members[len(c.Members)-1].Server.ID()
// may hit second leader election on slow machines
if err := c.removeMember(t, uint64(id)); err != nil {
if err := c.RemoveMember(t, uint64(id)); err != nil {
if strings.Contains(err.Error(), "no leader") {
t.Logf("got leader error (%v)", err)
i--
@ -167,24 +168,24 @@ func testDecreaseClusterSize(t *testing.T, size int) {
}
t.Fatal(err)
}
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)
}
clusterMustProgress(t, c.Members)
}

func TestForceNewCluster(t *testing.T) {
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
c.Launch(t)
cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
cc := integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
resp, err := kapi.Create(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
cancel()
// ensure create has been applied in this machine
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
@ -199,13 +200,13 @@ func TestForceNewCluster(t *testing.T) {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer c.Members[0].Terminate(t)
c.waitLeader(t, c.Members[:1])
c.WaitMembersForLeader(t, c.Members[:1])

// use new http client to init new connection
cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
cc = integration.MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi = client.NewKeysAPI(cc)
// ensure force restart keep the old data, and new cluster can make progress
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
// ensure force restart keep the old data, and new Cluster can make progress
ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
@ -214,38 +215,38 @@ func TestForceNewCluster(t *testing.T) {
}

func TestAddMemberAfterClusterFullRotation(t *testing.T) {
BeforeTest(t)
c := NewCluster(t, 3)
integration.BeforeTest(t)
c := integration.NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)

// remove all the previous three members and add in three new members.
for i := 0; i < 3; i++ {
c.RemoveMember(t, uint64(c.Members[0].s.ID()))
c.waitLeader(t, c.Members)
c.MustRemoveMember(t, uint64(c.Members[0].Server.ID()))
c.WaitMembersForLeader(t, c.Members)

c.AddMember(t)
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)
}

c.AddMember(t)
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)

clusterMustProgress(t, c.Members)
}

// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
BeforeTest(t)
c := NewCluster(t, 5)
integration.BeforeTest(t)
c := integration.NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)

c.RemoveMember(t, uint64(c.Members[4].s.ID()))
c.waitLeader(t, c.Members)
c.MustRemoveMember(t, uint64(c.Members[4].Server.ID()))
c.WaitMembersForLeader(t, c.Members)

c.AddMember(t)
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}

@ -256,8 +257,8 @@ func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }

func testIssue2746(t *testing.T, members int) {
BeforeTest(t)
c := NewCluster(t, members)
integration.BeforeTest(t)
c := integration.NewCluster(t, members)

for _, m := range c.Members {
m.SnapshotCount = 10
@ -271,32 +272,32 @@ func testIssue2746(t *testing.T, members int) {
clusterMustProgress(t, c.Members)
}

c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
c.waitLeader(t, c.Members)
c.MustRemoveMember(t, uint64(c.Members[members-1].Server.ID()))
c.WaitMembersForLeader(t, c.Members)

c.AddMember(t)
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}

// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
BeforeTest(t)
// start 1-member cluster to ensure member 0 is the leader of the cluster.
c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
integration.BeforeTest(t)
// start 1-member Cluster to ensure member 0 is the leader of the Cluster.
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
c.Launch(t)
defer c.Terminate(t)

c.AddMember(t)
c.Members[1].Stop(t)

// send remove member-1 request to the cluster.
cc := MustNewHTTPClient(t, c.URLs(), nil)
// send remove member-1 request to the Cluster.
cc := integration.MustNewHTTPClient(t, c.URLs(), nil)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
// the proposal is not committed because member 1 is stopped, but the
// proposal is appended to leader's raft log.
ma.Remove(ctx, c.Members[1].s.ID().String())
// proposal is appended to leader'Server raft log.
ma.Remove(ctx, c.Members[1].Server.ID().String())
cancel()

// restart member, and expect it to send UpdateAttributes request.
@ -305,21 +306,21 @@ func TestIssue2904(t *testing.T) {
c.Members[1].Restart(t)
// when the member comes back, it ack the proposal to remove itself,
// and apply it.
<-c.Members[1].s.StopNotify()
<-c.Members[1].Server.StopNotify()

// terminate removed member
c.Members[1].Terminate(t)
c.Members = c.Members[:1]
// wait member to be removed.
c.waitMembersMatch(t, c.HTTPMembers())
c.WaitMembersMatch(t, c.HTTPMembers())
}

// TestIssue3699 tests minority failure during cluster configuration; it was
// deadlocking.
func TestIssue3699(t *testing.T) {
// start a cluster of 3 nodes a, b, c
BeforeTest(t)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
// start a Cluster of 3 nodes a, b, c
integration.BeforeTest(t)
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
c.Launch(t)
defer c.Terminate(t)

@ -330,16 +331,16 @@ func TestIssue3699(t *testing.T) {
c.AddMember(t)

// electing node d as leader makes node a unable to participate
leaderID := c.waitLeader(t, c.Members)
leaderID := c.WaitMembersForLeader(t, c.Members)
for leaderID != 3 {
c.Members[leaderID].Stop(t)
<-c.Members[leaderID].s.StopNotify()
<-c.Members[leaderID].Server.StopNotify()
// do not restart the killed member immediately.
// the member will advance its election timeout after restart,
// so it will have a better chance to become the leader again.
time.Sleep(time.Duration(electionTicks * int(tickDuration)))
time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration)))
c.Members[leaderID].Restart(t)
leaderID = c.waitLeader(t, c.Members)
leaderID = c.WaitMembersForLeader(t, c.Members)
}

// bring back node a
@ -351,17 +352,17 @@ func TestIssue3699(t *testing.T) {
// waiting for ReadyNotify can take several seconds
case <-time.After(10 * time.Second):
t.Fatalf("waited too long for ready notification")
case <-c.Members[0].s.StopNotify():
case <-c.Members[0].Server.StopNotify():
t.Fatalf("should not be stopped")
case <-c.Members[0].s.ReadyNotify():
case <-c.Members[0].Server.ReadyNotify():
}
// must waitLeader so goroutines don't leak on terminate
c.waitLeader(t, c.Members)
// must WaitMembersForLeader so goroutines don't leak on terminate
c.WaitMembersForLeader(t, c.Members)

// try to participate in cluster
cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
// try to participate in Cluster
cc := integration.MustNewHTTPClient(t, []string{c.URL(0)}, c.Cfg.ClientTLS)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
t.Fatalf("unexpected error on Set (%v)", err)
}
@ -370,21 +371,21 @@ func TestIssue3699(t *testing.T) {

// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
BeforeTest(t)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
integration.BeforeTest(t)
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)

// make cluster unhealthy and wait for downed peer
// make Cluster unhealthy and wait for downed peer
c.Members[0].Stop(t)
c.WaitLeader(t)

// all attempts to add member should fail
for i := 1; i < len(c.Members); i++ {
err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
err := c.AddMemberByURL(t, c.URL(i), "unix://foo:12345")
if err == nil {
t.Fatalf("should have failed adding peer")
}
@ -399,23 +400,23 @@ func TestRejectUnhealthyAdd(t *testing.T) {
c.WaitLeader(t)
time.Sleep(2 * etcdserver.HealthInterval)

// add member should succeed now that it's healthy
// add member should succeed now that it'Server healthy
var err error
for i := 1; i < len(c.Members); i++ {
if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
if err = c.AddMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
break
}
}
if err != nil {
t.Fatalf("should have added peer to healthy cluster (%v)", err)
t.Fatalf("should have added peer to healthy Cluster (%v)", err)
}
}

// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
BeforeTest(t)
c := newCluster(t, &ClusterConfig{Size: 5, UseBridge: true})
integration.BeforeTest(t)
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 5, UseBridge: true})
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
@ -428,7 +429,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
c.WaitLeader(t)

// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
err := c.removeMember(t, uint64(c.Members[2].s.ID()))
err := c.RemoveMember(t, uint64(c.Members[2].Server.ID()))
if err == nil {
t.Fatalf("should reject quorum breaking remove")
}
@ -438,10 +439,10 @@ func TestRejectUnhealthyRemove(t *testing.T) {
}

// member stopped after launch; wait for missing heartbeats
time.Sleep(time.Duration(electionTicks * int(tickDuration)))
time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration)))

// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil {
t.Fatalf("should accept removing down member")
}

@ -452,7 +453,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
time.Sleep((3 * etcdserver.HealthInterval) / 2)

// accept remove member since (4,1)-(1,0) => (3,1) has quorum
if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
if err = c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
}
@ -461,10 +462,10 @@ func TestRejectUnhealthyRemove(t *testing.T) {
// if 'initial-cluster-state' is set 'new' and old data directory still exists
// (see https://github.com/etcd-io/etcd/issues/7512 for more).
func TestRestartRemoved(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

// 1. start single-member cluster
c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
// 1. start single-member Cluster
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
@ -476,10 +477,10 @@ func TestRestartRemoved(t *testing.T) {
c.WaitLeader(t)

oldm := c.Members[0]
oldm.keepDataDirTerminate = true
oldm.KeepDataDirTerminate = true

// 3. remove first member, shut down without deleting data
if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
if err := c.RemoveMember(t, uint64(c.Members[0].Server.ID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
c.WaitLeader(t)
@ -495,7 +496,7 @@ func TestRestartRemoved(t *testing.T) {
os.RemoveAll(oldm.ServerConfig.DataDir)
}()
select {
case <-oldm.s.StopNotify():
case <-oldm.Server.StopNotify():
case <-time.After(time.Minute):
t.Fatalf("removed member didn't exit within %v", time.Minute)
}
@ -504,8 +505,8 @@ func TestRestartRemoved(t *testing.T) {
// clusterMustProgress ensures that cluster can make progress. It creates
// a random key first, and check the new key could be got from all client urls
// of the cluster.
func clusterMustProgress(t *testing.T, membs []*member) {
cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
func clusterMustProgress(t *testing.T, membs []*integration.Member) {
cc := integration.MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
key := fmt.Sprintf("foo%d", rand.Int())
var (
@ -514,7 +515,7 @@ func clusterMustProgress(t *testing.T, membs []*member) {
)
// retry in case of leader loss induced by slow CI
for i := 0; i < 3; i++ {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
resp, err = kapi.Create(ctx, "/"+key, "bar")
cancel()
if err == nil {
@ -528,9 +529,9 @@ func clusterMustProgress(t *testing.T, membs []*member) {

for i, m := range membs {
u := m.URL()
mcc := MustNewHTTPClient(t, []string{u}, nil)
mcc := integration.MustNewHTTPClient(t, []string{u}, nil)
mkapi := client.NewKeysAPI(mcc)
mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
t.Fatalf("#%d: watch on %s error: %v", i, u, err)
}
@ -539,8 +540,8 @@ func clusterMustProgress(t *testing.T, membs []*member) {
}

func TestSpeedyTerminate(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
// Stop/Restart so requests will time out on lost leaders
for i := 0; i < 3; i++ {
clus.Members[i].Stop(t)
@ -553,7 +554,7 @@ func TestSpeedyTerminate(t *testing.T) {
}()
select {
case <-time.After(10 * time.Second):
t.Fatalf("cluster took too long to terminate")
t.Fatalf("Cluster took too long to terminate")
case <-donec:
}
}

@ -34,14 +34,14 @@ import (
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

var (
testTLSInfo = transport.TLSInfo{
KeyFile: integration.MustAbsPath("../../fixtures/server.key.insecure"),
CertFile: integration.MustAbsPath("../../fixtures/server.crt"),
TrustedCAFile: integration.MustAbsPath("../../fixtures/ca.crt"),
KeyFile: integration2.MustAbsPath("../../fixtures/server.key.insecure"),
CertFile: integration2.MustAbsPath("../../fixtures/server.crt"),
TrustedCAFile: integration2.MustAbsPath("../../fixtures/ca.crt"),
ClientCertAuth: true,
}
)
@ -160,7 +160,7 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) {
t.Fatal(err)
}
}
cli, err := integration.NewClient(t, clientCfg)
cli, err := integration2.NewClient(t, clientCfg)
if err != nil {
t.Fatal(err)
}

@ -23,6 +23,7 @@ import (
"time"

clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
)

@ -94,14 +95,14 @@ func TestAuthority(t *testing.T) {
for _, tc := range tcs {
for _, clusterSize := range []int{1, 3} {
t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
BeforeTest(t)
cfg := ClusterConfig{
integration.BeforeTest(t)
cfg := integration.ClusterConfig{
Size: clusterSize,
UseTCP: tc.useTCP,
UseIP: tc.useTCP,
}
cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg)
clus := NewClusterV3(t, &cfg)
clus := integration.NewClusterV3(t, &cfg)
defer clus.Terminate(t)

kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig)
@ -118,11 +119,11 @@ func TestAuthority(t *testing.T) {
}
}

func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls.Config) {
func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integration.ClusterConfig, *tls.Config) {
t.Helper()
if useTLS {
cfg.ClientTLS = &testTLSInfo
tlsConfig, err := testTLSInfo.ClientConfig()
cfg.ClientTLS = &integration.TestTLSInfo
tlsConfig, err := integration.TestTLSInfo.ClientConfig()
if err != nil {
t.Fatal(err)
}
@ -131,7 +132,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls
return cfg, nil
}

func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfig *tls.Config) *clientv3.Client {
func setupClient(t *testing.T, endpointPattern string, clus *integration.ClusterV3, tlsConfig *tls.Config) *clientv3.Client {
t.Helper()
endpoints := templateEndpoints(t, endpointPattern, clus)
kv, err := clientv3.New(clientv3.Config{
@ -146,13 +147,13 @@ func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfi
return kv
}

func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string {
func templateEndpoints(t *testing.T, pattern string, clus *integration.ClusterV3) []string {
t.Helper()
endpoints := []string{}
for _, m := range clus.Members {
ent := pattern
if strings.Contains(ent, "%d") {
ent = fmt.Sprintf(ent, GrpcPortNumber(m.UniqNumber, m.MemberNumber))
ent = fmt.Sprintf(ent, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber))
}
if strings.Contains(ent, "%s") {
ent = fmt.Sprintf(ent, m.Name)
@ -165,11 +166,11 @@ func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string {
return endpoints
}

func templateAuthority(t *testing.T, pattern string, m *member) string {
func templateAuthority(t *testing.T, pattern string, m *integration.Member) string {
t.Helper()
authority := pattern
if strings.Contains(authority, "%d") {
authority = fmt.Sprintf(authority, GrpcPortNumber(m.UniqNumber, m.MemberNumber))
authority = fmt.Sprintf(authority, integration.GrpcPortNumber(m.UniqNumber, m.MemberNumber))
}
if strings.Contains(authority, "%s") {
authority = fmt.Sprintf(authority, m.Name)
@ -180,7 +181,7 @@ func templateAuthority(t *testing.T, pattern string, m *member) string {
return authority
}

func assertAuthority(t *testing.T, expectedAuthority string, clus *ClusterV3) {
func assertAuthority(t *testing.T, expectedAuthority string, clus *integration.ClusterV3) {
t.Helper()
requestsFound := 0
for _, m := range clus.Members {

@ -22,6 +22,7 @@ import (

"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// Infrastructure to provision a single shared cluster for tests - only
@ -42,7 +43,7 @@ type LazyCluster interface {
EndpointsV3() []string

// Cluster - calls to this method might initialize the cluster.
Cluster() *ClusterV3
Cluster() *integration.ClusterV3

// Transport - call to this method might initialize the cluster.
Transport() *http.Transport
@ -53,8 +54,8 @@ type LazyCluster interface {
}

type lazyCluster struct {
cfg ClusterConfig
cluster *ClusterV3
cfg integration.ClusterConfig
cluster *integration.ClusterV3
transport *http.Transport
once sync.Once
tb testutil.TB
@ -64,12 +65,12 @@ type lazyCluster struct {
// NewLazyCluster returns a new test cluster handler that gets created on the
// first call to GetEndpoints() or GetTransport()
func NewLazyCluster() LazyCluster {
return NewLazyClusterWithConfig(ClusterConfig{Size: 1})
return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1})
}

// NewLazyClusterWithConfig returns a new test cluster handler that gets created
// on the first call to GetEndpoints() or GetTransport()
func NewLazyClusterWithConfig(cfg ClusterConfig) LazyCluster {
func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster {
tb, closer := testutil.NewTestingTBProthesis("lazy_cluster")
return &lazyCluster{cfg: cfg, tb: tb, closer: closer}
}
@ -81,7 +82,7 @@ func (lc *lazyCluster) mustLazyInit() {
if err != nil {
log.Fatal(err)
}
lc.cluster = NewClusterV3(lc.tb, &lc.cfg)
lc.cluster = integration.NewClusterV3(lc.tb, &lc.cfg)
})
}

@ -105,7 +106,7 @@ func (lc *lazyCluster) EndpointsV3() []string {
return lc.Cluster().Client(0).Endpoints()
}

func (lc *lazyCluster) Cluster() *ClusterV3 {
func (lc *lazyCluster) Cluster() *integration.ClusterV3 {
lc.mustLazyInit()
return lc.cluster
}

@ -23,52 +23,53 @@ import (

"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestPauseMember(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

c := NewCluster(t, 5)
c := integration.NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)

for i := 0; i < 5; i++ {
c.Members[i].Pause()
membs := append([]*member{}, c.Members[:i]...)
membs := append([]*integration.Member{}, c.Members[:i]...)
membs = append(membs, c.Members[i+1:]...)
c.waitLeader(t, membs)
c.WaitMembersForLeader(t, membs)
clusterMustProgress(t, membs)
c.Members[i].Resume()
}
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}

func TestRestartMember(t *testing.T) {
BeforeTest(t)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
integration.BeforeTest(t)
c := integration.NewClusterFromConfig(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
c.Launch(t)
defer c.Terminate(t)

for i := 0; i < 3; i++ {
c.Members[i].Stop(t)
membs := append([]*member{}, c.Members[:i]...)
membs := append([]*integration.Member{}, c.Members[:i]...)
membs = append(membs, c.Members[i+1:]...)
c.waitLeader(t, membs)
c.WaitMembersForLeader(t, membs)
clusterMustProgress(t, membs)
err := c.Members[i].Restart(t)
if err != nil {
t.Fatal(err)
}
}
c.waitLeader(t, c.Members)
c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}

func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
size := 3
c := NewCluster(t, size)
c := integration.NewCluster(t, size)
m := c.Members[0].Clone(t)
var err error
m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd")
@ -87,8 +88,8 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
}

func TestSnapshotAndRestartMember(t *testing.T) {
BeforeTest(t)
m := mustNewMember(t, memberConfig{name: "snapAndRestartTest", useBridge: true})
integration.BeforeTest(t)
m := integration.MustNewMember(t, integration.MemberConfig{Name: "snapAndRestartTest", UseBridge: true})
m.SnapshotCount = 100
m.Launch()
defer m.Terminate(t)
@ -97,9 +98,9 @@ func TestSnapshotAndRestartMember(t *testing.T) {
resps := make([]*client.Response, 120)
var err error
for i := 0; i < 120; i++ {
cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
key := fmt.Sprintf("foo%d", i)
resps[i], err = kapi.Create(ctx, "/"+key, "bar")
if err != nil {
@ -112,9 +113,9 @@ func TestSnapshotAndRestartMember(t *testing.T) {

m.WaitOK(t)
for i := 0; i < 120; i++ {
cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
cc := integration.MustNewHTTPClient(t, []string{m.URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
key := fmt.Sprintf("foo%d", i)
resp, err := kapi.Get(ctx, "/"+key, nil)
if err != nil {

@ -25,12 +25,13 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/server/v3/storage"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestMetricDbSizeBoot checks that the db size metric is set on boot.
func TestMetricDbSizeBoot(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes")
@ -49,12 +50,12 @@ func TestMetricDbSizeDefrag(t *testing.T) {

// testMetricDbSizeDefrag checks that the db size metric is set after defrag.
func testMetricDbSizeDefrag(t *testing.T, name string) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.Client(0)).KV
mc := toGRPC(clus.Client(0)).Maintenance
kvc := integration.ToGRPC(clus.Client(0)).KV
mc := integration.ToGRPC(clus.Client(0)).Maintenance

// expand the db size
numPuts := 25 // large enough to write more than 1 page
@ -163,8 +164,8 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
}

func TestMetricQuotaBackendBytes(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes")
@ -181,8 +182,8 @@ func TestMetricQuotaBackendBytes(t *testing.T) {
}

func TestMetricsHealth(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second)

@ -18,12 +18,14 @@ import (
"fmt"
"testing"
"time"

"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 5})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5})
defer clus.Terminate(t)

leadIndex := clus.WaitLeader(t)
@ -32,20 +34,20 @@ func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
minority := []int{leadIndex, (leadIndex + 1) % 5}
majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5}

minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
minorityMembers := getMembersByIndexSlice(clus.Cluster, minority)
majorityMembers := getMembersByIndexSlice(clus.Cluster, majority)

// network partition (bi-directional)
injectPartition(t, minorityMembers, majorityMembers)

// minority leader must be lost
clus.waitNoLeader(minorityMembers)
clus.WaitMembersNoLeader(minorityMembers)

// wait extra election timeout
time.Sleep(2 * majorityMembers[0].ElectionTimeout())

// new leader must be from majority
clus.waitLeader(t, majorityMembers)
clus.WaitMembersForLeader(t, majorityMembers)

// recover network partition (bi-directional)
recoverPartition(t, minorityMembers, majorityMembers)
@ -69,9 +71,9 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) {
}

func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 5})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5})
defer clus.Terminate(t)

leadIndex := clus.WaitLeader(t)
@ -80,21 +82,21 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
majority := []int{leadIndex, (leadIndex + 1) % 5, (leadIndex + 2) % 5}
minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5}

majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
majorityMembers := getMembersByIndexSlice(clus.Cluster, majority)
minorityMembers := getMembersByIndexSlice(clus.Cluster, minority)

// network partition (bi-directional)
injectPartition(t, majorityMembers, minorityMembers)

// minority leader must be lost
clus.waitNoLeader(minorityMembers)
clus.WaitMembersNoLeader(minorityMembers)

// wait extra election timeout
time.Sleep(2 * majorityMembers[0].ElectionTimeout())

// leader must be hold in majority
leadIndex2 := clus.waitLeader(t, majorityMembers)
leadID, leadID2 := clus.Members[leadIndex].s.ID(), majorityMembers[leadIndex2].s.ID()
leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers)
leadID, leadID2 := clus.Members[leadIndex].Server.ID(), majorityMembers[leadIndex2].Server.ID()
if leadID != leadID2 {
return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2)
}
@ -108,9 +110,9 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
}

func TestNetworkPartition4Members(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 4})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 4})
defer clus.Terminate(t)

leadIndex := clus.WaitLeader(t)
@ -119,8 +121,8 @@ func TestNetworkPartition4Members(t *testing.T) {
groupA := []int{leadIndex, (leadIndex + 1) % 4}
groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4}

leaderPartition := getMembersByIndexSlice(clus.cluster, groupA)
followerPartition := getMembersByIndexSlice(clus.cluster, groupB)
leaderPartition := getMembersByIndexSlice(clus.Cluster, groupA)
followerPartition := getMembersByIndexSlice(clus.Cluster, groupB)

// network partition (bi-directional)
injectPartition(t, leaderPartition, followerPartition)
@ -137,21 +139,21 @@ func TestNetworkPartition4Members(t *testing.T) {
clusterMustProgress(t, clus.Members)
}

func getMembersByIndexSlice(clus *cluster, idxs []int) []*member {
ms := make([]*member, len(idxs))
func getMembersByIndexSlice(clus *integration.Cluster, idxs []int) []*integration.Member {
ms := make([]*integration.Member, len(idxs))
for i, idx := range idxs {
ms[i] = clus.Members[idx]
}
return ms
}

func injectPartition(t *testing.T, src, others []*member) {
func injectPartition(t *testing.T, src, others []*integration.Member) {
for _, m := range src {
m.InjectPartition(t, others...)
}
}

func recoverPartition(t *testing.T, src, others []*member) {
func recoverPartition(t *testing.T, src, others []*integration.Member) {
for _, m := range src {
m.RecoverPartition(t, others...)
}

@ -23,7 +23,7 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap/zaptest"

"go.uber.org/zap"
@ -31,9 +31,9 @@ import (
)

func TestClusterProxyMemberList(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t)
@ -43,7 +43,7 @@ func TestClusterProxyMemberList(t *testing.T) {
Endpoints: []string{cts.caddr},
DialTimeout: 5 * time.Second,
}
client, err := integration.NewClient(t, cfg)
client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("err %v, want nil", err)
}
@ -95,7 +95,7 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *cl
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
}
client, err := integration.NewClient(t, cfg)
client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}

@ -23,15 +23,14 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
"go.etcd.io/etcd/tests/v3/integration"

integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
)

func TestKVProxyRange(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t)
@ -42,7 +41,7 @@ func TestKVProxyRange(t *testing.T) {
Endpoints: []string{kvts.l.Addr().String()},
DialTimeout: 5 * time.Second,
}
client, err := integration.NewClient(t, cfg)
client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
@ -71,7 +70,7 @@ func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer {
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
}
client, err := integration.NewClient(t, cfg)
client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}

@ -21,14 +21,14 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap/zaptest"
)

func TestRegister(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
paddr := clus.Members[0].GRPCURL()

@ -24,14 +24,14 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestSnapshotV3RestoreMultiMemberAdd ensures that multiple members
// can boot into the same cluster after being restored from a same
// snapshot file, and also be able to add another member to the cluster.
func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)

kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
dbPath := createSnapshotFile(t, kvs)
@ -48,7 +48,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
// wait for health interval + leader election
time.Sleep(etcdserver.HealthInterval + 2*time.Second)

cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}})
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}})
if err != nil {
t.Fatal(err)
}
@ -63,7 +63,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
// wait for membership reconfiguration apply
time.Sleep(testutil.ApplyTimeout)

cfg := integration.NewEmbedConfig(t, "3")
cfg := integration2.NewEmbedConfig(t, "3")
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs
@ -88,7 +88,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
t.Fatalf("failed to start the newly added etcd member")
}

cli2, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}})
cli2, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}})
if err != nil {
t.Fatal(err)
}

@ -29,7 +29,7 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/etcdutl/v3/snapshot"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest"
)
@ -37,7 +37,7 @@ import (
// TestSnapshotV3RestoreSingle tests single node cluster restoring
// from a snapshot file.
func TestSnapshotV3RestoreSingle(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)
kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
dbPath := createSnapshotFile(t, kvs)

@ -45,7 +45,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
urls := newEmbedURLs(clusterN * 2)
cURLs, pURLs := urls[:clusterN], urls[clusterN:]

cfg := integration.NewEmbedConfig(t, "s1")
cfg := integration2.NewEmbedConfig(t, "s1")
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
@ -82,7 +82,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
}

var cli *clientv3.Client
cli, err = integration.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
cli, err = integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
if err != nil {
t.Fatal(err)
}
@ -103,7 +103,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
// can boot into the same cluster after being restored from a same
// snapshot file.
func TestSnapshotV3RestoreMulti(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)
kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
dbPath := createSnapshotFile(t, kvs)

@ -119,7 +119,7 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {
time.Sleep(time.Second)

for i := 0; i < clusterN; i++ {
cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}})
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}})
if err != nil {
t.Fatal(err)
}
@ -139,8 +139,8 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {

// TestCorruptedBackupFileCheck tests if we can correctly identify a corrupted backup file.
func TestCorruptedBackupFileCheck(t *testing.T) {
dbPath := integration.MustAbsPath("testdata/corrupted_backup.db")
integration.BeforeTest(t)
dbPath := integration2.MustAbsPath("testdata/corrupted_backup.db")
integration2.BeforeTest(t)
if _, err := os.Stat(dbPath); err != nil {
t.Fatalf("test file [%s] does not exist: %v", dbPath, err)
}
@ -175,7 +175,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
urls := newEmbedURLs(clusterN * 2)
cURLs, pURLs := urls[:clusterN], urls[clusterN:]

cfg := integration.NewEmbedConfig(t, "default")
cfg := integration2.NewEmbedConfig(t, "default")
cfg.ClusterState = "new"
cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
cfg.LPUrls, cfg.APUrls = pURLs, pURLs
@ -194,7 +194,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
}

ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
cli, err := integration.NewClient(t, ccfg)
cli, err := integration2.NewClient(t, ccfg)
if err != nil {
t.Fatal(err)
}
@ -234,7 +234,7 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (

cfgs := make([]*embed.Config, clusterN)
for i := 0; i < clusterN; i++ {
cfg := integration.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}

@ -15,12 +15,13 @@
package integration_test

import (
"go.etcd.io/etcd/tests/v3/integration"
"testing"

integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestBeforeTestWithoutLeakDetection(t *testing.T) {
integration.BeforeTest(t, integration.WithoutGoLeakDetection(), integration.WithoutSkipInShort())
integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort())
// Intentional leak that should get ignored
go func() {

@ -21,6 +21,7 @@ import (

"github.com/coreos/go-semver/semver"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap"

"go.etcd.io/etcd/client/pkg/v3/testutil"
@ -33,7 +34,7 @@ import (
func TestEtcdVersionFromWAL(t *testing.T) {
testutil.SkipTestIfShortMode(t,
"Wal creation tests are depending on embedded etcd server so are integration-level tests.")
cfg := NewEmbedConfig(t, "default")
cfg := integration.NewEmbedConfig(t, "default")
srv, err := embed.StartEtcd(cfg)
if err != nil {
t.Fatal(err)
@ -45,7 +46,7 @@ func TestEtcdVersionFromWAL(t *testing.T) {
}

ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
cli, err := NewClient(t, ccfg)
cli, err := integration.NewClient(t, ccfg)
if err != nil {
srv.Close()
t.Fatal(err)

@ -27,11 +27,12 @@ import (
"time"

"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

func TestV2Set(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -92,8 +93,8 @@ func TestV2Set(t *testing.T) {
}

func TestV2CreateUpdate(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -228,8 +229,8 @@ func TestV2CreateUpdate(t *testing.T) {
}

func TestV2CAS(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -376,8 +377,8 @@ func TestV2CAS(t *testing.T) {
}

func TestV2Delete(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -476,8 +477,8 @@ func TestV2Delete(t *testing.T) {
}

func TestV2CAD(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -576,8 +577,8 @@ func TestV2CAD(t *testing.T) {
}

func TestV2Unique(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -643,8 +644,8 @@ func TestV2Unique(t *testing.T) {
}

func TestV2Get(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -741,8 +742,8 @@ func TestV2Get(t *testing.T) {
}

func TestV2QuorumGet(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -839,8 +840,8 @@ func TestV2QuorumGet(t *testing.T) {
}

func TestV2Watch(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -877,8 +878,8 @@ func TestV2Watch(t *testing.T) {
}

func TestV2WatchWithIndex(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -944,8 +945,8 @@ func TestV2WatchWithIndex(t *testing.T) {
}

func TestV2WatchKeyInDir(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -1005,8 +1006,8 @@ func TestV2WatchKeyInDir(t *testing.T) {
}

func TestV2Head(t *testing.T) {
BeforeTest(t)
cl := NewCluster(t, 1)
integration.BeforeTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)

@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

type v2TestStore struct {
@ -41,7 +41,7 @@ func newTestStore(t *testing.T, ns ...string) StoreCloser {

// Ensure that the store can recover from a previously saved state.
func TestStoreRecover(t *testing.T) {
integration.BeforeTest(t)
integration2.BeforeTest(t)
s := newTestStore(t)
defer s.Close()
var eidx uint64 = 4

@ -22,20 +22,20 @@ import (

"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2v3"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

type v2v3TestStore struct {
v2store.Store
clus *integration.ClusterV3
clus *integration2.ClusterV3
t *testing.T
}

func (s *v2v3TestStore) Close() { s.clus.Terminate(s.t) }

func newTestStore(t *testing.T, ns ...string) StoreCloser {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
integration2.BeforeTest(t)
clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
return &v2v3TestStore{
v2v3.NewStore(clus.Client(0), "/v2/"),
clus,

@ -21,15 +21,15 @@ import (
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2v3"
"go.etcd.io/etcd/tests/v3/integration"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

// TODO: fix tests

func runWithCluster(t testing.TB, runner func(testing.TB, []string)) {
integration.BeforeTest(t)
cfg := integration.ClusterConfig{Size: 1}
clus := integration.NewClusterV3(t, &cfg)
integration2.BeforeTest(t)
cfg := integration2.ClusterConfig{Size: 1}
clus := integration2.NewClusterV3(t, &cfg)
defer clus.Terminate(t)
endpoints := []string{clus.Client(0).Endpoints()[0]}
runner(t, endpoints)
@ -38,7 +38,7 @@ func runWithCluster(t testing.TB, runner func(testing.TB, []string)) {
func TestCreateKV(t *testing.T) { runWithCluster(t, testCreateKV) }

func testCreateKV(t testing.TB, endpoints []string) {
integration.BeforeTest(t)
integration2.BeforeTest(t)
testCases := []struct {
key string
value string
@ -54,7 +54,7 @@ func testCreateKV(t testing.TB, endpoints []string) {
//{key: "hello", value: "3", unique: true, wantKeyMatch: false},
}

cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints})
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints})
if err != nil {
t.Fatal(err)
}
@ -102,7 +102,7 @@ func testSetKV(t testing.TB, endpoints []string) {
{key: "/sdir/set", value: "4", wantIndexMatch: false},
}

cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints})
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints})
if err != nil {
t.Fatal(err)
}
@ -130,7 +130,7 @@ func testSetKV(t testing.TB, endpoints []string) {
func TestCreateSetDir(t *testing.T) { runWithCluster(t, testCreateSetDir) }

func testCreateSetDir(t testing.TB, endpoints []string) {
integration.BeforeTest(t)
integration2.BeforeTest(t)
testCases := []struct {
dir string
}{
@ -138,7 +138,7 @@ func testCreateSetDir(t testing.TB, endpoints []string) {
{dir: "/ddir/1/2/3"},
}

cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints})
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: endpoints})
if err != nil {
t.Fatal(err)
}

@ -27,24 +27,25 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/tests/v3/framework/integration"
"go.uber.org/zap/zaptest"
)

// TestV3StorageQuotaApply tests the V3 server respects quotas during apply
func TestV3StorageQuotaApply(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
quotasize := int64(16 * os.Getpagesize())

clus := NewClusterV3(t, &ClusterConfig{Size: 2, UseBridge: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
kvc0 := toGRPC(clus.Client(0)).KV
kvc1 := toGRPC(clus.Client(1)).KV
kvc0 := integration.ToGRPC(clus.Client(0)).KV
kvc1 := integration.ToGRPC(clus.Client(1)).KV

// Set a quota on one node
clus.Members[0].QuotaBackendBytes = quotasize
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)
waitForRestart(t, kvc0)

key := []byte("abc")
@ -73,7 +74,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
stopc := time.After(5 * time.Second)
for {
req := &pb.AlarmRequest{Action: pb.AlarmRequest_GET}
resp, aerr := clus.Members[0].s.Alarm(context.TODO(), req)
resp, aerr := clus.Members[0].Server.Alarm(context.TODO(), req)
if aerr != nil {
t.Fatal(aerr)
}
@ -87,7 +88,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
}
}

ctx, cancel := context.WithTimeout(context.TODO(), RequestWaitTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout)
defer cancel()

// small quota machine should reject put
@ -103,7 +104,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
// reset large quota node to ensure alarm persisted
clus.Members[1].Stop(t)
clus.Members[1].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)

if _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {
t.Fatalf("alarmed instance should reject put after reset")
@ -112,12 +113,12 @@ func TestV3StorageQuotaApply(t *testing.T) {

// TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through.
func TestV3AlarmDeactivate(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
kvc := toGRPC(clus.RandClient()).KV
mt := toGRPC(clus.RandClient()).Maintenance
kvc := integration.ToGRPC(clus.RandClient()).KV
mt := integration.ToGRPC(clus.RandClient()).Maintenance

alarmReq := &pb.AlarmRequest{
MemberID: 123,
@ -146,8 +147,8 @@ func TestV3AlarmDeactivate(t *testing.T) {
}

func TestV3CorruptAlarm(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

var wg sync.WaitGroup

@ -26,18 +26,19 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error.
func TestV3AuthEmptyUserGet(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()

api := toGRPC(clus.Client(0))
api := integration.ToGRPC(clus.Client(0))
authSetupRoot(t, api.Auth)

_, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")})
@ -49,13 +50,13 @@ func TestV3AuthEmptyUserGet(t *testing.T) {
// TestV3AuthTokenWithDisable tests that auth won't crash if
// given a valid token when authentication is disabled
func TestV3AuthTokenWithDisable(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)

c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
if cerr != nil {
t.Fatal(cerr)
}
@ -81,11 +82,11 @@ func TestV3AuthTokenWithDisable(t *testing.T) {
}

func TestV3AuthRevision(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

api := toGRPC(clus.Client(0))
api := integration.ToGRPC(clus.Client(0))

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
presp, perr := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
@ -109,25 +110,25 @@ func TestV3AuthRevision(t *testing.T) {
// TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases
// with root user be revoked after TTL.
func TestV3AuthWithLeaseRevokeWithRoot(t *testing.T) {
testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1})
testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1})
}

// TestV3AuthWithLeaseRevokeWithRootJWT creates a lease with a JWT-token enabled cluster.
// And tests if server is able to revoke expiry lease item.
func TestV3AuthWithLeaseRevokeWithRootJWT(t *testing.T) {
testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1, AuthToken: defaultTokenJWT})
testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1, AuthToken: integration.DefaultTokenJWT})
}

func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg ClusterConfig) {
BeforeTest(t)
func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterConfig) {
integration.BeforeTest(t)

clus := NewClusterV3(t, &ccfg)
clus := integration.NewClusterV3(t, &ccfg)
defer clus.Terminate(t)

api := toGRPC(clus.Client(0))
api := integration.ToGRPC(clus.Client(0))
authSetupRoot(t, api.Auth)

rootc, cerr := NewClient(t, clientv3.Config{
rootc, cerr := integration.NewClient(t, clientv3.Config{
Endpoints: clus.Client(0).Endpoints(),
Username: "root",
Password: "123",
@ -177,8 +178,8 @@ type user struct {
}

func TestV3AuthWithLeaseRevoke(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

users := []user{
@ -190,11 +191,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
end: "k2",
},
}
authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users)
authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users)

authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)

rootc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
rootc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
if cerr != nil {
t.Fatal(cerr)
}
@ -211,7 +212,7 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
t.Fatal(err)
}

userc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
if cerr != nil {
t.Fatal(cerr)
}
@ -223,8 +224,8 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
}

func TestV3AuthWithLeaseAttach(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

users := []user{
@ -243,17 +244,17 @@ func TestV3AuthWithLeaseAttach(t *testing.T) {
end: "k4",
},
}
authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users)
authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users)

authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)

user1c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
user1c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
if cerr != nil {
t.Fatal(cerr)
}
defer user1c.Close()

user2c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"})
user2c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"})
if cerr != nil {
t.Fatal(cerr)
}
@ -335,8 +336,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) {
}

func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

nonAuthedKV := clus.Client(0).KV
@ -348,7 +349,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
t.Fatalf("couldn't put key (%v)", err)
}

authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)

respput, err := nonAuthedKV.Put(context.TODO(), key, val)
if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) {
@ -358,13 +359,13 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {

func TestV3AuthOldRevConcurrent(t *testing.T) {
t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed.
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)

c, cerr := NewClient(t, clientv3.Config{
c, cerr := integration.NewClient(t, clientv3.Config{
Endpoints: clus.Client(0).Endpoints(),
DialTimeout: 5 * time.Second,
Username: "root",

@ -22,20 +22,21 @@ import (

"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestElectionWait tests if followers can correctly wait for elections.
func TestElectionWait(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

leaders := 3
followers := 3
var clients []*clientv3.Client
newClient := MakeMultiNodeClients(t, clus, &clients)
newClient := integration.MakeMultiNodeClients(t, clus, &clients)
defer func() {
CloseClients(t, clients)
integration.CloseClients(t, clients)
}()

electedc := make(chan string)
@ -108,8 +109,8 @@ func TestElectionWait(t *testing.T) {

// TestElectionFailover tests that an election will
func TestElectionFailover(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

cctx, cancel := context.WithCancel(context.TODO())
@ -119,7 +120,7 @@ func TestElectionFailover(t *testing.T) {

for i := 0; i < 3; i++ {
var err error
ss[i], err = concurrency.NewSession(clus.clients[i])
ss[i], err = concurrency.NewSession(clus.Clients[i])
if err != nil {
t.Error(err)
}
@ -176,8 +177,8 @@ func TestElectionFailover(t *testing.T) {
// TestElectionSessionRelock ensures that campaigning twice on the same election
// with the same lock will Proclaim instead of deadlocking.
func TestElectionSessionRecampaign(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()

@ -209,8 +210,8 @@ func TestElectionSessionRecampaign(t *testing.T) {
// of bug #6278. https://github.com/etcd-io/etcd/issues/6278
//
func TestElectionOnPrefixOfExistingKey(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.RandClient()
@ -236,8 +237,8 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) {
// in a new session with the same lease id) does not result in loss of
// leadership.
func TestElectionOnSessionRestart(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()

@ -283,8 +284,8 @@ func TestElectionOnSessionRestart(t *testing.T) {
// TestElectionObserveCompacted checks that observe can tolerate
// a leader key with a modrev less than the compaction revision.
func TestElectionObserveCompacted(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)

@ -22,6 +22,7 @@ import (

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -30,12 +31,12 @@ import (
// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
// does not panic the mvcc backend while defragment is running.
func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.RandClient()
kvc := toGRPC(cli).KV
kvc := integration.ToGRPC(cli).KV
if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
t.Fatal(err)
}
@ -48,7 +49,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
}()

mvc := toGRPC(cli).Maintenance
mvc := integration.ToGRPC(cli).Maintenance
mvc.Defragment(context.Background(), &pb.DefragmentRequest{})
cancel()

@ -60,12 +61,12 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
// They are either finished or canceled, but never crash the backend.
// See https://github.com/etcd-io/etcd/issues/7322 for more detail.
func TestV3KVInflightRangeRequests(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

cli := clus.RandClient()
kvc := toGRPC(cli).KV
kvc := integration.ToGRPC(cli).KV

if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
t.Fatal(err)

@ -30,6 +30,7 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/integration"

"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -37,14 +38,14 @@ import (
"google.golang.org/grpc/status"
)

// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// TestV3PutOverwrite puts a key with the v3 api to a random Cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
key := []byte("foo")
reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true}

@ -88,26 +89,26 @@ func TestV3PutOverwrite(t *testing.T) {

// TestPutRestart checks if a put after an unrelated member restart succeeds
func TestV3PutRestart(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

kvIdx := rand.Intn(3)
kvc := toGRPC(clus.Client(kvIdx)).KV
kvc := integration.ToGRPC(clus.Client(kvIdx)).KV

stopIdx := kvIdx
for stopIdx == kvIdx {
stopIdx = rand.Intn(3)
}

clus.clients[stopIdx].Close()
clus.Clients[stopIdx].Close()
clus.Members[stopIdx].Stop(t)
clus.Members[stopIdx].Restart(t)
c, cerr := NewClientV3(clus.Members[stopIdx])
c, cerr := integration.NewClientV3(clus.Members[stopIdx])
if cerr != nil {
t.Fatalf("cannot create client: %v", cerr)
}
clus.clients[stopIdx] = c
clus.Clients[stopIdx] = c

ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
defer cancel()
@ -120,11 +121,11 @@ func TestV3PutRestart(t *testing.T) {

// TestV3CompactCurrentRev ensures keys are present when compacting on current revision.
func TestV3CompactCurrentRev(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
for i := 0; i < 3; i++ {
if _, err := kvc.Put(context.Background(), preq); err != nil {
@ -154,12 +155,12 @@ func TestV3CompactCurrentRev(t *testing.T) {

// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev.
func TestV3HashKV(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
mvc := toGRPC(clus.RandClient()).Maintenance
kvc := integration.ToGRPC(clus.RandClient()).KV
mvc := integration.ToGRPC(clus.RandClient()).Maintenance

for i := 0; i < 10; i++ {
resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))})
@ -202,12 +203,12 @@ func TestV3HashKV(t *testing.T) {
}

func TestV3TxnTooManyOps(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
maxTxnOps := uint(128)
clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

// unique keys
i := new(int)
@ -278,8 +279,8 @@ func TestV3TxnTooManyOps(t *testing.T) {
}

func TestV3TxnDuplicateKeys(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}}
@ -323,7 +324,7 @@ func TestV3TxnDuplicateKeys(t *testing.T) {
},
}

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
tests := []struct {
txnSuccess []*pb.RequestOp

@ -396,11 +397,11 @@ func TestV3TxnDuplicateKeys(t *testing.T) {

// Testv3TxnRevision tests that the transaction header revision is set as expected.
func TestV3TxnRevision(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}
presp, err := kvc.Put(context.TODO(), pr)
if err != nil {
@ -447,11 +448,11 @@ func TestV3TxnRevision(t *testing.T) {
// Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected
// when compared to the Succeeded field in the txn response.
func TestV3TxnCmpHeaderRev(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

for i := 0; i < 10; i++ {
// Concurrently put a key with a txn comparing on it.
@ -503,8 +504,8 @@ func TestV3TxnCmpHeaderRev(t *testing.T) {

// TestV3TxnRangeCompare tests range comparisons in txns
func TestV3TxnRangeCompare(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

// put keys, named by expected revision
@ -598,7 +599,7 @@ func TestV3TxnRangeCompare(t *testing.T) {
},
}

kvc := toGRPC(clus.Client(0)).KV
kvc := integration.ToGRPC(clus.Client(0)).KV
for i, tt := range tests {
txn := &pb.TxnRequest{}
txn.Compare = append(txn.Compare, &tt.cmp)
@ -614,11 +615,11 @@ func TestV3TxnRangeCompare(t *testing.T) {

// TestV3TxnNested tests nested txns follow paths as expected.
func TestV3TxnNestedPath(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

cmpTrue := &pb.Compare{
Result: pb.Compare_EQUAL,
@ -667,17 +668,17 @@ func TestV3TxnNestedPath(t *testing.T) {

// TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair.
func TestV3PutIgnoreValue(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
key, val := []byte("foo"), []byte("bar")
putReq := pb.PutRequest{Key: key, Value: val}

// create lease
lc := toGRPC(clus.RandClient()).Lease
lc := integration.ToGRPC(clus.RandClient()).Lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err != nil {
t.Fatal(err)
@ -800,15 +801,15 @@ func TestV3PutIgnoreValue(t *testing.T) {

// TestV3PutIgnoreLease ensures that writes with ignore_lease uses previous lease for the key overwrites.
func TestV3PutIgnoreLease(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

// create lease
lc := toGRPC(clus.RandClient()).Lease
lc := integration.ToGRPC(clus.RandClient()).Lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err != nil {
t.Fatal(err)
@ -940,11 +941,11 @@ func TestV3PutIgnoreLease(t *testing.T) {

// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
key := []byte("foo")
preq := &pb.PutRequest{Key: key, Lease: 123456}
tests := []func(){
@ -1011,7 +1012,7 @@ func TestV3PutMissingLease(t *testing.T) {

// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
tests := []struct {
name string

@ -1069,8 +1070,8 @@ func TestV3DeleteRange(t *testing.T) {

for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
kvc := toGRPC(clus.RandClient()).KV
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
kvc := integration.ToGRPC(clus.RandClient()).KV
defer clus.Terminate(t)

ks := tt.keySet
@ -1123,11 +1124,11 @@ func TestV3DeleteRange(t *testing.T) {

// TestV3TxnInvalidRange tests that invalid ranges are rejected in txns.
func TestV3TxnInvalidRange(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}

for i := 0; i < 3; i++ {
@ -1166,12 +1167,12 @@ func TestV3TxnInvalidRange(t *testing.T) {
}

func TestV3TooLargeRequest(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

// 2MB request value
largeV := make([]byte, 2*1024*1024)
@ -1185,13 +1186,13 @@ func TestV3TooLargeRequest(t *testing.T) {

// TestV3Hash tests hash.
func TestV3Hash(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

cli := clus.RandClient()
kvc := toGRPC(cli).KV
m := toGRPC(cli).Maintenance
kvc := integration.ToGRPC(cli).KV
m := integration.ToGRPC(cli).Maintenance

preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}

@ -1210,12 +1211,12 @@ func TestV3Hash(t *testing.T) {

// TestV3HashRestart ensures that hash stays the same after restart.
func TestV3HashRestart(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

cli := clus.RandClient()
resp, err := toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
resp, err := integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
if err != nil || resp.Hash == 0 {
t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
}
@ -1223,12 +1224,12 @@ func TestV3HashRestart(t *testing.T) {

clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
kvc := toGRPC(clus.Client(0)).KV
clus.WaitMembersForLeader(t, clus.Members)
kvc := integration.ToGRPC(clus.Client(0)).KV
waitForRestart(t, kvc)

cli = clus.RandClient()
resp, err = toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
resp, err = integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
if err != nil || resp.Hash == 0 {
t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
}
@ -1241,10 +1242,10 @@ func TestV3HashRestart(t *testing.T) {

// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer
|
||||
func TestV3StorageQuotaAPI(t *testing.T) {
|
||||
BeforeTest(t)
|
||||
integration.BeforeTest(t)
|
||||
quotasize := int64(16 * os.Getpagesize())
|
||||
|
||||
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
|
||||
|
||||
// Set a quota on one node
|
||||
clus.Members[0].QuotaBackendBytes = quotasize
|
||||
@ -1252,7 +1253,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
|
||||
clus.Members[0].Restart(t)
|
||||
|
||||
defer clus.Terminate(t)
|
||||
kvc := toGRPC(clus.Client(0)).KV
|
||||
kvc := integration.ToGRPC(clus.Client(0)).KV
|
||||
waitForRestart(t, kvc)
|
||||
|
||||
key := []byte("abc")
|
||||
@ -1288,7 +1289,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestV3RangeRequest(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
tests := []struct {
name string

@ -1508,10 +1509,10 @@ func TestV3RangeRequest(t *testing.T) {

for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
for _, k := range tt.putKeys {
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
if _, err := kvc.Put(context.TODO(), req); err != nil {
t.Fatalf("#%d: couldn't put key (%v)", i, err)
@ -1519,7 +1520,7 @@ func TestV3RangeRequest(t *testing.T) {
}

for j, req := range tt.reqs {
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
resp, err := kvc.Range(context.TODO(), &req)
if err != nil {
t.Errorf("#%d.%d: Range error: %v", i, j, err)
@ -1550,24 +1551,24 @@ func TestV3RangeRequest(t *testing.T) {
}
}

func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
func newClusterV3NoClients(t *testing.T, cfg *integration.ClusterConfig) *integration.ClusterV3 {
cfg.UseGRPC = true
clus := &ClusterV3{cluster: NewClusterByConfig(t, cfg)}
clus := &integration.ClusterV3{Cluster: integration.NewClusterByConfig(t, cfg)}
clus.Launch(t)
return clus
}

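An illustrative call (not from the diff) showing how newClusterV3NoClients pairs with the now-exported integration.NewClientV3:

func dialNoClientsClusterSketch(t *testing.T) {
	integration.BeforeTest(t)
	cfg := integration.ClusterConfig{Size: 1}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	// No clients were created at launch; dial the first member by hand.
	client, err := integration.NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("cannot create client: %v", err)
	}
	defer client.Close()
}
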
// TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client.
func TestTLSGRPCRejectInsecureClient(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo}
clus := newClusterV3NoClients(t, &cfg)
defer clus.Terminate(t)

// nil out TLS field so client will use an insecure connection
clus.Members[0].ClientTLSInfo = nil
client, err := NewClientV3(clus.Members[0])
client, err := integration.NewClientV3(clus.Members[0])
if err != nil && err != context.DeadlineExceeded {
t.Fatalf("unexpected error (%v)", err)
} else if client == nil {
@ -1582,7 +1583,7 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) {
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
_, perr := toGRPC(client).KV.Put(ctx, reqput)
_, perr := integration.ToGRPC(client).KV.Put(ctx, reqput)
cancel()
donec <- perr
}()
@ -1594,16 +1595,16 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) {

// TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server.
func TestTLSGRPCRejectSecureClient(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

cfg := ClusterConfig{Size: 3}
cfg := integration.ClusterConfig{Size: 3}
clus := newClusterV3NoClients(t, &cfg)
defer clus.Terminate(t)

clus.Members[0].ClientTLSInfo = &testTLSInfo
clus.Members[0].ClientTLSInfo = &integration.TestTLSInfo
clus.Members[0].DialOptions = []grpc.DialOption{grpc.WithBlock()}
clus.Members[0].grpcURL = strings.Replace(clus.Members[0].grpcURL, "http://", "https://", 1)
client, err := NewClientV3(clus.Members[0])
clus.Members[0].GrpcURL = strings.Replace(clus.Members[0].GrpcURL, "http://", "https://", 1)
client, err := integration.NewClientV3(clus.Members[0])
if client != nil || err == nil {
client.Close()
t.Fatalf("expected no client")
@ -1614,20 +1615,20 @@ func TestTLSGRPCRejectSecureClient(t *testing.T) {

// TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS
func TestTLSGRPCAcceptSecureAll(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
cfg := integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo}
clus := newClusterV3NoClients(t, &cfg)
defer clus.Terminate(t)

client, err := NewClientV3(clus.Members[0])
client, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatalf("expected tls client (%v)", err)
}
defer client.Close()

reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
if _, err := toGRPC(client).KV.Put(context.TODO(), reqput); err != nil {
if _, err := integration.ToGRPC(client).KV.Put(context.TODO(), reqput); err != nil {
t.Fatalf("unexpected error on put over tls (%v)", err)
}
}
@ -1656,11 +1657,11 @@ func TestTLSReloadAtomicReplace(t *testing.T) {
defer os.RemoveAll(certsDirExp)

cloneFunc := func() transport.TLSInfo {
tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir)
if terr != nil {
t.Fatal(terr)
}
if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil {
if _, err = copyTLSFiles(integration.TestTLSInfoExpired, certsDirExp); err != nil {
t.Fatal(err)
}
return tlsInfo
@ -1702,19 +1703,19 @@ func TestTLSReloadCopy(t *testing.T) {
defer os.RemoveAll(certsDir)

cloneFunc := func() transport.TLSInfo {
tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir)
if terr != nil {
t.Fatal(terr)
}
return tlsInfo
}
replaceFunc := func() {
if _, err = copyTLSFiles(testTLSInfoExpired, certsDir); err != nil {
if _, err = copyTLSFiles(integration.TestTLSInfoExpired, certsDir); err != nil {
t.Fatal(err)
}
}
revertFunc := func() {
if _, err = copyTLSFiles(testTLSInfo, certsDir); err != nil {
if _, err = copyTLSFiles(integration.TestTLSInfo, certsDir); err != nil {
t.Fatal(err)
}
}
@ -1732,19 +1733,19 @@ func TestTLSReloadCopyIPOnly(t *testing.T) {
defer os.RemoveAll(certsDir)

cloneFunc := func() transport.TLSInfo {
tlsInfo, terr := copyTLSFiles(testTLSInfoIP, certsDir)
tlsInfo, terr := copyTLSFiles(integration.TestTLSInfoIP, certsDir)
if terr != nil {
t.Fatal(terr)
}
return tlsInfo
}
replaceFunc := func() {
if _, err = copyTLSFiles(testTLSInfoExpiredIP, certsDir); err != nil {
if _, err = copyTLSFiles(integration.TestTLSInfoExpiredIP, certsDir); err != nil {
t.Fatal(err)
}
}
revertFunc := func() {
if _, err = copyTLSFiles(testTLSInfoIP, certsDir); err != nil {
if _, err = copyTLSFiles(integration.TestTLSInfoIP, certsDir); err != nil {
t.Fatal(err)
}
}
@ -1757,13 +1758,13 @@ func testTLSReload(
replaceFunc func(),
revertFunc func(),
useIP bool) {
BeforeTest(t)
integration.BeforeTest(t)

// 1. separate copies for TLS assets modification
tlsInfo := cloneFunc()

// 2. start cluster with valid certs
clus := NewClusterV3(t, &ClusterConfig{
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 1,
PeerTLS: &tlsInfo,
ClientTLS: &tlsInfo,
@ -1785,7 +1786,7 @@ func testTLSReload(
t.Log(err)
continue
}
cli, cerr := NewClient(t, clientv3.Config{
cli, cerr := integration.NewClient(t, clientv3.Config{
DialOptions: []grpc.DialOption{grpc.WithBlock()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: time.Second,
@ -1820,7 +1821,7 @@ func testTLSReload(
if terr != nil {
t.Fatal(terr)
}
cl, cerr := NewClient(t, clientv3.Config{
cl, cerr := integration.NewClient(t, clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: 5 * time.Second,
TLS: tls,
@ -1832,46 +1833,46 @@ func testTLSReload(
}

func TestGRPCRequireLeader(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

cfg := ClusterConfig{Size: 3}
cfg := integration.ClusterConfig{Size: 3}
clus := newClusterV3NoClients(t, &cfg)
defer clus.Terminate(t)

clus.Members[1].Stop(t)
clus.Members[2].Stop(t)

client, err := NewClientV3(clus.Members[0])
client, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
defer client.Close()

// wait for election timeout, then member[0] will not have a leader.
time.Sleep(time.Duration(3*electionTicks) * tickDuration)
time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)

md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
ctx := metadata.NewOutgoingContext(context.Background(), md)
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
if _, err := toGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
if _, err := integration.ToGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
}
}

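The require-leader gating above reduces to one reusable pattern; a sketch built only from identifiers in this hunk:

// requireLeaderCtx returns a context whose RPCs fail fast with a
// no-leader error when the serving member has lost its raft leader.
func requireLeaderCtx(ctx context.Context) context.Context {
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	return metadata.NewOutgoingContext(ctx, md)
}
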
func TestGRPCStreamRequireLeader(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

cfg := ClusterConfig{Size: 3, UseBridge: true}
cfg := integration.ClusterConfig{Size: 3, UseBridge: true}
clus := newClusterV3NoClients(t, &cfg)
defer clus.Terminate(t)

client, err := NewClientV3(clus.Members[0])
client, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatalf("failed to create client (%v)", err)
}
defer client.Close()

wAPI := toGRPC(client).Watch
wAPI := integration.ToGRPC(client).Watch
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
ctx := metadata.NewOutgoingContext(context.Background(), md)
wStream, err := wAPI.Watch(ctx)
@ -1901,8 +1902,8 @@ func TestGRPCStreamRequireLeader(t *testing.T) {
clus.Members[1].Restart(t)
clus.Members[2].Restart(t)

clus.waitLeader(t, clus.Members)
time.Sleep(time.Duration(2*electionTicks) * tickDuration)
clus.WaitMembersForLeader(t, clus.Members)
time.Sleep(time.Duration(2*integration.ElectionTicks) * integration.TickDuration)

// new stream should also be OK now after we restarted the other members
wStream, err = wAPI.Watch(ctx)
@ -1922,7 +1923,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) {

// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended.
func TestV3LargeRequests(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
tests := []struct {
maxRequestBytes uint
valueSize int
@ -1936,9 +1937,9 @@ func TestV3LargeRequests(t *testing.T) {
}
for i, test := range tests {
t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes})
defer clus.Terminate(t)
kvcli := toGRPC(clus.Client(0)).KV
kvcli := integration.ToGRPC(clus.Client(0)).KV
reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)}
_, err := kvcli.Put(context.TODO(), reqput)
if !eqErrGRPC(err, test.expectError) {

@ -18,13 +18,14 @@ import (
"context"
"testing"

"go.etcd.io/etcd/tests/v3/framework/integration"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func TestHealthCheck(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := healthpb.NewHealthClient(clus.RandClient().ActiveConnection())
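
// A hedged sketch of the truncated test body (assumed continuation, not
// shown in this diff): exercise the standard gRPC health API over the
// existing client connection.
resp, err := cli.Check(context.TODO(), &healthpb.HealthCheckRequest{})
if err != nil {
	t.Fatal(err)
}
if resp.Status != healthpb.HealthCheckResponse_SERVING {
	t.Fatalf("status = %v, want SERVING", resp.Status)
}
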
@ -6,13 +6,14 @@ import (

"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestKVWithEmptyValue ensures that a get/delete with an empty value, and with WithFromKey/WithPrefix function will return an empty error.
func TestKVWithEmptyValue(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

client := clus.RandClient()

@ -23,6 +23,7 @@ import (

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/tests/v3/framework/integration"
"golang.org/x/sync/errgroup"
)

@ -30,13 +31,13 @@ func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) }
func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) }

func testMoveLeader(t *testing.T, auto bool) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

oldLeadIdx := clus.WaitLeader(t)
oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())
oldLeadID := uint64(clus.Members[oldLeadIdx].Server.ID())

// ensure followers go through leader transition while leadership transfer
idc := make(chan uint64)
@ -45,23 +46,23 @@ func testMoveLeader(t *testing.T, auto bool) {

for i := range clus.Members {
if oldLeadIdx != i {
go func(m *member) {
go func(m *integration.Member) {
select {
case idc <- checkLeaderTransition(m, oldLeadID):
case idc <- integration.CheckLeaderTransition(m, oldLeadID):
case <-stopc:
}
}(clus.Members[i])
}
}

target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID())
target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.ID())
if auto {
err := clus.Members[oldLeadIdx].s.TransferLeadership()
err := clus.Members[oldLeadIdx].Server.TransferLeadership()
if err != nil {
t.Fatal(err)
}
} else {
mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance
mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance
_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
if err != nil {
t.Fatal(err)
@ -98,17 +99,17 @@ func testMoveLeader(t *testing.T, auto bool) {

// TestMoveLeaderError ensures that request to non-leader fail.
func TestMoveLeaderError(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

oldLeadIdx := clus.WaitLeader(t)
followerIdx := (oldLeadIdx + 1) % 3

target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID())
target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.ID())

mvc := toGRPC(clus.Client(followerIdx)).Maintenance
mvc := integration.ToGRPC(clus.Client(followerIdx)).Maintenance
_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader)
@ -117,9 +118,9 @@ func TestMoveLeaderError(t *testing.T) {

// TestMoveLeaderToLearnerError ensures that leader transfer to learner member will fail.
func TestMoveLeaderToLearnerError(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

// we have to add and launch learner member after initial cluster was created, because
@ -128,10 +129,10 @@ func TestMoveLeaderToLearnerError(t *testing.T) {

learners, err := clus.GetLearnerMembers()
if err != nil {
t.Fatalf("failed to get the learner members in cluster: %v", err)
t.Fatalf("failed to get the learner members in Cluster: %v", err)
}
if len(learners) != 1 {
t.Fatalf("added 1 learner to cluster, got %d", len(learners))
t.Fatalf("added 1 learner to Cluster, got %d", len(learners))
}

learnerID := learners[0].ID
@ -150,19 +151,19 @@ func TestMoveLeaderToLearnerError(t *testing.T) {
// TestTransferLeadershipWithLearner ensures TransferLeadership does not timeout due to learner is
// automatically picked by leader as transferee.
func TestTransferLeadershipWithLearner(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

clus.AddAndLaunchLearnerMember(t)

learners, err := clus.GetLearnerMembers()
if err != nil {
t.Fatalf("failed to get the learner members in cluster: %v", err)
t.Fatalf("failed to get the learner members in Cluster: %v", err)
}
if len(learners) != 1 {
t.Fatalf("added 1 learner to cluster, got %d", len(learners))
t.Fatalf("added 1 learner to Cluster, got %d", len(learners))
}

leaderIdx := clus.WaitLeader(t)
@ -170,7 +171,7 @@ func TestTransferLeadershipWithLearner(t *testing.T) {
go func() {
// note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil.
// Leadership transfer is skipped in cluster with 1 voting member.
errCh <- clus.Members[leaderIdx].s.TransferLeadership()
errCh <- clus.Members[leaderIdx].Server.TransferLeadership()
}()
select {
case err := <-errCh:
@ -183,10 +184,10 @@ func TestTransferLeadershipWithLearner(t *testing.T) {
}

func TestFirstCommitNotification(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
ctx := context.Background()
clusterSize := 3
cluster := NewClusterV3(t, &ClusterConfig{Size: clusterSize})
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
defer cluster.Terminate(t)

oldLeaderIdx := cluster.WaitLeader(t)
@ -197,7 +198,7 @@ func TestFirstCommitNotification(t *testing.T) {

notifiers := make(map[int]<-chan struct{}, clusterSize)
for i, clusterMember := range cluster.Members {
notifiers[i] = clusterMember.s.FirstCommitInTermNotify()
notifiers[i] = clusterMember.Server.FirstCommitInTermNotify()
}

_, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderId)
@ -215,7 +216,7 @@ func TestFirstCommitNotification(t *testing.T) {
}

// It's guaranteed now that leader contains the 'foo'->'bar' index entry.
leaderAppliedIndex := cluster.Members[newLeaderIdx].s.AppliedIndex()
leaderAppliedIndex := cluster.Members[newLeaderIdx].Server.AppliedIndex()

ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
@ -238,13 +239,13 @@ func TestFirstCommitNotification(t *testing.T) {
func checkFirstCommitNotification(
ctx context.Context,
t testing.TB,
member *member,
member *integration.Member,
leaderAppliedIndex uint64,
notifier <-chan struct{},
) error {
// wait until server applies all the changes of leader
for member.s.AppliedIndex() < leaderAppliedIndex {
t.Logf("member.s.AppliedIndex():%v <= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex)
for member.Server.AppliedIndex() < leaderAppliedIndex {
t.Logf("member.Server.AppliedIndex():%v <= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex)
select {
case <-ctx.Done():
return ctx.Err()
@ -262,7 +263,7 @@ func checkFirstCommitNotification(
)
}
default:
t.Logf("member.s.AppliedIndex():%v >= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex)
t.Logf("member.Server.AppliedIndex():%v >= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex)
return fmt.Errorf(
"notification was not triggered, member ID: %d",
member.ID(),
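
The notifier plumbing above reduces to a small wait helper; a hedged sketch using only identifiers visible in this hunk:

// waitFirstCommit blocks until the member signals the first commit of the
// current term, or until the context expires.
func waitFirstCommit(ctx context.Context, m *integration.Member) error {
	select {
	case <-m.Server.FirstCommitInTermNotify():
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
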
@ -24,6 +24,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/tests/v3/framework/integration"

"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
@ -34,13 +35,13 @@ import (
// to the primary lessor, refresh the leases and start to manage leases.
// TODO: use customized clock to make this test go faster?
func TestV3LeasePromote(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)

// create lease
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3})
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3})
ttl := time.Duration(lresp.TTL) * time.Second
afterGrant := time.Now()
if err != nil {
@ -54,19 +55,19 @@ func TestV3LeasePromote(t *testing.T) {
time.Sleep(time.Until(afterGrant.Add(ttl - time.Second)))

// kill the current leader, all leases should be refreshed.
toStop := clus.waitLeader(t, clus.Members)
toStop := clus.WaitMembersForLeader(t, clus.Members)
beforeStop := time.Now()
clus.Members[toStop].Stop(t)

var toWait []*member
var toWait []*integration.Member
for i, m := range clus.Members {
if i != toStop {
toWait = append(toWait, m)
}
}
clus.waitLeader(t, toWait)
clus.WaitMembersForLeader(t, toWait)
clus.Members[toStop].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)
afterReelect := time.Now()

// ensure lease is refreshed by waiting for a "long" time.
@ -96,9 +97,9 @@ func TestV3LeasePromote(t *testing.T) {

// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
lc := toGRPC(clus.RandClient()).Lease
integration.BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error {
lc := integration.ToGRPC(clus.RandClient()).Lease
_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
return err
})
@ -106,12 +107,12 @@ func TestV3LeaseRevoke(t *testing.T) {

// TestV3LeaseGrantById ensures leases may be created by a given id.
func TestV3LeaseGrantByID(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

// create fixed lease
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if err != nil {
@ -122,7 +123,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
}

// create duplicate fixed lease
_, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
_, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {
@ -130,7 +131,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
}

// create fresh fixed lease
lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
lresp, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 2, TTL: 1})
if err != nil {
@ -143,13 +144,13 @@ func TestV3LeaseGrantByID(t *testing.T) {

// TestV3LeaseExpire ensures a key is deleted once a key expires.
func TestV3LeaseExpire(t *testing.T) {
BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
integration.BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error {
// let lease lapse; wait for deleted key

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wStream, err := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, err := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if err != nil {
return err
}
@ -195,9 +196,9 @@ func TestV3LeaseExpire(t *testing.T) {

// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
lc := toGRPC(clus.RandClient()).Lease
integration.BeforeTest(t)
testLeaseRemoveLeasedKey(t, func(clus *integration.ClusterV3, leaseID int64) error {
lc := integration.ToGRPC(clus.RandClient()).Lease
lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
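
// A hedged sketch of the elided keepalive round trip (an assumption, not
// the file's actual continuation): one Send/Recv refreshes the lease TTL.
stream, serr := lc.LeaseKeepAlive(ctx)
if serr != nil {
	return serr
}
if err := stream.Send(lreq); err != nil {
	return err
}
kresp, err := stream.Recv()
if err != nil {
	return err
}
if kresp.ID != leaseID {
	return fmt.Errorf("keepalive ID = %d, want %d", kresp.ID, leaseID)
}
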
@ -229,11 +230,11 @@ func TestV3LeaseKeepAlive(t *testing.T) {
// TestV3LeaseCheckpoint ensures a lease checkpoint results in a remaining TTL being persisted
// across leader elections.
func TestV3LeaseCheckpoint(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

var ttl int64 = 300
leaseInterval := 2 * time.Second
clus := NewClusterV3(t, &ClusterConfig{
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
EnableLeaseCheckpoint: true,
LeaseCheckpointInterval: leaseInterval,
@ -244,7 +245,7 @@ func TestV3LeaseCheckpoint(t *testing.T) {
// create lease
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := toGRPC(clus.RandClient())
c := integration.ToGRPC(clus.RandClient())
lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: ttl})
if err != nil {
t.Fatal(err)
@ -257,10 +258,10 @@ func TestV3LeaseCheckpoint(t *testing.T) {
leaderId := clus.WaitLeader(t)
leader := clus.Members[leaderId]
leader.Stop(t)
time.Sleep(time.Duration(3*electionTicks) * tickDuration)
time.Sleep(time.Duration(3*integration.ElectionTicks) * integration.TickDuration)
leader.Restart(t)
newLeaderId := clus.WaitLeader(t)
c2 := toGRPC(clus.Client(newLeaderId))
c2 := integration.ToGRPC(clus.Client(newLeaderId))

time.Sleep(250 * time.Millisecond)

@ -284,14 +285,14 @@ func TestV3LeaseCheckpoint(t *testing.T) {

// TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster.
func TestV3LeaseExists(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

// create lease
ctx0, cancel0 := context.WithCancel(context.Background())
defer cancel0()
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
ctx0,
&pb.LeaseGrantRequest{TTL: 30})
if err != nil {
@ -308,8 +309,8 @@ func TestV3LeaseExists(t *testing.T) {

// TestV3LeaseLeases creates leases and confirms list RPC fetches created ones.
func TestV3LeaseLeases(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx0, cancel0 := context.WithCancel(context.Background())
@ -318,7 +319,7 @@ func TestV3LeaseLeases(t *testing.T) {
// create leases
ids := []int64{}
for i := 0; i < 5; i++ {
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
ctx0,
&pb.LeaseGrantRequest{TTL: 30})
if err != nil {
@ -330,7 +331,7 @@ func TestV3LeaseLeases(t *testing.T) {
ids = append(ids, lresp.ID)
}

lresp, err := toGRPC(clus.RandClient()).Lease.LeaseLeases(
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases(
context.Background(),
&pb.LeaseLeasesRequest{})
if err != nil {
@ -358,8 +359,8 @@ func TestV3LeaseTimeToLiveStress(t *testing.T) {
}

func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@ -368,7 +369,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient

for i := 0; i < 30; i++ {
for j := 0; j < 3; j++ {
go func(i int) { errc <- stresser(ctx, toGRPC(clus.Client(i)).Lease) }(j)
go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clus.Client(i)).Lease) }(j)
}
}

@ -429,8 +430,8 @@ func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr erro
}

func TestV3PutOnNonExistLease(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx, cancel := context.WithCancel(context.Background())
@ -438,7 +439,7 @@ func TestV3PutOnNonExistLease(t *testing.T) {

badLeaseID := int64(0x12345678)
putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID}
_, err := toGRPC(clus.RandClient()).KV.Put(ctx, putr)
_, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr)
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound)
}
@ -447,13 +448,13 @@ func TestV3PutOnNonExistLease(t *testing.T) {
// TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic
// related issue https://github.com/etcd-io/etcd/issues/6537
func TestV3GetNonExistLease(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
lc := toGRPC(clus.RandClient()).Lease
lc := integration.ToGRPC(clus.RandClient()).Lease
lresp, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10})
if err != nil {
t.Errorf("failed to create lease %v", err)
@ -468,12 +469,12 @@ func TestV3GetNonExistLease(t *testing.T) {
Keys: true,
}

for _, client := range clus.clients {
for _, client := range clus.Clients {
// quorum-read to ensure revoke completes before TimeToLive
if _, err := toGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil {
if _, err := integration.ToGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil {
t.Fatal(err)
}
resp, err := toGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr)
resp, err := integration.ToGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr)
if err != nil {
t.Fatalf("expected non nil error, but go %v", err)
}
@ -485,8 +486,8 @@ func TestV3GetNonExistLease(t *testing.T) {

// TestV3LeaseSwitch tests a key can be switched from one lease to another.
func TestV3LeaseSwitch(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

key := "foo"
@ -494,34 +495,34 @@ func TestV3LeaseSwitch(t *testing.T) {
// create lease
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}

// attach key on lease1 then switch it to lease2
put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID}
_, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1)
_, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1)
if err != nil {
t.Fatal(err)
}
put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID}
_, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2)
_, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2)
if err != nil {
t.Fatal(err)
}

// revoke lease1 should not remove key
_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
_, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
if err != nil {
t.Fatal(err)
}
rreq := &pb.RangeRequest{Key: []byte("foo")}
rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
@ -530,11 +531,11 @@ func TestV3LeaseSwitch(t *testing.T) {
}

// revoke lease2 should remove key
_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
_, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
if err != nil {
t.Fatal(err)
}
rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
@ -547,14 +548,14 @@ func TestV3LeaseSwitch(t *testing.T) {
// election timeout after it loses its quorum. And the new leader extends the TTL of
// the lease to at least TTL + election timeout.
func TestV3LeaseFailover(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

toIsolate := clus.waitLeader(t, clus.Members)
toIsolate := clus.WaitMembersForLeader(t, clus.Members)

lc := toGRPC(clus.Client(toIsolate)).Lease
lc := integration.ToGRPC(clus.Client(toIsolate)).Lease

// create lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5})
@ -595,7 +596,7 @@ func TestV3LeaseFailover(t *testing.T) {
}

clus.Members[toIsolate].Resume()
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)

// lease should not expire at the last received expire deadline.
time.Sleep(time.Until(expectedExp) - 500*time.Millisecond)
@ -608,12 +609,12 @@ func TestV3LeaseFailover(t *testing.T) {
// TestV3LeaseRequireLeader ensures that a Recv will get a leader
// loss error if there is no leader.
func TestV3LeaseRequireLeader(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

lc := toGRPC(clus.Client(0)).Lease
lc := integration.ToGRPC(clus.Client(0)).Lease
clus.Members[1].Stop(t)
clus.Members[2].Stop(t)

@ -648,13 +649,13 @@ const fiveMinTTL int64 = 300

// TestV3LeaseRecoverAndRevoke ensures that revoking a lease after restart deletes the attached key.
func TestV3LeaseRecoverAndRevoke(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
kvc := integration.ToGRPC(clus.Client(0)).KV
lsc := integration.ToGRPC(clus.Client(0)).Lease

lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
@ -671,16 +672,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)

// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
lsc = toGRPC(nc).Lease
kvc = integration.ToGRPC(nc).KV
lsc = integration.ToGRPC(nc).Lease
defer nc.Close()

// revoke should delete the key
@ -699,13 +700,13 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {

// TestV3LeaseRevokeAndRecover ensures that revoked key stays deleted after restart.
func TestV3LeaseRevokeAndRecover(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
kvc := integration.ToGRPC(clus.Client(0)).KV
lsc := integration.ToGRPC(clus.Client(0)).Lease

lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
@ -728,15 +729,15 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
// restart server and ensure revoked key doesn't exist
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)

// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
kvc = integration.ToGRPC(nc).KV
defer nc.Close()

rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
@ -751,13 +752,13 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
// TestV3LeaseRecoverKeyWithDetachedLease ensures that revoking a detached lease after restart
// does not delete the key.
func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
kvc := integration.ToGRPC(clus.Client(0)).KV
lsc := integration.ToGRPC(clus.Client(0)).Lease

lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
@ -780,16 +781,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)

// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
lsc = toGRPC(nc).Lease
kvc = integration.ToGRPC(nc).KV
lsc = integration.ToGRPC(nc).Lease
defer nc.Close()

// revoke the detached lease
@ -807,13 +808,13 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
}

func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
kvc := integration.ToGRPC(clus.Client(0)).KV
lsc := integration.ToGRPC(clus.Client(0)).Lease

var leaseIDs []int64
for i := 0; i < 2; i++ {
@ -835,7 +836,7 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
clus.WaitMembersForLeader(t, clus.Members)
for i, leaseID := range leaseIDs {
if !leaseExist(t, clus, leaseID) {
t.Errorf("#%d: unexpected lease not exists", i)
@ -844,12 +845,12 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {

// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
lsc = toGRPC(nc).Lease
kvc = integration.ToGRPC(nc).KV
lsc = integration.ToGRPC(nc).Lease
defer nc.Close()

// revoke the old lease
@ -881,9 +882,9 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
}

// acquireLeaseAndKey creates a new lease and creates an attached key.
func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
func acquireLeaseAndKey(clus *integration.ClusterV3, key string) (int64, error) {
// create lease
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{TTL: 1})
if err != nil {
@ -894,7 +895,7 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
}
// attach to key
put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
if _, err := toGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil {
if _, err := integration.ToGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil {
return 0, err
}
return lresp.ID, nil
@ -902,8 +903,8 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {

// testLeaseRemoveLeasedKey performs some action while holding a lease with an
// attached key "foo", then confirms the key is gone.
func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.ClusterV3, int64) error) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

leaseID, err := acquireLeaseAndKey(clus, "foo")
@ -917,7 +918,7 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {

// confirm no key
rreq := &pb.RangeRequest{Key: []byte("foo")}
rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
@ -926,8 +927,8 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
}
}

func leaseExist(t *testing.T, clus *ClusterV3, leaseID int64) bool {
l := toGRPC(clus.RandClient()).Lease
func leaseExist(t *testing.T, clus *integration.ClusterV3, leaseID int64) bool {
l := integration.ToGRPC(clus.RandClient()).Lease

_, err := l.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{ID: leaseID, TTL: 5})
if err == nil {
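
	// A hedged reconstruction of the truncated body (assumed continuation):
	// the grant succeeded, so no live lease existed; undo the probe grant.
	if _, rerr := l.LeaseRevoke(context.Background(), &pb.LeaseRevokeRequest{ID: leaseID}); rerr != nil {
		t.Fatal(rerr)
	}
	return false
}
// A live lease makes the fixed-ID grant fail with ErrGRPCLeaseExist.
return eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist)
}
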
@ -24,13 +24,14 @@ import (
"go.etcd.io/etcd/client/pkg/v3/testutil"
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestSTMConflict tests that conflicts are retried.
func TestSTMConflict(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

etcdc := clus.RandClient()
@ -96,9 +97,9 @@ func TestSTMConflict(t *testing.T) {

// TestSTMPutNewKey confirms a STM put on a new key is visible after commit.
func TestSTMPutNewKey(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

etcdc := clus.RandClient()
@ -123,9 +124,9 @@ func TestSTMPutNewKey(t *testing.T) {

// TestSTMAbort tests that an aborted txn does not modify any keys.
func TestSTMAbort(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

etcdc := clus.RandClient()
@ -154,9 +155,9 @@ func TestSTMAbort(t *testing.T) {

// TestSTMSerialize tests that serialization is honored when serializable.
func TestSTMSerialize(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

etcdc := clus.RandClient()
@ -217,9 +218,9 @@ func TestSTMSerialize(t *testing.T) {
// TestSTMApplyOnConcurrentDeletion ensures that concurrent key deletion
// fails the first GET revision comparison within STM; trigger retry.
func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

etcdc := clus.RandClient()
@ -266,9 +267,9 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
}

func TestSTMSerializableSnapshotPut(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

cli := clus.Client(0)
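
The STM test bodies are elided in this diff; for orientation, a hedged sketch of the concurrency.STM pattern they exercise (the key names are illustrative):

// moveValue transfers the value under "src" to "dst" in one STM
// transaction; on a conflicting concurrent write, the apply func retries.
func moveValue(cli *v3.Client) error {
	apply := func(stm concurrency.STM) error {
		v := stm.Get("src")
		stm.Put("dst", v)
		stm.Del("src")
		return nil
	}
	_, err := concurrency.NewSTM(cli, apply)
	return err
}
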
@ -21,6 +21,7 @@ import (
"time"

"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
)

@ -30,7 +31,7 @@ func TestTLSClientCipherSuitesMismatch(t *testing.T) { testTLSCipherSuites(t, fa
// testTLSCipherSuites ensures mismatching client-side cipher suite
// fail TLS handshake with the server.
func testTLSCipherSuites(t *testing.T, valid bool) {
BeforeTest(t)
integration.BeforeTest(t)

cipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
@ -40,21 +41,21 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
}
srvTLS, cliTLS := testTLSInfo, testTLSInfo
srvTLS, cliTLS := integration.TestTLSInfo, integration.TestTLSInfo
if valid {
srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites, cipherSuites
} else {
srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:]
}

clus := NewClusterV3(t, &ClusterConfig{Size: 1, ClientTLS: &srvTLS})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS})
defer clus.Terminate(t)

cc, err := cliTLS.ClientConfig()
if err != nil {
t.Fatal(err)
}
cli, cerr := NewClient(t, clientv3.Config{
cli, cerr := integration.NewClient(t, clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
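
The mismatch branch above works by handing server and client disjoint suite sets; a minimal sketch (assuming integration.TestTLSInfo is a transport.TLSInfo, as in the reload tests earlier):

// mismatchedTLS returns server/client configs that cannot complete a
// TLS handshake because their cipher suite sets are disjoint.
func mismatchedTLS() (srvTLS, cliTLS transport.TLSInfo) {
	srvTLS, cliTLS = integration.TestTLSInfo, integration.TestTLSInfo
	srvTLS.CipherSuites = []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}
	cliTLS.CipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305}
	return srvTLS, cliTLS
}
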
@ -21,13 +21,14 @@ import (
"time"

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// MustFetchNotEmptyMetric attempts to fetch given 'metric' from 'member',
// waiting for not-empty value or 'timeout'.
func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeout <-chan time.Time) string {
func MustFetchNotEmptyMetric(tb testing.TB, member *integration.Member, metric string, timeout <-chan time.Time) string {
metricValue := ""
tick := time.Tick(tickDuration)
tick := time.Tick(integration.TickDuration)
for metricValue == "" {
tb.Logf("Waiting for metric: %v", metric)
select {
@ -50,9 +51,9 @@ func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeo
// that were created in synced watcher group in the first place.
// TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision"
func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SnapshotCount: 10,
SnapshotCatchUpEntries: 5,
@ -62,7 +63,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
// spawn a watcher before shutdown, and put it in synced watcher
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx)
wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx)
if errW != nil {
t.Fatal(errW)
}
@ -79,13 +80,13 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
}

clus.Members[0].InjectPartition(t, clus.Members[1:]...)
initialLead := clus.waitLeader(t, clus.Members[1:])
t.Logf("elected lead: %v", clus.Members[initialLead].s.ID())
initialLead := clus.WaitMembersForLeader(t, clus.Members[1:])
t.Logf("elected lead: %v", clus.Members[initialLead].Server.ID())
t.Logf("sleeping for 2 seconds")
time.Sleep(2 * time.Second)
t.Logf("sleeping for 2 seconds DONE")

kvc := toGRPC(clus.Client(1)).KV
kvc := integration.ToGRPC(clus.Client(1)).KV

// to trigger snapshot from the leader to the stopped follower
for i := 0; i < 15; i++ {
@ -98,7 +99,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
// trigger snapshot send from leader to this slow follower
// which then calls watchable store Restore
clus.Members[0].RecoverPartition(t, clus.Members[1:]...)
// We don't expect leadership change here, just recompute the leader's index
// We don't expect leadership change here, just recompute the leader'Server index
// within clus.Members list.
lead := clus.WaitLeader(t)

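A short usage sketch for MustFetchNotEmptyMetric above (illustrative; the metric name is an assumption, not taken from this diff):

// logHasLeader waits up to ten seconds for the member to report the
// etcd_server_has_leader metric, then logs its value.
func logHasLeader(t *testing.T, m *integration.Member) {
	v := MustFetchNotEmptyMetric(t, m, "etcd_server_has_leader", time.After(10*time.Second))
	t.Logf("etcd_server_has_leader = %s", v)
}
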
@ -27,11 +27,12 @@ import (
|
||||
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
|
||||
"go.etcd.io/etcd/api/v3/mvccpb"
|
||||
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
|
||||
"go.etcd.io/etcd/tests/v3/framework/integration"
|
||||
)
|
||||
|
||||
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
|
||||
func TestV3WatchFromCurrentRevision(t *testing.T) {
|
||||
BeforeTest(t)
|
||||
integration.BeforeTest(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@ -206,10 +207,10 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
|
||||
|
||||
for i, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
wAPI := toGRPC(clus.RandClient()).Watch
|
||||
wAPI := integration.ToGRPC(clus.RandClient()).Watch
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
wStream, err := wAPI.Watch(ctx)
|
||||
@ -243,7 +244,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
ch := make(chan struct{}, 1)
go func() {
for _, k := range tt.putKeys {
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
if _, err := kvc.Put(context.TODO(), req); err != nil {
t.Errorf("#%d: couldn't put key (%v)", i, err)
@ -291,12 +292,12 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {

// TestV3WatchFutureRevision tests Watch APIs from a future revision.
func TestV3WatchFutureRevision(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

wAPI := toGRPC(clus.RandClient()).Watch
wAPI := integration.ToGRPC(clus.RandClient()).Watch
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, err := wAPI.Watch(ctx)
@ -322,7 +323,7 @@ func TestV3WatchFutureRevision(t *testing.T) {
t.Fatalf("create %v, want %v", cresp.Created, true)
}

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
for {
req := &pb.PutRequest{Key: wkey, Value: []byte("bar")}
resp, rerr := kvc.Put(context.TODO(), req)
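A future-revision watch differs only in that the create request names a StartRevision the store has not reached yet; the server parks the watcher until writes catch up. A sketch of the request, with the revision number illustrative:

wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
	CreateRequest: &pb.WatchCreateRequest{
		Key:           wkey,
		StartRevision: 100, // assumed to be ahead of the current store revision
	},
}}
if err := wStream.Send(wreq); err != nil {
	t.Fatal(err)
}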
@ -352,12 +353,12 @@ func TestV3WatchFutureRevision(t *testing.T) {

// TestV3WatchWrongRange tests wrong range does not create watchers.
func TestV3WatchWrongRange(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

wAPI := toGRPC(clus.RandClient()).Watch
wAPI := integration.ToGRPC(clus.RandClient()).Watch
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, err := wAPI.Watch(ctx)
@ -397,23 +398,23 @@ func TestV3WatchWrongRange(t *testing.T) {

// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
func TestV3WatchCancelSynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchCancel(t, 0)
}

// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
func TestV3WatchCancelUnsynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchCancel(t, 1)
}

func testV3WatchCancel(t *testing.T, startRev int64) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if errW != nil {
t.Fatalf("wAPI.Watch error: %v", errW)
}
@ -448,7 +449,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
}

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
t.Errorf("couldn't put key (%v)", err)
}
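The cancellation path that testV3WatchCancel exercises is driven by a cancel request carrying the watcher id; a sketch, assuming wid holds the id from the create response:

// Ask the server to drop the watcher; the reply for this request
// comes back with Canceled set to true.
creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
	CancelRequest: &pb.WatchCancelRequest{WatchId: wid},
}}
if err := wStream.Send(creq); err != nil {
	t.Fatalf("wStream.Send error: %v", err)
}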
@ -463,13 +464,13 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
// TestV3WatchCurrentPutOverlap ensures current watchers receive all events with
// overlapping puts.
func TestV3WatchCurrentPutOverlap(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
@ -482,7 +483,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
if _, err := kvc.Put(context.TODO(), req); err != nil {
t.Errorf("couldn't put key (%v)", err)
@ -547,15 +548,15 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {

// TestV3WatchEmptyKey ensures synced watchers see empty key PUTs as PUT events
func TestV3WatchEmptyKey(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if werr != nil {
t.Fatal(werr)
}
@ -570,7 +571,7 @@ func TestV3WatchEmptyKey(t *testing.T) {
}

// put a key with empty value
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo")}
if _, err := kvc.Put(context.TODO(), preq); err != nil {
t.Fatal(err)
@ -593,12 +594,12 @@ func TestV3WatchEmptyKey(t *testing.T) {
}

func TestV3WatchMultipleWatchersSynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchMultipleWatchers(t, 0)
}

func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchMultipleWatchers(t, 1)
}

@ -607,14 +608,14 @@ func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if errW != nil {
t.Fatalf("wAPI.Watch error: %v", errW)
}
@ -698,23 +699,23 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
}

func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchMultipleEventsTxn(t, 0)
}

func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchMultipleEventsTxn(t, 1)
}

// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
@ -729,7 +730,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
t.Fatalf("create response failed: resp=%v, err=%v", resp, err)
}

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
txn := pb.TxnRequest{}
for i := 0; i < 3; i++ {
ru := &pb.RequestOp{}
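The loop that follows assembles the transaction's success ops; one iteration expands to roughly the fragment below (assumes fmt is imported; the fooN key naming is illustrative):

// Each success op wraps a put in a RequestOp union member.
ru := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{
	RequestPut: &pb.PutRequest{
		Key:   []byte(fmt.Sprintf("foo%d", i)),
		Value: []byte("bar"),
	},
}}
txn.Success = append(txn.Success, ru)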
@ -791,11 +792,11 @@ func (evs eventsSortByKey) Less(i, j int) bool {
}

func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV

if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
t.Fatalf("couldn't put key (%v)", err)
@ -806,7 +807,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
@ -870,22 +871,22 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
}

func TestV3WatchMultipleStreamsSynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchMultipleStreams(t, 0)
}

func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)
testV3WatchMultipleStreams(t, 1)
}

// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

wAPI := toGRPC(clus.RandClient()).Watch
kvc := toGRPC(clus.RandClient()).KV
wAPI := integration.ToGRPC(clus.RandClient()).Watch
kvc := integration.ToGRPC(clus.RandClient()).KV

streams := make([]pb.Watch_WatchClient, 5)
for i := range streams {
@ -983,13 +984,13 @@ func TestWatchWithProgressNotify(t *testing.T) {
testInterval := 3 * time.Second
defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
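TestWatchWithProgressNotify depends on the watcher opting in to periodic progress events, which the server emits at the interval set via v3rpc.SetProgressReportInterval even when no keys change. A sketch of the opt-in (key illustrative):

wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
	CreateRequest: &pb.WatchCreateRequest{
		Key:            []byte("foo"),
		ProgressNotify: true, // empty WatchResponses arrive on the report interval
	},
}}
if err := wStream.Send(wreq); err != nil {
	t.Fatal(err)
}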
@ -1033,12 +1034,12 @@ func TestWatchWithProgressNotify(t *testing.T) {

// TestV3WatchClose opens many watchers concurrently on multiple streams.
func TestV3WatchClose(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)

c := clus.Client(0)
wapi := toGRPC(c).Watch
wapi := integration.ToGRPC(c).Watch

var wg sync.WaitGroup
wg.Add(100)
@ -1068,15 +1069,15 @@ func TestV3WatchClose(t *testing.T) {

// TestV3WatchWithFilter ensures watcher filters out the events correctly.
func TestV3WatchWithFilter(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if werr != nil {
t.Fatal(werr)
}
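The filter under test is declared on the create request itself; a sketch that suppresses PUT events server-side (key illustrative):

// With NOPUT, this watcher only ever sees delete events for "foo".
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
	CreateRequest: &pb.WatchCreateRequest{
		Key:     []byte("foo"),
		Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT},
	},
}}
if err := ws.Send(wreq); err != nil {
	t.Fatal(err)
}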
@ -1103,7 +1104,7 @@ func TestV3WatchWithFilter(t *testing.T) {
}()

// put a key with empty value
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo")}
if _, err := kvc.Put(context.TODO(), preq); err != nil {
t.Fatal(err)
@ -1137,8 +1138,8 @@ func TestV3WatchWithFilter(t *testing.T) {
}

func TestV3WatchWithPrevKV(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

wctx, wcancel := context.WithCancel(context.Background())
@ -1158,12 +1159,12 @@ func TestV3WatchWithPrevKV(t *testing.T) {
vals: []string{"first", "second"},
}}
for i, tt := range tests {
kvc := toGRPC(clus.RandClient()).KV
kvc := integration.ToGRPC(clus.RandClient()).KV
if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[0])}); err != nil {
t.Fatal(err)
}

ws, werr := toGRPC(clus.RandClient()).Watch.Watch(wctx)
ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(wctx)
if werr != nil {
t.Fatal(werr)
}
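PrevKV is likewise an opt-in flag on the create request, after which every event carries the key-value pair as it stood before the modification. A sketch in terms of the test's tt fixture:

wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
	CreateRequest: &pb.WatchCreateRequest{Key: []byte(tt.key), PrevKv: true},
}}
if err := ws.Send(wreq); err != nil {
	t.Fatal(err)
}
// After the second put, the PUT event exposes the old value:
// resp.Events[0].PrevKv.Value should equal []byte(tt.vals[0]).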
@ -1211,9 +1212,9 @@ func TestV3WatchWithPrevKV(t *testing.T) {

// TestV3WatchCancellation ensures that watch cancellation frees up server resources.
func TestV3WatchCancellation(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@ -1239,7 +1240,7 @@ func TestV3WatchCancellation(t *testing.T) {
}

var expected string
if ThroughProxy {
if integration.ThroughProxy {
// grpc proxy has additional 2 watches open
expected = "3"
} else {
@ -1253,9 +1254,9 @@ func TestV3WatchCancellation(t *testing.T) {

// TestV3WatchCloseCancelRace ensures that watch close doesn't decrement the watcher total too far.
func TestV3WatchCloseCancelRace(t *testing.T) {
BeforeTest(t)
integration.BeforeTest(t)

clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@ -1278,7 +1279,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
}

var expected string
if ThroughProxy {
if integration.ThroughProxy {
// grpc proxy has additional 2 watches open
expected = "2"
} else {
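Both tests then compare expected against the server's watcher gauge. A plausible sketch of that check, assuming the framework keeps the Member.Metric helper these tests use to read a named metric from the member's /metrics endpoint:

minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
if err != nil {
	t.Fatal(err)
}
if minWatches != expected {
	t.Fatalf("expected %s watch, got %s", expected, minWatches)
}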
@ -22,25 +22,26 @@ import (

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestV3ElectionCampaign checks that Campaign will not give
// simultaneous leadership to multiple campaigners.
func TestV3ElectionCampaign(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}

lc := toGRPC(clus.Client(0)).Election
lc := integration.ToGRPC(clus.Client(0)).Election
req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")}
l1, lerr1 := lc.Campaign(context.TODO(), req1)
if lerr1 != nil {
@ -89,11 +90,11 @@ func TestV3ElectionCampaign(t *testing.T) {
// TestV3ElectionObserve checks that an Observe stream receives
// proclamations from different leaders uninterrupted.
func TestV3ElectionObserve(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lc := toGRPC(clus.Client(0)).Election
lc := integration.ToGRPC(clus.Client(0)).Election

// observe leadership events
observec := make(chan struct{}, 1)
@ -125,7 +126,7 @@ func TestV3ElectionObserve(t *testing.T) {
t.Fatalf("observe stream took too long to start")
}

lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
@ -139,7 +140,7 @@ func TestV3ElectionObserve(t *testing.T) {
go func() {
defer close(leader2c)

lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Error(err2)
}
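Campaign returns a leader key that the winner later presents to step down, letting the next campaigner proceed without waiting out the lease TTL. A sketch of the follow-up, assuming the standard Resign RPC on the same election client:

// l1.Leader identifies the current leadership; resigning with it hands
// leadership to the lease2 campaigner blocked in Campaign above.
if _, err := lc.Resign(context.TODO(), &epb.ResignRequest{Leader: l1.Leader}); err != nil {
	t.Fatal(err)
}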
@ -21,25 +21,26 @@ import (

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
"go.etcd.io/etcd/tests/v3/framework/integration"
)

// TestV3LockLockWaiter tests that a client will wait for a lock, then acquire it
// once it is unlocked.
func TestV3LockLockWaiter(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)

lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}

lc := toGRPC(clus.Client(0)).Lock
lc := integration.ToGRPC(clus.Client(0)).Lock
l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID})
if lerr1 != nil {
t.Fatal(lerr1)
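Lock returns a key that uniquely identifies this holder; releasing the lock is a single Unlock call with that key, sketched below against the same v3lock client:

// The waiter blocked on Lock with lease2 acquires the lock as soon as
// this Unlock returns.
if _, err := lc.Unlock(context.TODO(), &lockpb.UnlockRequest{Key: l1.Key}); err != nil {
	t.Fatal(err)
}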