tests: Make using bridge optional

commit 451eb5d711
parent f2dd5d80a1
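This commit makes the test bridge (a proxy the integration harness inserts between the client and a member's gRPC server) opt-in instead of always-on: ClusterConfig gains a UseBridge flag, and only tests that stop or restart members, or that manipulate connections, set it. A minimal sketch of the opt-in inside a test, using the integration package names that appear in the diff below:

    // Only tests that need to break client connections, or need them to
    // survive a member stop/restart, opt into the bridge.
    clus := integration.NewClusterV3(t, &integration.ClusterConfig{
        Size:      3,
        UseBridge: true, // field introduced by this commit
    })
    defer clus.Terminate(t)

All other tests keep the default (no bridge), so their clients dial the member's gRPC socket directly.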
@@ -38,6 +38,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
 		Size: 2,
 		GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
+		UseBridge: true,
 	})
 	defer clus.Terminate(t)
 
@@ -170,6 +171,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
 		Size: 2,
 		SkipCreatingClient: true,
+		UseBridge: true,
 	})
 	defer clus.Terminate(t)
 
@@ -35,6 +35,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
 		Size: 3,
 		SkipCreatingClient: true,
+		UseBridge: true,
 	})
 	defer clus.Terminate(t)
 
@@ -278,6 +279,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	cfg := &integration.ClusterConfig{
 		Size: 2,
 		SkipCreatingClient: true,
+		UseBridge: true,
 	}
 	if linearizable {
 		cfg.Size = 3
@@ -712,7 +712,7 @@ func TestKVGetRetry(t *testing.T) {
 	integration.BeforeTest(t)
 
 	clusterSize := 3
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// because killing leader and following election
@@ -765,7 +765,7 @@ func TestKVGetRetry(t *testing.T) {
 func TestKVPutFailGetRetry(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -876,7 +876,7 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
 // in the presence of network errors.
 func TestKVPutAtMostOnce(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
@@ -190,7 +190,7 @@ func TestLeaseKeepAliveHandleFailure(t *testing.T) {
 
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// TODO: change this line to get a cluster client
@@ -416,7 +416,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
 func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -462,7 +462,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
 func TestLeaseKeepAliveInitTimeout(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -495,7 +495,7 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
 func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -530,7 +530,7 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
 func TestLeaseTimeToLive(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	c := clus.RandClient()
@@ -656,7 +656,7 @@ func TestLeaseLeases(t *testing.T) {
 func TestLeaseRenewLostQuorum(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -728,7 +728,7 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
 // transient cluster failure.
 func TestV3LeaseFailureOverlap(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 
 	numReqs := 5
@@ -782,7 +782,7 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
 func TestLeaseWithRequireLeader(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 
 	c := clus.Client(0)
@@ -195,7 +195,7 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
 // TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased.
 func TestLeasingGetNoLeaseTTL(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -224,7 +224,7 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) {
 // when the etcd cluster is partitioned.
 func TestLeasingGetSerializable(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -326,7 +326,7 @@ func TestLeasingRevGet(t *testing.T) {
 // TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server.
 func TestLeasingGetWithOpts(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -418,7 +418,7 @@ func TestLeasingConcurrentPut(t *testing.T) {
 
 func TestLeasingDisconnectedGet(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -550,7 +550,7 @@ func TestLeasingOverwriteResponse(t *testing.T) {
 
 func TestLeasingOwnerPutResponse(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -617,7 +617,7 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
 
 func TestLeasingTxnOwnerGet(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	client := clus.Client(0)
@@ -773,7 +773,7 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
 
 func TestLeasingTxnOwnerIf(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -867,7 +867,7 @@ func TestLeasingTxnOwnerIf(t *testing.T) {
 
 func TestLeasingTxnCancel(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1085,7 +1085,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
 
 func TestLeasingOwnerPutError(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1106,7 +1106,7 @@ func TestLeasingOwnerPutError(t *testing.T) {
 
 func TestLeasingOwnerDeleteError(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1127,7 +1127,7 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
 
 func TestLeasingNonOwnerPutError(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@@ -1201,7 +1201,7 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
 
 func TestLeasingDeleteRangeBounds(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
@@ -1376,7 +1376,7 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
 // disconnected when trying to submit revoke txn.
 func TestLeasingReconnectOwnerRevoke(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@@ -1437,7 +1437,7 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
 // disconnected and the watch is compacted.
 func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@@ -1490,7 +1490,7 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
 // not cause inconsistency between the server and the client.
 func TestLeasingReconnectOwnerConsistency(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1650,7 +1650,7 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
 // TestLeasingReconnectTxn checks that Txn is resilient to disconnects.
 func TestLeasingReconnectTxn(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1686,7 +1686,7 @@ func TestLeasingReconnectTxn(t *testing.T) {
 // not cause inconsistency between the server and the client.
 func TestLeasingReconnectNonOwnerGet(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1814,7 +1814,7 @@ func TestLeasingDo(t *testing.T) {
 
 func TestLeasingTxnOwnerPutBranch(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@@ -1908,7 +1908,7 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
 
 func TestLeasingSessionExpire(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
@@ -1984,7 +1984,7 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
 	for i := range tests {
 		t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
 			integration.BeforeTest(t)
-			clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+			clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 			defer clus.Terminate(t)
 
 			lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
@@ -192,7 +192,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
 func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// take about 1-second to read snapshot
@@ -30,7 +30,7 @@ func TestDetectKvOrderViolation(t *testing.T) {
 	var errOrderViolation = errors.New("DetectedOrderViolation")
 
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cfg := clientv3.Config{
@@ -97,7 +97,7 @@ func TestDetectTxnOrderViolation(t *testing.T) {
 	var errOrderViolation = errors.New("DetectedOrderViolation")
 
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cfg := clientv3.Config{
@@ -80,7 +80,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 
 func TestUnresolvableOrderViolation(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true})
 	defer clus.Terminate(t)
 	cfg := clientv3.Config{
 		Endpoints: []string{
@@ -53,7 +53,7 @@ func TestTxnError(t *testing.T) {
 func TestTxnWriteFail(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -103,7 +103,7 @@ func TestTxnReadRetry(t *testing.T) {
 
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kv := clus.Client(0)
@@ -47,7 +47,7 @@ type watchctx struct {
 func runWatchTest(t *testing.T, f watcherTest) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	wclientMember := rand.Intn(3)
@@ -348,7 +348,7 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
 
 func TestWatchResumeInitRev(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -404,7 +404,7 @@ func TestWatchResumeInitRev(t *testing.T) {
 func TestWatchResumeCompacted(t *testing.T) {
 	integration.BeforeTest(t)
 
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// create a waiting watcher at rev 1
@@ -955,7 +955,7 @@ func TestWatchWithCreatedNotification(t *testing.T) {
 func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
 	integration.BeforeTest(t)
 
-	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer cluster.Terminate(t)
 
 	client := cluster.RandClient()
@@ -1063,7 +1063,7 @@ func TestWatchOverlapDropConnContextCancel(t *testing.T) {
 
 func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	n := 100
@@ -1154,7 +1154,7 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
 // then closes the watcher interface to ensure correct clean up.
 func TestWatchStressResumeClose(t *testing.T) {
 	integration.BeforeTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
 
@@ -153,6 +153,9 @@ type ClusterConfig struct {
 
 	// UseIP is true to use only IP for gRPC requests.
 	UseIP bool
+	// UseBridge adds bridge between client and grpc server. Should be used in tests that
+	// want to manipulate connection or require connection not breaking despite server stop/restart.
+	UseBridge bool
 
 	EnableLeaseCheckpoint bool
 	LeaseCheckpointInterval time.Duration
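The doc comment above is the contract: set UseBridge when a test must manipulate the client connection, or needs it to survive a member stop/restart. A hypothetical sketch of such a test (TestExampleReconnect is not part of this commit; DropConnections is an existing member helper in this package that resets the bridge, also not part of this diff):

    func TestExampleReconnect(t *testing.T) {
        integration.BeforeTest(t)
        clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
        defer clus.Terminate(t)

        cli := clus.Client(0)
        if _, err := cli.Put(context.TODO(), "k", "v"); err != nil {
            t.Fatal(err)
        }
        clus.Members[0].DropConnections() // severs every in-flight connection via the bridge
        if _, err := cli.Get(context.TODO(), "k"); err != nil {
            t.Fatal(err) // the client is expected to reconnect transparently
        }
    }

Without UseBridge such a test would have nothing to cut: the client is connected straight to the member's gRPC socket.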
@@ -313,6 +316,7 @@ func (c *cluster) mustNewMember(t testutil.TB) *member {
 		clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
 		clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
 		useIP: c.cfg.UseIP,
+		useBridge: c.cfg.UseBridge,
 		enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint,
 		leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval,
 		WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval,
@@ -582,6 +586,7 @@ type member struct {
 	clientMaxCallSendMsgSize int
 	clientMaxCallRecvMsgSize int
 	useIP bool
+	useBridge bool
 
 	isLearner bool
 	closed bool
@@ -605,6 +610,7 @@ type memberConfig struct {
 	clientMaxCallSendMsgSize int
 	clientMaxCallRecvMsgSize int
 	useIP bool
+	useBridge bool
 	enableLeaseCheckpoint bool
 	leaseCheckpointInterval time.Duration
 	WatchProgressNotifyInterval time.Duration
@@ -698,6 +704,7 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member {
 	m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
 	m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
 	m.useIP = mcfg.useIP
+	m.useBridge = mcfg.useBridge
 	m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint
 	m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval
 
@@ -731,32 +738,50 @@ func memberLogger(t testutil.TB, name string) *zap.Logger {
 // listenGRPC starts a grpc server over a unix domain socket on the member
 func (m *member) listenGRPC() error {
 	// prefix with localhost so cert has right domain
-	m.grpcURL = "localhost:" + m.Name
-	m.Logger.Info("LISTEN GRPC", zap.String("m.grpcURL", m.grpcURL), zap.String("m.Name", m.Name))
-	if m.useIP { // for IP-only TLS certs
-		m.grpcURL = "127.0.0.1:" + m.Name
-	}
-	grpcListener, err := transport.NewUnixListener(m.grpcURL)
+	grpcAddr := m.grpcAddr()
+	m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name))
+	grpcListener, err := transport.NewUnixListener(grpcAddr)
 	if err != nil {
-		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcURL, err)
+		return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err)
 	}
-	bridgeAddr := m.grpcURL + "0"
-	bridgeListener, err := transport.NewUnixListener(bridgeAddr)
-	if err != nil {
-		grpcListener.Close()
-		return fmt.Errorf("listen failed on bridge socket %s (%v)", m.grpcURL, err)
-	}
-	m.grpcBridge, err = newBridge(dialer{network: "unix", addr: m.grpcURL}, bridgeListener)
-	if err != nil {
-		bridgeListener.Close()
-		grpcListener.Close()
-		return err
+	m.grpcURL = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + grpcAddr
+	if m.useBridge {
+		_, err = m.addBridge()
+		if err != nil {
+			grpcListener.Close()
+			return err
+		}
 	}
-	m.grpcURL = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + bridgeAddr
 	m.grpcListener = grpcListener
 	return nil
 }
 
+func (m *member) addBridge() (*bridge, error) {
+	grpcAddr := m.grpcAddr()
+	bridgeAddr := grpcAddr + "0"
+	m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name))
+	bridgeListener, err := transport.NewUnixListener(bridgeAddr)
+	if err != nil {
+		return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", grpcAddr, err)
+	}
+	m.grpcBridge, err = newBridge(dialer{network: "unix", addr: grpcAddr}, bridgeListener)
+	if err != nil {
+		bridgeListener.Close()
+		return nil, err
+	}
+	m.grpcURL = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + bridgeAddr
+	return m.grpcBridge, nil
+}
+
+func (m *member) grpcAddr() string {
+	// prefix with localhost so cert has right domain
+	addr := "localhost:" + m.Name
+	if m.useIP { // for IP-only TLS certs
+		addr = "127.0.0.1:" + m.Name
+	}
+	return addr
+}
+
 type dialer struct {
 	network string
 	addr string
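For orientation: the member's gRPC server listens on a unix socket whose address is derived from the member name (grpcAddr), and the bridge, when requested, listens on grpcAddr + "0" and dials the real socket; m.grpcURL is then rewritten to point at the bridge, so clients flow through it without knowing. The bridge implementation itself (newBridge, in bridge.go) is not shown in this diff; conceptually it is a small byte-copying proxy along these lines (a sketch assuming only the standard library's net and io packages, not the actual code):

    // proxy accepts on the bridge address and shuttles bytes to and from the
    // real gRPC socket. The real bridge wraps the two copy loops with hooks so
    // a test can drop, pause, or blackhole traffic on demand.
    func proxy(l net.Listener, dialTo string) {
        for {
            in, err := l.Accept()
            if err != nil {
                return
            }
            out, err := net.Dial("unix", dialTo)
            if err != nil {
                in.Close()
                continue
            }
            go func() { defer out.Close(); io.Copy(out, in) }()
            go func() { defer in.Close(); io.Copy(in, out) }()
        }
    }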
@@ -173,7 +173,7 @@ func testDecreaseClusterSize(t *testing.T, size int) {
 }
 
 func TestForceNewCluster(t *testing.T) {
-	c := NewCluster(t, 3)
+	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
 	c.Launch(t)
 	cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
 	kapi := client.NewKeysAPI(cc)
@@ -283,7 +283,7 @@ func testIssue2746(t *testing.T, members int) {
 func TestIssue2904(t *testing.T) {
 	BeforeTest(t)
 	// start 1-member cluster to ensure member 0 is the leader of the cluster.
-	c := NewCluster(t, 1)
+	c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
 	c.Launch(t)
 	defer c.Terminate(t)
 
@@ -319,7 +319,7 @@ func TestIssue2904(t *testing.T) {
 func TestIssue3699(t *testing.T) {
 	// start a cluster of 3 nodes a, b, c
 	BeforeTest(t)
-	c := NewCluster(t, 3)
+	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
 	c.Launch(t)
 	defer c.Terminate(t)
 
@@ -371,7 +371,7 @@ func TestIssue3699(t *testing.T) {
 // TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
 func TestRejectUnhealthyAdd(t *testing.T) {
 	BeforeTest(t)
-	c := NewCluster(t, 3)
+	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
 	for _, m := range c.Members {
 		m.ServerConfig.StrictReconfigCheck = true
 	}
@@ -415,7 +415,7 @@ func TestRejectUnhealthyAdd(t *testing.T) {
 // if quorum will be lost.
 func TestRejectUnhealthyRemove(t *testing.T) {
 	BeforeTest(t)
-	c := NewCluster(t, 5)
+	c := newCluster(t, &ClusterConfig{Size: 5, UseBridge: true})
 	for _, m := range c.Members {
 		m.ServerConfig.StrictReconfigCheck = true
 	}
@@ -464,7 +464,7 @@ func TestRestartRemoved(t *testing.T) {
 	BeforeTest(t)
 
 	// 1. start single-member cluster
-	c := NewCluster(t, 1)
+	c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
 	for _, m := range c.Members {
 		m.ServerConfig.StrictReconfigCheck = true
 	}
@@ -540,7 +540,7 @@ func clusterMustProgress(t *testing.T, membs []*member) {
 
 func TestSpeedyTerminate(t *testing.T) {
 	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
 	// Stop/Restart so requests will time out on lost leaders
 	for i := 0; i < 3; i++ {
 		clus.Members[i].Stop(t)
@@ -46,7 +46,7 @@ func TestPauseMember(t *testing.T) {
 
 func TestRestartMember(t *testing.T) {
 	BeforeTest(t)
-	c := NewCluster(t, 3)
+	c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
 	c.Launch(t)
 	defer c.Terminate(t)
 
@@ -88,7 +88,7 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
 
 func TestSnapshotAndRestartMember(t *testing.T) {
 	BeforeTest(t)
-	m := mustNewMember(t, memberConfig{name: "snapAndRestartTest"})
+	m := mustNewMember(t, memberConfig{name: "snapAndRestartTest", useBridge: true})
 	m.SnapshotCount = 100
 	m.Launch()
 	defer m.Terminate(t)
@@ -35,7 +35,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
 	BeforeTest(t)
 	quotasize := int64(16 * os.Getpagesize())
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 2})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 2, UseBridge: true})
 	defer clus.Terminate(t)
 	kvc0 := toGRPC(clus.Client(0)).KV
 	kvc1 := toGRPC(clus.Client(1)).KV
@@ -147,7 +147,7 @@ func TestV3AlarmDeactivate(t *testing.T) {
 
 func TestV3CorruptAlarm(t *testing.T) {
 	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	var wg sync.WaitGroup
@@ -61,7 +61,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
 // See https://github.com/etcd-io/etcd/issues/7322 for more detail.
 func TestV3KVInflightRangeRequests(t *testing.T) {
 	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
@@ -88,7 +88,7 @@ func TestV3PutOverwrite(t *testing.T) {
 // TestPutRestart checks if a put after an unrelated member restart succeeds
 func TestV3PutRestart(t *testing.T) {
 	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kvIdx := rand.Intn(3)
@@ -1210,7 +1210,7 @@ func TestV3Hash(t *testing.T) {
 // TestV3HashRestart ensures that hash stays the same after restart.
 func TestV3HashRestart(t *testing.T) {
 	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
@@ -1243,7 +1243,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
 	BeforeTest(t)
 	quotasize := int64(16 * os.Getpagesize())
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
 
 	// Set a quota on one node
 	clus.Members[0].QuotaBackendBytes = quotasize
@@ -1858,7 +1858,7 @@ func TestGRPCRequireLeader(t *testing.T) {
 func TestGRPCStreamRequireLeader(t *testing.T) {
 	BeforeTest(t)
 
-	cfg := ClusterConfig{Size: 3}
+	cfg := ClusterConfig{Size: 3, UseBridge: true}
 	clus := newClusterV3NoClients(t, &cfg)
 	defer clus.Terminate(t)
 
@@ -36,7 +36,7 @@ import (
 func TestV3LeasePromote(t *testing.T) {
 	BeforeTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// create lease
@@ -237,6 +237,7 @@ func TestV3LeaseCheckpoint(t *testing.T) {
 		Size: 3,
 		EnableLeaseCheckpoint: true,
 		LeaseCheckpointInterval: leaseInterval,
+		UseBridge: true,
 	})
 	defer clus.Terminate(t)
 
@@ -649,7 +650,7 @@ const fiveMinTTL int64 = 300
 func TestV3LeaseRecoverAndRevoke(t *testing.T) {
 	BeforeTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kvc := toGRPC(clus.Client(0)).KV
@@ -700,7 +701,7 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
 func TestV3LeaseRevokeAndRecover(t *testing.T) {
 	BeforeTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kvc := toGRPC(clus.Client(0)).KV
@@ -752,7 +753,7 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
 func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
 	BeforeTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kvc := toGRPC(clus.Client(0)).KV
@@ -808,7 +809,7 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
 func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
 	BeforeTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	kvc := toGRPC(clus.Client(0)).KV
@@ -1034,7 +1034,7 @@ func TestWatchWithProgressNotify(t *testing.T) {
 // TestV3WatcMultiOpenhClose opens many watchers concurrently on multiple streams.
 func TestV3WatchClose(t *testing.T) {
 	BeforeTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	c := clus.Client(0)