Mirror of https://github.com/etcd-io/etcd.git, synced 2024-09-27 06:25:44 +00:00
Merge pull request #12411 from ptabor/20201021-move-contrib-recipies

Modularization: Move contrib/recipies to clientv3/experimental/recipies/...

commit 97354af44b
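For downstream code, the visible effect of this move is the import path: the recipes formerly under go.etcd.io/etcd/v3/contrib/recipes are now imported from go.etcd.io/etcd/client/v3/experimental/recipes. Below is a minimal sketch of using one recipe (a Barrier) through the new path; the endpoint and key name are placeholders, not part of this change.

package main

import (
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
)

func main() {
	// Placeholder endpoint; point this at a running etcd cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Hold a barrier; other clients calling Wait() on the same key
	// block until Release() deletes it.
	b := recipe.NewBarrier(cli, "example-barrier")
	if err := b.Hold(); err != nil {
		log.Fatal(err)
	}
	if err := b.Release(); err != nil {
		log.Fatal(err)
	}
}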
@@ -12,27 +12,28 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"testing"
 	"time"
 
 	"go.etcd.io/etcd/client/v3"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
 	"go.etcd.io/etcd/pkg/v3/testutil"
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestBarrierSingleNode(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
-	testBarrier(t, 5, func() *clientv3.Client { return clus.clients[0] })
+	testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }
 
 func TestBarrierMultiNode(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
@@ -12,18 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"testing"
 	"time"
 
 	"go.etcd.io/etcd/client/v3/concurrency"
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestDoubleBarrier(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	waiters := 10
@@ -94,18 +95,18 @@ func TestDoubleBarrier(t *testing.T) {
 }
 
 func TestDoubleBarrierFailover(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	waiters := 10
 	donec := make(chan struct{})
 
-	s0, err := concurrency.NewSession(clus.clients[0])
+	s0, err := concurrency.NewSession(clus.Client(0))
 	if err != nil {
 		t.Error(err)
 	}
 	defer s0.Orphan()
-	s1, err := concurrency.NewSession(clus.clients[0])
+	s1, err := concurrency.NewSession(clus.Client(0))
 	if err != nil {
 		t.Error(err)
 	}
@@ -12,38 +12,38 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"context"
 	"math/rand"
-	"sync"
 	"testing"
 	"time"
 
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
 	"go.etcd.io/etcd/pkg/v3/testutil"
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestMutexLockSingleNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexLock(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func TestMutexLockMultiNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexLock(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
@@ -90,21 +90,21 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
 }
 
 func TestMutexTryLockSingleNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func TestMutexTryLockMultiNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
@@ -156,7 +156,7 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
 // TestMutexSessionRelock ensures that acquiring the same lock with the same
 // session will not result in deadlock.
 func TestMutexSessionRelock(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	session, err := concurrency.NewSession(clus.RandClient())
 	if err != nil {
@@ -180,7 +180,7 @@ func TestMutexSessionRelock(t *testing.T) {
 func TestMutexWaitsOnCurrentHolder(t *testing.T) {
 	defer testutil.AfterTest(t)
 
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cctx := context.Background()
@@ -287,7 +287,7 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
 
 func BenchmarkMutex4Waiters(b *testing.B) {
 	// XXX switch tests to use TB interface
-	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
@@ -295,13 +295,13 @@ func BenchmarkMutex4Waiters(b *testing.B) {
 }
 
 func TestRWMutexSingleNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-	testRWMutex(t, 5, func() *clientv3.Client { return clus.clients[0] })
+	testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }
 
 func TestRWMutexMultiNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
@@ -357,38 +357,3 @@ func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
 		}
 	}
 }
-
-func makeClients(t *testing.T, clients *[]*clientv3.Client, choose func() *member) func() *clientv3.Client {
-	var mu sync.Mutex
-	*clients = nil
-	return func() *clientv3.Client {
-		cli, err := NewClientV3(choose())
-		if err != nil {
-			t.Fatalf("cannot create client: %v", err)
-		}
-		mu.Lock()
-		*clients = append(*clients, cli)
-		mu.Unlock()
-		return cli
-	}
-}
-
-func makeSingleNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
-	return makeClients(t, clients, func() *member {
-		return clus.Members[0]
-	})
-}
-
-func makeMultiNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
-	return makeClients(t, clients, func() *member {
-		return clus.Members[rand.Intn(len(clus.Members))]
-	})
-}
-
-func closeClients(t *testing.T, clients []*clientv3.Client) {
-	for _, cli := range clients {
-		if err := cli.Close(); err != nil {
-			t.Fatal(err)
-		}
-	}
-}
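Note that the unexported helpers deleted above (makeClients, makeSingleNodeClients, makeMultiNodeClients, closeClients) are not dropped: later in this diff they reappear as exported equivalents (MakeSingleNodeClients, MakeMultiNodeClients, CloseClients) in the integration package, selecting members by index instead of by *member, so the relocated recipes_test package can reach them across the package boundary.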
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"fmt"
@@ -20,7 +20,8 @@ import (
 	"sync/atomic"
 	"testing"
 
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 const (
@@ -30,7 +31,7 @@ const (
 
 // TestQueueOneReaderOneWriter confirms the queue is FIFO
 func TestQueueOneReaderOneWriter(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	done := make(chan struct{})
@@ -76,7 +77,7 @@ func TestQueueManyReaderManyWriter(t *testing.T) {
 // BenchmarkQueue benchmarks Queues using many/many readers/writers
 func BenchmarkQueue(b *testing.B) {
 	// XXX switch tests to use TB interface
-	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients)
@@ -85,7 +86,7 @@ func BenchmarkQueue(b *testing.B) {
 
 // TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities.
 func TestPrQueueOneReaderOneWriter(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	// write out five items with random priority
@@ -117,7 +118,7 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) {
 }
 
 func TestPrQueueManyReaderManyWriter(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	rqs := newPriorityQueues(clus, manyQueueClients)
 	wqs := newPriorityQueues(clus, manyQueueClients)
@@ -127,7 +128,7 @@ func TestPrQueueManyReaderManyWriter(t *testing.T) {
 // BenchmarkQueue benchmarks Queues using n/n readers/writers
 func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
 	// XXX switch tests to use TB interface
-	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	rqs := newPriorityQueues(clus, 1)
 	wqs := newPriorityQueues(clus, 1)
@@ -137,12 +138,12 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
 }
 
 func testQueueNReaderMWriter(t *testing.T, n int, m int) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testReadersWriters(t, newQueues(clus, n), newQueues(clus, m))
 }
 
-func newQueues(clus *ClusterV3, n int) (qs []testQueue) {
+func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		qs = append(qs, recipe.NewQueue(etcdc, "q"))
@@ -150,7 +151,7 @@ func newQueues(clus *ClusterV3, n int) (qs []testQueue) {
 	return qs
 }
 
-func newPriorityQueues(clus *ClusterV3, n int) (qs []testQueue) {
+func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")}
@@ -1312,6 +1312,47 @@ func (c *ClusterV3) Client(i int) *clientv3.Client {
 	return c.clients[i]
 }
 
+// NewClientV3 creates a new grpc client connection to the member
+func (c *ClusterV3) NewClientV3(memberIndex int) (*clientv3.Client, error) {
+	return NewClientV3(c.Members[memberIndex])
+}
+
+func makeClients(t *testing.T, clus *ClusterV3, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client {
+	var mu sync.Mutex
+	*clients = nil
+	return func() *clientv3.Client {
+		cli, err := clus.NewClientV3(chooseMemberIndex())
+		if err != nil {
+			t.Fatalf("cannot create client: %v", err)
+		}
+		mu.Lock()
+		*clients = append(*clients, cli)
+		mu.Unlock()
+		return cli
+	}
+}
+
+// MakeSingleNodeClients creates factory of clients that all connect to member 0.
+// All the created clients are put on the 'clients' list. The factory is thread-safe.
+func MakeSingleNodeClients(t *testing.T, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client {
+	return makeClients(t, clus, clients, func() int { return 0 })
+}
+
+// MakeMultiNodeClients creates factory of clients that all connect to random members.
+// All the created clients are put on the 'clients' list. The factory is thread-safe.
+func MakeMultiNodeClients(t *testing.T, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client {
+	return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) })
+}
+
+// CloseClients closes all the clients from the 'clients' list.
+func CloseClients(t *testing.T, clients []*clientv3.Client) {
+	for _, cli := range clients {
+		if err := cli.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
 type grpcAPI struct {
 	// Cluster is the cluster API for the client's connection.
 	Cluster pb.ClusterClient
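The exported factories return a func() *clientv3.Client that a test calls once per waiter; every client handed out is also appended to the shared 'clients' slice so teardown can close them all. A usage sketch from the recipes_test side (the test name is hypothetical; the helpers are the ones added above):

package recipes_test

import (
	"testing"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/tests/v3/integration"
)

// Hypothetical test illustrating the factory pattern added above.
func TestClientFactorySketch(t *testing.T) {
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	newClient := integration.MakeMultiNodeClients(t, clus, &clients)
	defer integration.CloseClients(t, clients)

	// Each call dials a random member and records the client for teardown.
	a, b := newClient(), newClient()
	_, _ = a, b
}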
@@ -32,7 +32,10 @@ func TestElectionWait(t *testing.T) {
 	leaders := 3
 	followers := 3
 	var clients []*clientv3.Client
-	newClient := makeMultiNodeClients(t, clus.cluster, &clients)
+	newClient := MakeMultiNodeClients(t, clus, &clients)
+	defer func() {
+		CloseClients(t, clients)
+	}()
 
 	electedc := make(chan string)
 	nextc := []chan struct{}{}
@@ -100,8 +103,6 @@ func TestElectionWait(t *testing.T) {
 	for i := 0; i < followers; i++ {
 		<-donec
 	}
-
-	closeClients(t, clients)
 }
 
 // TestElectionFailover tests that an election will
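Also worth noting in the election test: client cleanup moves from an explicit closeClients call at the end of the test body to a defer registered right after the factory is created, so the clients are released even if the test fails or returns early.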