diff --git a/contrib/recipes/barrier.go b/client/v3/experimental/recipes/barrier.go
similarity index 100%
rename from contrib/recipes/barrier.go
rename to client/v3/experimental/recipes/barrier.go
diff --git a/contrib/recipes/client.go b/client/v3/experimental/recipes/client.go
similarity index 100%
rename from contrib/recipes/client.go
rename to client/v3/experimental/recipes/client.go
diff --git a/contrib/recipes/doc.go b/client/v3/experimental/recipes/doc.go
similarity index 100%
rename from contrib/recipes/doc.go
rename to client/v3/experimental/recipes/doc.go
diff --git a/contrib/recipes/double_barrier.go b/client/v3/experimental/recipes/double_barrier.go
similarity index 100%
rename from contrib/recipes/double_barrier.go
rename to client/v3/experimental/recipes/double_barrier.go
diff --git a/contrib/recipes/grpc_gateway/user_add.sh b/client/v3/experimental/recipes/grpc_gateway/user_add.sh
similarity index 100%
rename from contrib/recipes/grpc_gateway/user_add.sh
rename to client/v3/experimental/recipes/grpc_gateway/user_add.sh
diff --git a/contrib/recipes/key.go b/client/v3/experimental/recipes/key.go
similarity index 100%
rename from contrib/recipes/key.go
rename to client/v3/experimental/recipes/key.go
diff --git a/contrib/recipes/priority_queue.go b/client/v3/experimental/recipes/priority_queue.go
similarity index 100%
rename from contrib/recipes/priority_queue.go
rename to client/v3/experimental/recipes/priority_queue.go
diff --git a/contrib/recipes/queue.go b/client/v3/experimental/recipes/queue.go
similarity index 100%
rename from contrib/recipes/queue.go
rename to client/v3/experimental/recipes/queue.go
diff --git a/contrib/recipes/rwmutex.go b/client/v3/experimental/recipes/rwmutex.go
similarity index 100%
rename from contrib/recipes/rwmutex.go
rename to client/v3/experimental/recipes/rwmutex.go
diff --git a/contrib/recipes/watch.go b/client/v3/experimental/recipes/watch.go
similarity index 100%
rename from contrib/recipes/watch.go
rename to client/v3/experimental/recipes/watch.go
diff --git a/tests/integration/v3_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go
similarity index 85%
rename from tests/integration/v3_barrier_test.go
rename to tests/integration/clientv3/experimental/recipes/v3_barrier_test.go
index bba58633b..b9dafe3ee 100644
--- a/tests/integration/v3_barrier_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go
@@ -12,27 +12,28 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"testing"
 	"time"
 
 	"go.etcd.io/etcd/client/v3"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
 	"go.etcd.io/etcd/pkg/v3/testutil"
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestBarrierSingleNode(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
-	testBarrier(t, 5, func() *clientv3.Client { return clus.clients[0] })
+	testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }
 
 func TestBarrierMultiNode(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
diff --git a/tests/integration/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go
similarity index 90%
rename from tests/integration/v3_double_barrier_test.go
rename to tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go
index da234d595..73ad8cd3d 100644
--- a/tests/integration/v3_double_barrier_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go
@@ -12,18 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"testing"
 	"time"
 
 	"go.etcd.io/etcd/client/v3/concurrency"
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestDoubleBarrier(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	waiters := 10
@@ -94,18 +95,18 @@ func TestDoubleBarrier(t *testing.T) {
 }
 
 func TestDoubleBarrierFailover(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	waiters := 10
 	donec := make(chan struct{})
 
-	s0, err := concurrency.NewSession(clus.clients[0])
+	s0, err := concurrency.NewSession(clus.Client(0))
 	if err != nil {
 		t.Error(err)
 	}
 	defer s0.Orphan()
-	s1, err := concurrency.NewSession(clus.clients[0])
+	s1, err := concurrency.NewSession(clus.Client(0))
 	if err != nil {
 		t.Error(err)
 	}
diff --git a/tests/integration/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go
similarity index 81%
rename from tests/integration/v3_lock_test.go
rename to tests/integration/clientv3/experimental/recipes/v3_lock_test.go
index 89a219d68..1447e17cd 100644
--- a/tests/integration/v3_lock_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go
@@ -12,38 +12,38 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"context"
 	"math/rand"
-	"sync"
 	"testing"
 	"time"
 
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
 	"go.etcd.io/etcd/pkg/v3/testutil"
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 func TestMutexLockSingleNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexLock(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func TestMutexLockMultiNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexLock(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
@@ -90,21 +90,21 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
 }
 
 func TestMutexTryLockSingleNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func TestMutexTryLockMultiNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	var clients []*clientv3.Client
-	testMutexTryLock(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
-	closeClients(t, clients)
+	testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
+	integration.CloseClients(t, clients)
 }
 
 func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
@@ -156,7 +156,7 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C
 // TestMutexSessionRelock ensures that acquiring the same lock with the same
 // session will not result in deadlock.
 func TestMutexSessionRelock(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	session, err := concurrency.NewSession(clus.RandClient())
 	if err != nil {
@@ -180,7 +180,7 @@ func TestMutexSessionRelock(t *testing.T) {
 
 func TestMutexWaitsOnCurrentHolder(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cctx := context.Background()
@@ -287,7 +287,7 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
 
 func BenchmarkMutex4Waiters(b *testing.B) {
 	// XXX switch tests to use TB interface
-	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
@@ -295,13 +295,13 @@ func BenchmarkMutex4Waiters(b *testing.B) {
 }
 
 func TestRWMutexSingleNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-	testRWMutex(t, 5, func() *clientv3.Client { return clus.clients[0] })
+	testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
 }
 
 func TestRWMutexMultiNode(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
 }
@@ -357,38 +357,3 @@ func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client
 		}
 	}
 }
-
-func makeClients(t *testing.T, clients *[]*clientv3.Client, choose func() *member) func() *clientv3.Client {
-	var mu sync.Mutex
-	*clients = nil
-	return func() *clientv3.Client {
-		cli, err := NewClientV3(choose())
-		if err != nil {
-			t.Fatalf("cannot create client: %v", err)
-		}
-		mu.Lock()
-		*clients = append(*clients, cli)
-		mu.Unlock()
-		return cli
-	}
-}
-
-func makeSingleNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
-	return makeClients(t, clients, func() *member {
-		return clus.Members[0]
-	})
-}
-
-func makeMultiNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
-	return makeClients(t, clients, func() *member {
-		return clus.Members[rand.Intn(len(clus.Members))]
-	})
-}
-
-func closeClients(t *testing.T, clients []*clientv3.Client) {
-	for _, cli := range clients {
-		if err := cli.Close(); err != nil {
-			t.Fatal(err)
-		}
-	}
-}
diff --git a/tests/integration/v3_queue_test.go b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go
similarity index 88%
rename from tests/integration/v3_queue_test.go
rename to tests/integration/clientv3/experimental/recipes/v3_queue_test.go
index 0dc2af258..e20932f33 100644
--- a/tests/integration/v3_queue_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package integration
+package recipes_test
 
 import (
 	"fmt"
@@ -20,7 +20,8 @@ import (
 	"sync/atomic"
 	"testing"
 
-	recipe "go.etcd.io/etcd/v3/contrib/recipes"
+	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
+	"go.etcd.io/etcd/tests/v3/integration"
 )
 
 const (
@@ -30,7 +31,7 @@ const (
 
 // TestQueueOneReaderOneWriter confirms the queue is FIFO
 func TestQueueOneReaderOneWriter(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	done := make(chan struct{})
@@ -76,7 +77,7 @@ func TestQueueManyReaderManyWriter(t *testing.T) {
 // BenchmarkQueue benchmarks Queues using many/many readers/writers
 func BenchmarkQueue(b *testing.B) {
 	// XXX switch tests to use TB interface
-	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	for i := 0; i < b.N; i++ {
 		testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients)
@@ -85,7 +86,7 @@ func BenchmarkQueue(b *testing.B) {
 
 // TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities.
 func TestPrQueueOneReaderOneWriter(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	// write out five items with random priority
@@ -117,7 +118,7 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) {
 }
 
 func TestPrQueueManyReaderManyWriter(t *testing.T) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	rqs := newPriorityQueues(clus, manyQueueClients)
 	wqs := newPriorityQueues(clus, manyQueueClients)
@@ -127,7 +128,7 @@ func TestPrQueueManyReaderManyWriter(t *testing.T) {
 // BenchmarkQueue benchmarks Queues using n/n readers/writers
 func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
 	// XXX switch tests to use TB interface
-	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(nil)
 	rqs := newPriorityQueues(clus, 1)
 	wqs := newPriorityQueues(clus, 1)
@@ -137,12 +138,12 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
 }
 
 func testQueueNReaderMWriter(t *testing.T, n int, m int) {
-	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	testReadersWriters(t, newQueues(clus, n), newQueues(clus, m))
 }
 
-func newQueues(clus *ClusterV3, n int) (qs []testQueue) {
+func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		qs = append(qs, recipe.NewQueue(etcdc, "q"))
@@ -150,7 +151,7 @@ func newQueues(clus *ClusterV3, n int) (qs []testQueue) {
 	return qs
 }
 
-func newPriorityQueues(clus *ClusterV3, n int) (qs []testQueue) {
+func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
 	for i := 0; i < n; i++ {
 		etcdc := clus.RandClient()
 		q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")}
diff --git a/tests/integration/cluster.go b/tests/integration/cluster.go
index 55e46de28..f9af71b33 100644
--- a/tests/integration/cluster.go
+++ b/tests/integration/cluster.go
@@ -1312,6 +1312,47 @@ func (c *ClusterV3) Client(i int) *clientv3.Client {
 	return c.clients[i]
 }
 
+// NewClientV3 creates a new grpc client connection to the member
+func (c *ClusterV3) NewClientV3(memberIndex int) (*clientv3.Client, error) {
+	return NewClientV3(c.Members[memberIndex])
+}
+
+func makeClients(t *testing.T, clus *ClusterV3, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client {
+	var mu sync.Mutex
+	*clients = nil
+	return func() *clientv3.Client {
+		cli, err := clus.NewClientV3(chooseMemberIndex())
+		if err != nil {
+			t.Fatalf("cannot create client: %v", err)
+		}
+		mu.Lock()
+		*clients = append(*clients, cli)
+		mu.Unlock()
+		return cli
+	}
+}
+
+// MakeSingleNodeClients creates a factory of clients that all connect to member 0.
+// All the created clients are put on the 'clients' list. The factory is thread-safe.
+func MakeSingleNodeClients(t *testing.T, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client {
+	return makeClients(t, clus, clients, func() int { return 0 })
+}
+
+// MakeMultiNodeClients creates a factory of clients that all connect to random members.
+// All the created clients are put on the 'clients' list. The factory is thread-safe.
+func MakeMultiNodeClients(t *testing.T, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client {
+	return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) })
+}
+
+// CloseClients closes all the clients from the 'clients' list.
+func CloseClients(t *testing.T, clients []*clientv3.Client) {
+	for _, cli := range clients {
+		if err := cli.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
 type grpcAPI struct {
 	// Cluster is the cluster API for the client's connection.
 	Cluster pb.ClusterClient
diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go
index cef981e45..8ef54d23e 100644
--- a/tests/integration/v3_election_test.go
+++ b/tests/integration/v3_election_test.go
@@ -32,7 +32,10 @@ func TestElectionWait(t *testing.T) {
 	leaders := 3
 	followers := 3
 	var clients []*clientv3.Client
-	newClient := makeMultiNodeClients(t, clus.cluster, &clients)
+	newClient := MakeMultiNodeClients(t, clus, &clients)
+	defer func() {
+		CloseClients(t, clients)
+	}()
 
 	electedc := make(chan string)
 	nextc := []chan struct{}{}
@@ -100,8 +103,6 @@ func TestElectionWait(t *testing.T) {
 	for i := 0; i < followers; i++ {
 		<-donec
 	}
-
-	closeClients(t, clients)
 }
 
 // TestElectionFailover tests that an election will
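The following sketch is not part of the patch. It is a hypothetical test illustrating how the pieces above fit together after the move: recipes are imported from go.etcd.io/etcd/client/v3/experimental/recipes, and the test cluster and its clients are reached only through the exported helpers in tests/v3/integration (MakeMultiNodeClients, CloseClients). The test name and barrier key are invented, and it assumes the barrier recipe's existing NewBarrier/Hold/Wait/Release API.

package recipes_test

import (
	"testing"

	"go.etcd.io/etcd/client/v3"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
	"go.etcd.io/etcd/tests/v3/integration"
)

// TestBarrierExample is a hypothetical test: the cluster and its clients come
// from the exported integration helpers, while the recipe itself comes from
// the experimental package under client/v3.
func TestBarrierExample(t *testing.T) {
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// The factory records every client it hands out so they can all be closed at the end.
	var clients []*clientv3.Client
	newClient := integration.MakeMultiNodeClients(t, clus, &clients)
	defer integration.CloseClients(t, clients)

	// Hold a barrier through one client and wait on it through another.
	holder := recipe.NewBarrier(newClient(), "example-barrier")
	waiter := recipe.NewBarrier(newClient(), "example-barrier")
	if err := holder.Hold(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		// Blocks until the barrier key is deleted (or returns immediately if already gone).
		if err := waiter.Wait(); err != nil {
			t.Error(err)
		}
	}()

	if err := holder.Release(); err != nil {
		t.Fatal(err)
	}
	<-donec
}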