Benjamin Wang 3c51c42417 test: fix nil pointer panic in testMutexLock
Refer to: https://github.com/etcd-io/etcd/actions/runs/3671847902/jobs/6207463700

```
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0xedc388]

goroutine 5253 [running]:
go.etcd.io/etcd/client/v3/concurrency.(*Session).Client(...)
	/home/runner/work/etcd/etcd/client/v3/concurrency/session.go:76
go.etcd.io/etcd/client/v3/concurrency.(*Mutex).tryAcquire(0xc000133140, {0x18a8668, 0xc000050158})
	/home/runner/work/etcd/etcd/client/v3/concurrency/mutex.go:111 +0x88
go.etcd.io/etcd/client/v3/concurrency.(*Mutex).Lock(0xc000133140, {0x18a8668, 0xc000050158})
	/home/runner/work/etcd/etcd/client/v3/concurrency/mutex.go:74 +0x68
go.etcd.io/etcd/tests/v3/integration/clientv3/experimental/recipes_test.testMutexLock.func1()
	/home/runner/work/etcd/etcd/tests/integration/clientv3/experimental/recipes/v3_lock_test.go:65 +0x285
created by go.etcd.io/etcd/tests/v3/integration/clientv3/experimental/recipes_test.testMutexLock
	/home/runner/work/etcd/etcd/tests/integration/clientv3/experimental/recipes/v3_lock_test.go:59 +0xda
FAIL	go.etcd.io/etcd/tests/v3/integration/clientv3/experimental/recipes	7.070s
FAIL
```

Signed-off-by: Benjamin Wang <wachao@vmware.com>
2022-12-12 10:18:45 +08:00
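The trace shows `Mutex.tryAcquire` calling `Session.Client()` on a nil `*concurrency.Session`: `concurrency.NewSession` returns a nil session on error, and the waiter goroutine previously kept going and built a mutex around it. The fix in `testMutexLock` below reports the error on a channel and returns before the session is ever used. A minimal sketch of the pattern (the helper `lockWithSession` and its package are illustrative, not part of the test file):

```go
package lockexample

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// lockWithSession is a hypothetical helper showing the fixed pattern:
// report session-creation failures and return early so a nil
// *concurrency.Session is never dereferenced.
func lockWithSession(cli *clientv3.Client, key string, errC chan<- error, lockedC chan<- *concurrency.Mutex) {
	session, err := concurrency.NewSession(cli)
	if err != nil {
		// NewSession returns (nil, err) on failure; calling NewMutex and
		// Lock on the nil session is what panicked inside tryAcquire.
		errC <- fmt.Errorf("failed to create new session: %w", err)
		return
	}
	m := concurrency.NewMutex(session, key)
	if err := m.Lock(context.TODO()); err != nil {
		errC <- fmt.Errorf("failed to wait on lock: %w", err)
		return
	}
	lockedC <- m
}
```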

```go
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package recipes_test

import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"go.etcd.io/etcd/api/v3/mvccpb"
	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
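
// These tests exercise the clientv3 concurrency.Mutex recipe: each waiter
// creates a Session (an etcd lease kept alive in the background), Lock
// writes a key under the lock prefix tied to that lease, and ownership goes
// to the waiter whose key has the lowest creation revision. When a session
// is closed or its lease expires, its key is deleted and the next waiter in
// line acquires the lock.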

func TestMutexLockSingleNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}

func TestMutexLockMultiNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}
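
// testMutexLock starts `waiters` goroutines that all compete for the same
// mutex, then releases the holders one at a time; it fails if two waiters
// ever report holding the lock simultaneously or if any waiter errors out.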
func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream lock acquisitions
	lockedC := make(chan *concurrency.Mutex, waiters)
	errC := make(chan error, waiters)

	var wg sync.WaitGroup
	wg.Add(waiters)
	for i := 0; i < waiters; i++ {
		go func(i int) {
			defer wg.Done()
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				errC <- fmt.Errorf("#%d: failed to create new session: %w", i, err)
				return
			}
			m := concurrency.NewMutex(session, "test-mutex")
			if err := m.Lock(context.TODO()); err != nil {
				errC <- fmt.Errorf("#%d: failed to wait on lock: %w", i, err)
				return
			}
			lockedC <- m
		}(i)
	}

	// unlock locked mutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case err := <-errC:
			t.Fatalf("Unexpected error: %v", err)
		case m := <-lockedC:
			// lock acquired with m; no other waiter may hold it now
			select {
			case <-lockedC:
				t.Fatalf("lock %d followers did not wait", i)
			default:
			}
			if err := m.Unlock(context.TODO()); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		}
	}

	wg.Wait()
}
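
// TryLock is the non-blocking variant of Lock: it acquires the mutex only
// if no other session holds it, and otherwise fails immediately with
// concurrency.ErrLocked instead of waiting for the holder.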
func TestMutexTryLockSingleNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	t.Logf("3 nodes cluster created...")

	var clients []*clientv3.Client
	testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}

func TestMutexTryLockMultiNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}
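
// testMutexTryLock races `lockers` goroutines on TryLock for one key:
// exactly one should acquire the mutex and every other locker should
// observe concurrency.ErrLocked.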
func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	lockedC := make(chan *concurrency.Mutex)
	notlockedC := make(chan *concurrency.Mutex)

	for i := 0; i < lockers; i++ {
		go func(i int) {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				// return early: using the nil session would panic, as in
				// the trace above
				t.Error(err)
				return
			}
			m := concurrency.NewMutex(session, "test-mutex-try-lock")
			err = m.TryLock(ctx)
			if err == nil {
				select {
				case lockedC <- m:
				case <-ctx.Done():
					t.Errorf("Thread: %v, Context failed: %v", i, err)
				}
			} else if err == concurrency.ErrLocked {
				select {
				case notlockedC <- m:
				case <-ctx.Done():
					t.Errorf("Thread: %v, Context failed: %v", i, err)
				}
			} else {
				t.Errorf("Thread: %v; Unexpected Error %v", i, err)
			}
		}(i)
	}

	timerC := time.After(30 * time.Second)
	select {
	case <-lockedC:
		for i := 0; i < lockers-1; i++ {
			select {
			case <-lockedC:
				t.Fatalf("multiple mutexes locked on same key")
			case <-notlockedC:
			case <-timerC:
				t.Errorf("timed out waiting for lock")
			}
		}
	case <-timerC:
		t.Errorf("timed out waiting for lock (30s)")
	}
}

// TestMutexSessionRelock ensures that acquiring the same lock with the same
// session will not result in deadlock.
func TestMutexSessionRelock(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	session, err := concurrency.NewSession(clus.RandClient())
	if err != nil {
		t.Fatal(err)
	}

	m := concurrency.NewMutex(session, "test-mutex")
	if err := m.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}

	m2 := concurrency.NewMutex(session, "test-mutex")
	if err := m2.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}
}

// TestMutexWaitsOnCurrentHolder ensures a mutex is only acquired once all
// waiters older than the new owner are gone by testing the case where
// the waiter prior to the acquirer expires before the current holder.
func TestMutexWaitsOnCurrentHolder(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cctx := context.Background()
	cli := clus.Client(0)

	firstOwnerSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer firstOwnerSession.Close()

	firstOwnerMutex := concurrency.NewMutex(firstOwnerSession, "test-mutex")
	if err = firstOwnerMutex.Lock(cctx); err != nil {
		t.Fatal(err)
	}

	victimSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer victimSession.Close()

	victimDonec := make(chan struct{})
	go func() {
		defer close(victimDonec)
		concurrency.NewMutex(victimSession, "test-mutex").Lock(cctx)
	}()

	// ensure the keys for firstOwnerSession and victimSession are in place
	// before creating the new owner
	wch := cli.Watch(cctx, "test-mutex", clientv3.WithPrefix(), clientv3.WithRev(1))
	putCounts := 0
	for putCounts < 2 {
		select {
		case wrp := <-wch:
			putCounts += len(wrp.Events)
		case <-time.After(time.Second):
			t.Fatal("failed to receive watch response")
		}
	}
	if putCounts != 2 {
		t.Fatalf("expected 2 put events, but got %v", putCounts)
	}

	newOwnerSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer newOwnerSession.Close()

	newOwnerDonec := make(chan struct{})
	go func() {
		defer close(newOwnerDonec)
		concurrency.NewMutex(newOwnerSession, "test-mutex").Lock(cctx)
	}()

	select {
	case wrp := <-wch:
		if len(wrp.Events) != 1 {
			t.Fatalf("expected one event, but got %v events", len(wrp.Events))
		}
		if e := wrp.Events[0]; e.Type != mvccpb.PUT {
			t.Fatalf("expected a put event on prefix test-mutex, but got event type %v", e.Type)
		}
	case <-time.After(time.Second):
		t.Fatalf("failed to receive a watch response")
	}

	// simulate losing the client that's next in line to acquire the lock
	victimSession.Close()

	// ensure the victim waiter's key is deleted on the server side
	select {
	case wrp := <-wch:
		if len(wrp.Events) != 1 {
			t.Fatalf("expected one event, but got %v events", len(wrp.Events))
		}
		if e := wrp.Events[0]; e.Type != mvccpb.DELETE {
			t.Fatalf("expected a delete event on prefix test-mutex, but got event type %v", e.Type)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive a watch response")
	}

	select {
	case <-newOwnerDonec:
		t.Fatal("new owner obtained lock before first owner unlocked")
	default:
	}

	if err := firstOwnerMutex.Unlock(cctx); err != nil {
		t.Fatal(err)
	}

	select {
	case <-newOwnerDonec:
	case <-time.After(time.Second):
		t.Fatal("new owner failed to obtain lock")
	}

	select {
	case <-victimDonec:
	case <-time.After(time.Second):
		t.Fatal("victim mutex failed to exit after first owner releases lock")
	}
}

func BenchmarkMutex4Waiters(b *testing.B) {
	integration2.BeforeTest(b)

	// XXX switch tests to use TB interface
	clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(nil)
	for i := 0; i < b.N; i++ {
		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
	}
}

func TestRWMutexSingleNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
}

func TestRWMutexMultiNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
}
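
// testRWMutex launches a random mix of readers and writers on the same
// RWMutex key: while a writer holds the lock no reader may hold it, and
// while any reader holds it no writer may.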
func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream rwlock acquisitions
	rlockedC := make(chan *recipe.RWMutex, 1)
	wlockedC := make(chan *recipe.RWMutex, 1)
	for i := 0; i < waiters; i++ {
		go func() {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				// return early: using the nil session would panic
				t.Error(err)
				return
			}
			rwm := recipe.NewRWMutex(session, "test-rwmutex")
			if rand.Intn(2) == 0 {
				if err := rwm.RLock(); err != nil {
					t.Errorf("could not rlock (%v)", err)
				}
				rlockedC <- rwm
			} else {
				if err := rwm.Lock(); err != nil {
					t.Errorf("could not lock (%v)", err)
				}
				wlockedC <- rwm
			}
		}()
	}

	// unlock locked rwmutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case wl := <-wlockedC:
			select {
			case <-rlockedC:
				t.Fatalf("rlock %d readers did not wait", i)
			default:
			}
			if err := wl.Unlock(); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		case rl := <-rlockedC:
			select {
			case <-wlockedC:
				t.Fatalf("rlock %d writers did not wait", i)
			default:
			}
			if err := rl.RUnlock(); err != nil {
				t.Fatalf("could not release rlock (%v)", err)
			}
		}
	}
}
```