integration: test 'inflight' range requests

- Test https://github.com/coreos/etcd/issues/7322.
- Remove test case added in https://github.com/coreos/etcd/pull/6662.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
This commit is contained in:
Gyu-Ho Lee 2017-04-14 11:11:06 -07:00
parent c407e097e2
commit 472a536052
2 changed files with 41 additions and 26 deletions

View File

@ -719,7 +719,7 @@ func (m *member) Close() {
m.serverClient = nil
}
if m.grpcServer != nil {
m.grpcServer.Stop()
m.grpcServer.GracefulStop()
m.grpcServer = nil
}
m.s.HardStop()

View File

@ -15,39 +15,17 @@
package integration
import (
"sync"
"testing"
"time"
"google.golang.org/grpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// TestV3MaintenanceHashInflight ensures that a Hash RPC still in flight
// when the embedded EtcdServer is hard-stopped does not trigger a panic.
func TestV3MaintenanceHashInflight(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	mvc := toGRPC(clus.RandClient()).Maintenance
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)

	done := make(chan struct{})
	go func() {
		defer close(done)
		// The result is intentionally ignored: the test only verifies
		// that the server does not panic on an inflight Hash call.
		mvc.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
	}()

	// Hard-stop the server while the Hash call may still be in flight,
	// then cancel the RPC and wait for the goroutine to finish.
	clus.Members[0].s.HardStop()
	cancel()

	<-done
}
// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
// do not panic the mvcc backend while defragment is running.
func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
@ -75,3 +53,40 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
<-donec
}
// TestV3KVInflightRangeRequests ensures that inflight requests
// (sent before server shutdown) are gracefully handled by server-side.
// They are either finished or canceled, but never crash the backend.
// See https://github.com/coreos/etcd/issues/7322 for more detail.
func TestV3KVInflightRangeRequests(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.RandClient()
	kvc := toGRPC(cli).KV

	// Seed a key so the concurrent Range calls have something to read.
	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)

	reqN := 10 // use 500+ for fast machine
	var wg sync.WaitGroup
	wg.Add(reqN)
	for i := 0; i < reqN; i++ {
		go func() {
			defer wg.Done()
			_, err := kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo"), Serializable: true}, grpc.FailFast(false))
			// NOTE: t.Fatalf must not be called from a non-test goroutine
			// (testing.T.FailNow only exits the goroutine it runs in, so
			// the failure would be lost and cleanup skipped); use t.Errorf
			// and let the test goroutine observe the failure after Wait.
			if err != nil && grpc.ErrorDesc(err) != context.Canceled.Error() {
				t.Errorf("inflight request should be canceled with %v, got %v", context.Canceled, err)
			}
		}()
	}

	// Stop the member while the Range requests may still be in flight,
	// then cancel them and wait for every goroutine to report back.
	clus.Members[0].Stop(t)
	cancel()

	wg.Wait()
}