diff --git a/integration/cluster.go b/integration/cluster.go
index 211758ce5..b2e0566ac 100644
--- a/integration/cluster.go
+++ b/integration/cluster.go
@@ -719,7 +719,7 @@ func (m *member) Close() {
 		m.serverClient = nil
 	}
 	if m.grpcServer != nil {
-		m.grpcServer.Stop()
+		m.grpcServer.GracefulStop()
 		m.grpcServer = nil
 	}
 	m.s.HardStop()
diff --git a/integration/v3_maintenance_test.go b/integration/v3_grpc_inflight_test.go
similarity index 63%
rename from integration/v3_maintenance_test.go
rename to integration/v3_grpc_inflight_test.go
index e82219230..1994af06d 100644
--- a/integration/v3_maintenance_test.go
+++ b/integration/v3_grpc_inflight_test.go
@@ -15,39 +15,17 @@
 package integration
 
 import (
+	"sync"
 	"testing"
 	"time"
 
-	"google.golang.org/grpc"
-
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/testutil"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
 )
 
-// TestV3MaintenanceHashInflight ensures inflight Hash call
-// to embedded being-stopped EtcdServer does not trigger panic.
-func TestV3MaintenanceHashInflight(t *testing.T) {
-	defer testutil.AfterTest(t)
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
-	defer clus.Terminate(t)
-
-	cli := clus.RandClient()
-	mvc := toGRPC(cli).Maintenance
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-
-	donec := make(chan struct{})
-	go func() {
-		defer close(donec)
-		mvc.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
-	}()
-
-	clus.Members[0].s.HardStop()
-	cancel()
-
-	<-donec
-}
-
 // TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
 // does not panic the mvcc backend while defragment is running.
 func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
@@ -75,3 +53,40 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
 
 	<-donec
 }
+
+// TestV3KVInflightRangeRequests ensures that inflight requests
+// (sent before server shutdown) are gracefully handled by server-side.
+// They are either finished or canceled, but never crash the backend.
+// See https://github.com/coreos/etcd/issues/7322 for more detail.
+func TestV3KVInflightRangeRequests(t *testing.T) {
+	defer testutil.AfterTest(t)
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	defer clus.Terminate(t)
+
+	cli := clus.RandClient()
+	kvc := toGRPC(cli).KV
+
+	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+
+	reqN := 10 // use 500+ for fast machine
+	var wg sync.WaitGroup
+	wg.Add(reqN)
+	for i := 0; i < reqN; i++ {
+		go func() {
+			defer wg.Done()
+			_, err := kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo"), Serializable: true}, grpc.FailFast(false))
+			if err != nil && grpc.ErrorDesc(err) != context.Canceled.Error() {
+				t.Fatalf("inflight request should be canceled with %v, got %v", context.Canceled, err)
+			}
+		}()
+	}
+
+	clus.Members[0].Stop(t)
+	cancel()
+
+	wg.Wait()
+}
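
Context for the cluster.go change above (not part of the patch): grpc-go's (*grpc.Server).Stop closes all connections and cancels active RPCs immediately, while GracefulStop stops accepting new RPCs and blocks until pending ones finish. That is what lets the new TestV3KVInflightRangeRequests observe either a completed Range response or a clean context.Canceled rather than a crash. Below is a minimal, self-contained sketch of the graceful-shutdown pattern only; the timeout fallback is a hypothetical addition and is not taken from the etcd code.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer()
	// Services would be registered here before serving real traffic.
	go srv.Serve(lis)

	time.Sleep(100 * time.Millisecond) // stand-in for the server's lifetime

	// GracefulStop stops accepting new RPCs and blocks until in-flight
	// RPCs complete; Stop would cancel them immediately.
	done := make(chan struct{})
	go func() {
		srv.GracefulStop()
		close(done)
	}()

	select {
	case <-done:
		log.Println("drained cleanly")
	case <-time.After(5 * time.Second):
		// Hypothetical fallback: hard-stop if draining takes too long.
		srv.Stop()
	}
}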