clientv3: cancel watches proactively on client context cancellation

Currently, watch cancel requests are only sent to the server after a new
message arrives on a watch that the client has already cancelled. This
means that cancelled watches which never receive another message are
never cancelled on the server; they persist for the lifetime of the
client stream. This is particularly harmful for locking applications,
where a watch may observe a key that never changes again after
cancellation, leading to many watches accumulating on the server.

By cancelling proactively, in most cases we simply send the cancel
request earlier than before, and in addition we cover the case where the
cancel request would otherwise never be sent.
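
For illustration, a rough client-side sketch of the pattern this change
affects, using the clientv3 API (the endpoint address and key below are
placeholders): cancelling the watch context now causes a
WatchCancelRequest to be sent on the shared gRPC stream even if the
watched key never changes again.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Watch a key that may never change again (e.g. a lock key).
	ctx, cancel := context.WithCancel(context.Background())
	wch := cli.Watch(ctx, "/my-lock") // placeholder key
	_ = wch                           // ... consume events as needed ...

	// Cancelling the context releases the watcher. With this change the
	// client proactively sends a WatchCancelRequest on the shared stream
	// instead of waiting for the next (possibly never-arriving) event.
	cancel()
}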

Fixes #9416
Heavy inspiration drawn from the solutions proposed there.
Author: Jack Kleeman, 2020-05-06 11:57:19 +01:00 (committed by tangcong)
Parent: 37ac22205b
Commit: 36452a1c1d
2 changed files with 52 additions and 1 deletion

clientv3/watch.go

@@ -25,6 +25,7 @@ import (
 	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
 	mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
@@ -616,7 +617,10 @@ func (w *watchGrpcStream) run() {
 					},
 				}
 				req := &pb.WatchRequest{RequestUnion: cr}
-				wc.Send(req)
+				lg.Info("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+				if err := wc.Send(req); err != nil {
+					lg.Warning("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+				}
 			}
 
 		// watch client failed on Recv; spawn another if possible
@@ -637,6 +641,21 @@ func (w *watchGrpcStream) run() {
 			return
 		case ws := <-w.closingc:
+			if ws.id != -1 {
+				// client is closing an established watch; close it on the server proactively instead of waiting
+				// to close when the next message arrives
+				cancelSet[ws.id] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: ws.id,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				lg.Info("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+				if err := wc.Send(req); err != nil {
+					lg.Warning("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+				}
+			}
 			w.closeSubstream(ws)
 			delete(closing, ws)
 			// no more watchers on this stream, shutdown
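
For context, the wire-level exchange that the new closingc branch performs
can be reproduced with the raw etcdserverpb Watch API. A rough sketch,
assuming a local etcd reachable at 127.0.0.1:2379 (the endpoint and key
are placeholders, and most error handling is elided):

package main

import (
	"context"
	"log"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure()) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := pb.NewWatchClient(conn).Watch(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Create a watch on a key and read back the assigned watch id.
	if err := stream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("/foo")},
	}}); err != nil {
		log.Fatal(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}

	// Cancel it by id; the server frees the watcher and eventually replies
	// with a WatchResponse that has Canceled set.
	if err := stream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{WatchId: resp.WatchId},
	}}); err != nil {
		log.Fatal(err)
	}
}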

integration/v3_watch_test.go

@@ -1211,3 +1211,35 @@ func TestV3WatchWithPrevKV(t *testing.T) {
 		}
 	}
 }
+
+// TestV3WatchCancellation ensures that watch cancellation frees up server resources.
+func TestV3WatchCancellation(t *testing.T) {
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	defer clus.Terminate(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	cli := clus.RandClient()
+
+	// increment watcher total count and keep a stream open
+	cli.Watch(ctx, "/foo")
+
+	for i := 0; i < 1000; i++ {
+		ctx, cancel := context.WithCancel(ctx)
+		cli.Watch(ctx, "/foo")
+		cancel()
+	}
+
+	// Wait a little for cancellations to take hold
+	time.Sleep(3 * time.Second)
+
+	minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if minWatches != "1" {
+		t.Fatalf("expected one watch, got %s", minWatches)
+	}
+}
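
Outside the test harness, the same server-side watcher count can be
inspected on a running member's Prometheus endpoint. A rough sketch,
assuming metrics are served on the default client URL
http://127.0.0.1:2379/metrics:

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// etcd exposes Prometheus metrics on its client (and --listen-metrics-urls) endpoints.
	resp, err := http.Get("http://127.0.0.1:2379/metrics") // assumed local endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		// e.g. "etcd_debugging_mvcc_watcher_total 1"
		if strings.HasPrefix(line, "etcd_debugging_mvcc_watcher_total") {
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}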