Merge pull request #15542 from mitake/revert-14548-v2

[3.4] Revert 14548

commit 336ac78ebe
Benjamin Wang, 2023-03-22 06:19:30 +08:00, committed by GitHub (GPG Key ID: 4AEE18F83AFDEB23)
4 changed files with 6 additions and 87 deletions

clientv3/watch.go

@@ -18,7 +18,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"strings"
 	"sync"
 	"time"
@@ -586,26 +585,6 @@ func (w *watchGrpcStream) run() {
 			switch {
 			case pbresp.Created:
-				cancelReasonError := v3rpc.Error(errors.New(pbresp.CancelReason))
-				if shouldRetryWatch(cancelReasonError) {
-					var newErr error
-					if wc, newErr = w.newWatchClient(); newErr != nil {
-						w.lg.Error("failed to create a new watch client", zap.Error(newErr))
-						return
-					}
-					if len(w.resuming) != 0 {
-						if ws := w.resuming[0]; ws != nil {
-							if err := wc.Send(ws.initReq.toPB()); err != nil {
-								w.lg.Debug("error when sending request", zap.Error(err))
-							}
-						}
-					}
-					cur = nil
-					continue
-				}
 				// response to head of queue creation
 				if ws := w.resuming[0]; ws != nil {
 					w.addSubstream(pbresp, ws)
@@ -725,11 +704,6 @@ func (w *watchGrpcStream) run() {
 	}
 }
 
-func shouldRetryWatch(cancelReasonError error) bool {
-	return (strings.Compare(cancelReasonError.Error(), v3rpc.ErrGRPCInvalidAuthToken.Error()) == 0) ||
-		(strings.Compare(cancelReasonError.Error(), v3rpc.ErrGRPCAuthOldRevision.Error()) == 0)
-}
-
 // nextResume chooses the next resuming to register with the grpc stream. Abandoned
 // streams are marked as nil in the queue since the head must wait for its inflight registration.
 func (w *watchGrpcStream) nextResume() *watcherStream {
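
Client-side effect of the two hunks above: with shouldRetryWatch and its call site removed, clientv3 no longer rebuilds the watch stream when the server cancels a watch for an invalid or stale auth token, so the cancellation reaches the application. A minimal sketch of an application-level fallback, assuming placeholder endpoints and credentials; nothing below is part of this commit:

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

// watchWithReauth is a hypothetical helper: it re-dials (which obtains a
// fresh token for the configured username/password) and re-opens the watch
// whenever the server cancels it, since after this revert the client
// library itself no longer retries token-expiry cancellations.
func watchWithReauth(ctx context.Context, key string) error {
	for ctx.Err() == nil {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints: []string{"127.0.0.1:2379"}, // placeholder endpoint
			Username:  "root",                     // placeholder credentials
			Password:  "123",
		})
		if err != nil {
			return err
		}
		for resp := range cli.Watch(ctx, key) {
			if resp.Canceled {
				log.Printf("watch canceled (%v); re-authenticating", resp.Err())
				break
			}
			for _, ev := range resp.Events {
				log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
			}
		}
		cli.Close()
	}
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := watchWithReauth(ctx, "key"); err != nil {
		log.Println(err)
	}
}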

etcdserver/api/v3rpc/watch.go

@@ -234,16 +234,16 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 	return err
 }
 
-func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error {
+func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
 	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
 	if err != nil {
-		return err
+		return false
 	}
 	if authInfo == nil {
 		// if auth is enabled, IsRangePermitted() can cause an error
 		authInfo = &auth.AuthInfo{}
 	}
-	return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd)
+	return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
 }
 
 func (sws *serverWatchStream) recvLoop() error {
@@ -277,29 +277,13 @@ func (sws *serverWatchStream) recvLoop() error {
 				creq.RangeEnd = []byte{}
 			}
 
-			err := sws.isWatchPermitted(creq)
-			if err != nil {
-				var cancelReason string
-				switch err {
-				case auth.ErrInvalidAuthToken:
-					cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error()
-				case auth.ErrAuthOldRevision:
-					cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error()
-				case auth.ErrUserEmpty:
-					cancelReason = rpctypes.ErrGRPCUserEmpty.Error()
-				default:
-					if err != auth.ErrPermissionDenied {
-						sws.lg.Error("unexpected error code", zap.Error(err))
-					}
-					cancelReason = rpctypes.ErrGRPCPermissionDenied.Error()
-				}
-
+			if !sws.isWatchPermitted(creq) {
 				wr := &pb.WatchResponse{
 					Header:       sws.newResponseHeader(sws.watchStream.Rev()),
 					WatchId:      clientv3.InvalidWatchID,
 					Canceled:     true,
 					Created:      true,
-					CancelReason: cancelReason,
+					CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
 				}
 
 				select {
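
Server-side effect of the hunk above: isWatchPermitted is back to returning a bool, so every rejected WatchCreateRequest is canceled with the same reason and the token-specific reasons disappear from the wire. A small sketch that just prints the rpctypes constants involved (import path as used on the 3.4 branch), to make the before/after visible:

package main

import (
	"fmt"

	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	// After this revert, the only cancel reason a refused watch carries:
	fmt.Println(rpctypes.ErrGRPCPermissionDenied.Error())

	// Before the revert, the server could also send these token-related
	// reasons, which the reverted client code matched on to retry:
	fmt.Println(rpctypes.ErrGRPCInvalidAuthToken.Error())
	fmt.Println(rpctypes.ErrGRPCAuthOldRevision.Error())
	fmt.Println(rpctypes.ErrGRPCUserEmpty.Error())
}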

integration/cluster.go

@@ -126,8 +126,7 @@ type ClusterConfig struct {
 	DiscoveryURL string
 
-	AuthToken    string
-	AuthTokenTTL uint
+	AuthToken string
 
 	UseGRPC bool
@@ -286,7 +285,6 @@ func (c *cluster) mustNewMember(t testing.TB) *member {
 		memberConfig{
 			name:              c.name(rand.Int()),
 			authToken:         c.cfg.AuthToken,
-			authTokenTTL:      c.cfg.AuthTokenTTL,
 			peerTLS:           c.cfg.PeerTLS,
 			clientTLS:         c.cfg.ClientTLS,
 			quotaBackendBytes: c.cfg.QuotaBackendBytes,
@@ -581,7 +579,6 @@ type memberConfig struct {
 	peerTLS           *transport.TLSInfo
 	clientTLS         *transport.TLSInfo
 	authToken         string
-	authTokenTTL      uint
 	quotaBackendBytes int64
 	maxTxnOps         uint
 	maxRequestBytes   uint
@@ -668,9 +665,6 @@ func mustNewMember(t testing.TB, mcfg memberConfig) *member {
 	if mcfg.authToken != "" {
 		m.AuthToken = mcfg.authToken
 	}
-	if mcfg.authTokenTTL != 0 {
-		m.TokenTTL = mcfg.authTokenTTL
-	}
 	m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing
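
The four hunks above strip only harness plumbing: a zero-means-default knob threaded from ClusterConfig down to the member's TokenTTL. A self-contained sketch of that pattern with illustrative names, not etcd's; the 300-second default mirrors etcd's simple-token TTL and is an assumption here:

package main

import "fmt"

type clusterConfig struct {
	AuthToken    string
	AuthTokenTTL uint // 0 means "keep the server default"
}

type serverConfig struct {
	AuthToken string
	TokenTTL  uint // seconds
}

// newServerConfig forwards each knob only when it was explicitly set, so
// zero values never clobber the server defaults.
func newServerConfig(c clusterConfig) serverConfig {
	s := serverConfig{TokenTTL: 300} // assumed default, matching simple-token TTL
	if c.AuthToken != "" {
		s.AuthToken = c.AuthToken
	}
	if c.AuthTokenTTL != 0 {
		s.TokenTTL = c.AuthTokenTTL
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", newServerConfig(clusterConfig{AuthToken: "simple", AuthTokenTTL: 3}))
	fmt.Printf("%+v\n", newServerConfig(clusterConfig{AuthToken: "simple"}))
}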

integration/v3_auth_test.go

@@ -472,39 +472,6 @@ func TestV3AuthRestartMember(t *testing.T) {
 	testutil.AssertNil(t, err)
 }
 
-func TestV3AuthWatchAndTokenExpire(t *testing.T) {
-	defer testutil.AfterTest(t)
-
-	clus := NewClusterV3(t, &ClusterConfig{Size: 1, AuthTokenTTL: 3})
-	defer clus.Terminate(t)
-
-	ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
-	defer cancel()
-
-	authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
-
-	c, cerr := clientv3.New(clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
-	if cerr != nil {
-		t.Fatal(cerr)
-	}
-	defer c.Close()
-
-	_, err := c.Put(ctx, "key", "val")
-	if err != nil {
-		t.Fatalf("Unexpected error from Put: %v", err)
-	}
-
-	// The first watch gets a valid auth token through watcher.newWatcherGrpcStream()
-	// We should discard the first one by waiting TTL after the first watch.
-	wChan := c.Watch(ctx, "key", clientv3.WithRev(1))
-	watchResponse := <-wChan
-
-	time.Sleep(5 * time.Second)
-
-	wChan = c.Watch(ctx, "key", clientv3.WithRev(1))
-	watchResponse = <-wChan
-	testutil.AssertNil(t, watchResponse.Err())
-}
-
 func TestV3AuthWatchErrorAndWatchId0(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})