etcd/server/etcdserver/api/v3rpc/maintenance.go
Wei Fu 1ba577e499 server/etcdserver: togRPCError for maintenance API
This change deflakes TestAuthMemberRemove.

When the client has multiple endpoints, it might send a request with a
valid token to a follower member that has not yet received the replicated
log entry for that token. That member will reject the request.

For instance, the maintenance.Status API returns "auth: invalid auth
token". The client does not recognize this error, so it never refreshes
the auth token and retries. maintenance.Status should pass the error
through togRPCError before returning so that the client can refresh its
token. This aligns with the existing APIs.

Since the maintenance client always creates a single connection to the
target member, that member will have the token after the auth refresh.

Alternatively, we could introduce a synchronization step that waits until
the member is ready with the token, instead of refreshing it.

Fixes: #15758

Signed-off-by: Wei Fu <fuweid89@gmail.com>
2023-04-22 18:35:53 +08:00
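
To illustrate the fix, here is a minimal, hypothetical sketch of the error
conversion the commit message describes. The real togRPCError lives in
util.go of this package and covers many more cases; togRPCErrorSketch and
the reduced mapping below are illustrative only. The point is that a raw
auth error string such as "auth: invalid auth token" is opaque to clients,
while the gRPC status error rpctypes.ErrGRPCInvalidAuthToken is what the
clientv3 retry logic recognizes before refreshing its token:

package sketch // illustrative only, not part of the file below

import (
	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	"go.etcd.io/etcd/server/v3/auth"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// togRPCErrorSketch converts server-internal auth errors into gRPC status
// errors. Clients match on the status (codes.Unauthenticated for an
// invalid token) and refresh their token before retrying.
func togRPCErrorSketch(err error) error {
	switch err {
	case auth.ErrInvalidAuthToken:
		return rpctypes.ErrGRPCInvalidAuthToken
	case auth.ErrAuthOldRevision:
		return rpctypes.ErrGRPCAuthOldRevision
	default:
		// Unrecognized errors are surfaced as a plain Unknown status.
		return status.Error(codes.Unknown, err.Error())
	}
}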

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
"context"
"crypto/sha256"
"io"
"time"
"github.com/dustin/go-humanize"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/apply"
"go.etcd.io/etcd/server/v3/etcdserver/errors"
serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.etcd.io/raft/v3"
"go.uber.org/zap"
)

type KVGetter interface {
KV() mvcc.WatchableKV
}

type BackendGetter interface {
Backend() backend.Backend
}

type Alarmer interface {
// Alarms is implemented in the Server interface located in etcdserver/server.go.
// It returns a list of alarms present in the AlarmStore.
Alarms() []*pb.AlarmMember
Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
}

type Downgrader interface {
Downgrade(ctx context.Context, dr *pb.DowngradeRequest) (*pb.DowngradeResponse, error)
}

type LeaderTransferrer interface {
MoveLeader(ctx context.Context, lead, target uint64) error
}

type ClusterStatusGetter interface {
IsLearner() bool
}

type maintenanceServer struct {
lg *zap.Logger
rg apply.RaftStatusGetter
hasher mvcc.HashStorage
bg BackendGetter
a Alarmer
lt LeaderTransferrer
hdr header
cs ClusterStatusGetter
d Downgrader
vs serverversion.Server
}
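
// NewMaintenanceServer constructs a maintenance server backed by the given
// EtcdServer and wraps it so that every RPC first passes an auth permission
// check (see authMaintenanceServer below).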
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, hasher: s.KV().HashStorage(), bg: s, a: s, lt: s, hdr: newHeader(s), cs: s, d: s, vs: etcdserver.NewServerVersionAdapter(s)}
if srv.lg == nil {
srv.lg = zap.NewNop()
}
return &authMaintenanceServer{srv, &AuthAdmin{s}}
}
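
// Defragment defragments this member's backend database, reclaiming storage
// space freed by deleted and compacted keys.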
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
ms.lg.Info("starting defragment")
err := ms.bg.Backend().Defrag()
if err != nil {
ms.lg.Warn("failed to defragment", zap.Error(err))
return nil, togRPCError(err)
}
ms.lg.Info("finished defragment")
return &pb.DefragmentResponse{}, nil
}

// big enough size to hold >1 OS pages in the buffer
const snapshotSendBufferSize = 32 * 1024

func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx())
storageVersion := ""
if ver != nil {
storageVersion = ver.String()
}
snap := ms.bg.Backend().Snapshot()
pr, pw := io.Pipe()
defer pr.Close()
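// snap.WriteTo blocks until the whole snapshot is consumed, so write it
// into the pipe from a separate goroutine while the loop below streams
// the read end to the client in fixed-size chunks.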
go func() {
snap.WriteTo(pw)
if err := snap.Close(); err != nil {
ms.lg.Warn("failed to close snapshot", zap.Error(err))
}
pw.Close()
}()
// record SHA digest of snapshot data
// used for integrity checks during snapshot restore operation
h := sha256.New()
sent := int64(0)
total := snap.Size()
size := humanize.Bytes(uint64(total))
start := time.Now()
ms.lg.Info("sending database snapshot to client",
zap.Int64("total-bytes", total),
zap.String("size", size),
zap.String("storage-version", storageVersion),
)
for total-sent > 0 {
// buffer just holds read bytes from stream
// response size is multiple of OS page size, fetched in boltdb
// e.g. 4*1024
// NOTE: srv.Send does not wait until the message is received by the client.
// Therefore the buffer cannot be safely reused between Send operations
buf := make([]byte, snapshotSendBufferSize)
n, err := io.ReadFull(pr, buf)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return togRPCError(err)
}
sent += int64(n)
// If total is a multiple of snapshotSendBufferSize, it is possible that
// resp.RemainingBytes == 0 while resp.Blob is a zero-length but non-nil
// byte slice. Does that make the server response nil in proto, so that
// the client stops receiving from the snapshot stream before the server
// sends the snapshot SHA?
// No: the client still receives non-nil responses until the server
// closes the stream with EOF.
resp := &pb.SnapshotResponse{
RemainingBytes: uint64(total - sent),
Blob: buf[:n],
Version: storageVersion,
}
if err = srv.Send(resp); err != nil {
return togRPCError(err)
}
h.Write(buf[:n])
}
// send SHA digest for integrity checks
// during snapshot restore operation
sha := h.Sum(nil)
ms.lg.Info("sending database sha256 checksum to client",
zap.Int64("total-bytes", total),
zap.Int("checksum-size", len(sha)),
)
hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion}
if err := srv.Send(hresp); err != nil {
return togRPCError(err)
}
ms.lg.Info("successfully sent database snapshot to client",
zap.Int64("total-bytes", total),
zap.String("size", size),
zap.Duration("took", time.Since(start)),
zap.String("storage-version", storageVersion),
)
return nil
}
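
// Hash computes a hash of the entire backend database; it can be used to
// verify the integrity of the etcd data.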
func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
h, rev, err := ms.hasher.Hash()
if err != nil {
return nil, togRPCError(err)
}
resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
ms.hdr.fill(resp.Header)
return resp, nil
}
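
// HashKV computes a hash of the MVCC key-value state up to the given
// revision, which members can compare to detect data inconsistency.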
func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
h, rev, err := ms.hasher.HashByRev(r.Revision)
if err != nil {
return nil, togRPCError(err)
}
resp := &pb.HashKVResponse{
Header: &pb.ResponseHeader{Revision: rev},
Hash: h.Hash,
CompactRevision: h.CompactRevision,
HashRevision: h.Revision,
}
ms.hdr.fill(resp.Header)
return resp, nil
}
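
// Alarm forwards alarm get/activate/deactivate requests to the server and
// fills in the response header.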
func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
resp, err := ms.a.Alarm(ctx, ar)
if err != nil {
return nil, togRPCError(err)
}
if resp.Header == nil {
resp.Header = &pb.ResponseHeader{}
}
ms.hdr.fill(resp.Header)
return resp, nil
}
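
// Status reports this member's view of the cluster: versions, leader, Raft
// indexes and term, backend sizes, learner status, and any active alarms or
// a missing leader as error strings.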
func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
hdr := &pb.ResponseHeader{}
ms.hdr.fill(hdr)
resp := &pb.StatusResponse{
Header: hdr,
Version: version.Version,
Leader: uint64(ms.rg.Leader()),
RaftIndex: ms.rg.CommittedIndex(),
RaftAppliedIndex: ms.rg.AppliedIndex(),
RaftTerm: ms.rg.Term(),
DbSize: ms.bg.Backend().Size(),
DbSizeInUse: ms.bg.Backend().SizeInUse(),
IsLearner: ms.cs.IsLearner(),
}
if storageVersion := ms.vs.GetStorageVersion(); storageVersion != nil {
resp.StorageVersion = storageVersion.String()
}
if resp.Leader == raft.None {
resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error())
}
for _, a := range ms.a.Alarms() {
resp.Errors = append(resp.Errors, a.String())
}
return resp, nil
}
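
// MoveLeader transfers leadership to the target member. Only the current
// leader serves this request; other members reject it with ErrGRPCNotLeader.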
func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
if ms.rg.MemberId() != ms.rg.Leader() {
return nil, rpctypes.ErrGRPCNotLeader
}
if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
return nil, togRPCError(err)
}
return &pb.MoveLeaderResponse{}, nil
}
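
// Downgrade forwards a cluster downgrade request to the server and stamps a
// fresh response header.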
func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
resp, err := ms.d.Downgrade(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
resp.Header = &pb.ResponseHeader{}
ms.hdr.fill(resp.Header)
return resp, nil
}
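
// authMaintenanceServer wraps maintenanceServer so that every RPC first
// checks the caller's permissions. Note that isPermitted errors are passed
// through togRPCError: a follower that has not yet applied the log entry
// for a token fails with an auth error, and only the gRPC status form lets
// the client detect this and retry with a refreshed token (see #15758).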
type authMaintenanceServer struct {
*maintenanceServer
*AuthAdmin
}

func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
if err := ams.isPermitted(ctx); err != nil {
return nil, togRPCError(err)
}
return ams.maintenanceServer.Defragment(ctx, sr)
}

func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
if err := ams.isPermitted(srv.Context()); err != nil {
return togRPCError(err)
}
return ams.maintenanceServer.Snapshot(sr, srv)
}

func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
if err := ams.isPermitted(ctx); err != nil {
return nil, togRPCError(err)
}
return ams.maintenanceServer.Hash(ctx, r)
}

func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
if err := ams.isPermitted(ctx); err != nil {
return nil, togRPCError(err)
}
return ams.maintenanceServer.HashKV(ctx, r)
}

func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
if err := ams.isPermitted(ctx); err != nil {
return nil, togRPCError(err)
}
return ams.maintenanceServer.Status(ctx, ar)
}

func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
if err := ams.isPermitted(ctx); err != nil {
return nil, togRPCError(err)
}
return ams.maintenanceServer.MoveLeader(ctx, tr)
}

func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
if err := ams.isPermitted(ctx); err != nil {
return nil, togRPCError(err)
}
return ams.maintenanceServer.Downgrade(ctx, r)
}