*: over 20 staticcheck fixes

wpedrak 2021-03-18 15:05:22 +01:00
parent 2932969b91
commit dac6e37ea1
21 changed files with 61 additions and 68 deletions


@@ -348,7 +348,7 @@ func newClient(cfg *Config) (*Client, error) {
 		return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
 	}
 	callOpts := []grpc.CallOption{
-		defaultFailFast,
+		defaultWaitForReady,
 		defaultMaxCallSendMsgSize,
 		defaultMaxCallRecvMsgSize,
 	}


@@ -238,17 +238,17 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
 func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
 	r := toLeaseTimeToLiveRequest(id, opts...)
 	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
-	if err == nil {
-		gresp := &LeaseTimeToLiveResponse{
-			ResponseHeader: resp.GetHeader(),
-			ID:             LeaseID(resp.ID),
-			TTL:            resp.TTL,
-			GrantedTTL:     resp.GrantedTTL,
-			Keys:           resp.Keys,
-		}
-		return gresp, nil
+	if err != nil {
+		return nil, toErr(ctx, err)
 	}
-	return nil, toErr(ctx, err)
+	gresp := &LeaseTimeToLiveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+		GrantedTTL:     resp.GrantedTTL,
+		Keys:           resp.Keys,
+	}
+	return gresp, nil
 }

 func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
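The restructuring above is the guard-clause shape idiomatic Go (and staticcheck) prefers: return on error first so the success path stays at the top indentation level. A self-contained sketch of the same pattern, with fetch and get as illustrative stand-ins rather than etcd APIs:

package main

import (
	"errors"
	"fmt"
)

// fetch stands in for a remote call that can fail.
func fetch(ok bool) (string, error) {
	if !ok {
		return "", errors.New("backend unavailable")
	}
	return "value", nil
}

// get shows the guard-clause shape: handle the error and return
// immediately, so the success path is not nested inside an if block.
func get(ok bool) (string, error) {
	v, err := fetch(ok)
	if err != nil {
		return "", fmt.Errorf("get: %w", err)
	}
	return v, nil
}

func main() {
	fmt.Println(get(true))
	fmt.Println(get(false))
}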


@@ -23,10 +23,10 @@ import (
 var (
 	// client-side handling retrying of request failures where data was not written to the wire or
-	// where server indicates it did not process the data. gRPC default is default is "FailFast(true)"
-	// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
+	// where server indicates it did not process the data. gRPC default is default is "WaitForReady(false)"
+	// but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
 	// transient failures.
-	defaultFailFast = grpc.FailFast(false)
+	defaultWaitForReady = grpc.WaitForReady(true)

 	// client-side request send limit, gRPC default is math.MaxInt32
 	// Make sure that "client-side send limit < server-side default send/recv limit"
@@ -59,7 +59,11 @@ var (
 // defaultCallOpts defines a list of default "gRPC.CallOption".
 // Some options are exposed to "clientv3.Config".
 // Defaults will be overridden by the settings in "clientv3.Config".
-var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
+var defaultCallOpts = []grpc.CallOption{
+	defaultWaitForReady,
+	defaultMaxCallSendMsgSize,
+	defaultMaxCallRecvMsgSize,
+}

 // MaxLeaseTTL is the maximum lease TTL value
 const MaxLeaseTTL = 9000000000
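Background for this rename: grpc-go deprecated grpc.FailFast in favor of grpc.WaitForReady, with the polarity inverted, so FailFast(false) becomes WaitForReady(true). A minimal standalone sketch of the resulting call-option setup; the message-size limits are hypothetical placeholders, not etcd's real defaults:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// defaultCallOpts mirrors the pattern in this commit: WaitForReady(true)
// (the replacement for the deprecated FailFast(false)) makes RPCs queue
// until a connection is ready instead of failing fast on transient errors.
func defaultCallOpts() []grpc.CallOption {
	return []grpc.CallOption{
		grpc.WaitForReady(true),
		grpc.MaxCallSendMsgSize(2 * 1024 * 1024), // hypothetical send limit
		grpc.MaxCallRecvMsgSize(4 * 1024 * 1024), // hypothetical recv limit
	}
}

func main() {
	fmt.Printf("using %d default call options\n", len(defaultCallOpts()))
}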


@@ -120,8 +120,8 @@ func (ep *ExpectProcess) ExpectFunc(f func(string) bool) (string, error) {
 		}
 	}
 	ep.mu.Unlock()
-	return "", fmt.Errorf("Match not found."+
-		" Set EXPECT_DEBUG for more info Err: %v, last lines:\n%s\n\n",
+	return "", fmt.Errorf("match not found."+
+		" Set EXPECT_DEBUG for more info Err: %v, last lines:\n%s",
 		ep.err, strings.Join(lastLinesBuffer, ""))
 }
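This fixes staticcheck ST1005: error strings should start lower-case and not end with punctuation or newlines, because callers usually wrap them into longer messages. A small runnable illustration (find is a hypothetical helper):

package main

import (
	"errors"
	"fmt"
)

func find() error {
	// ST1005 would flag: errors.New("Match not found.\n")
	return errors.New("match not found")
}

func main() {
	if err := find(); err != nil {
		fmt.Println("expect:", err) // reads well when embedded mid-sentence
	}
}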


@@ -36,7 +36,7 @@ func (lo *ListenerOptions) IsSocketOpts() bool {
 	if lo.socketOpts == nil {
 		return false
 	}
-	return lo.socketOpts.ReusePort == true || lo.socketOpts.ReuseAddress == true
+	return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress
 }

 // IsTLS returns true if listner options includes TLSInfo.
@@ -44,7 +44,7 @@ func (lo *ListenerOptions) IsTLS() bool {
 	if lo.tlsInfo == nil {
 		return false
 	}
-	return lo.tlsInfo.Empty() == false
+	return !lo.tlsInfo.Empty()
 }

 // ListenerOption are options which can be applied to the listener.


@@ -41,5 +41,5 @@ func getControls(sopts *SocketOpts) Controls {
 }

 func (sopts *SocketOpts) Empty() bool {
-	return sopts.ReuseAddress == false && sopts.ReusePort == false
+	return !sopts.ReuseAddress && !sopts.ReusePort
 }
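Both this and the previous transport fix are staticcheck S1002: comparing a boolean against true or false is redundant. The same simplification in a self-contained sketch:

package main

import "fmt"

type SocketOpts struct {
	ReuseAddress bool
	ReusePort    bool
}

// Empty uses the boolean values directly instead of
// "s.ReuseAddress == false && s.ReusePort == false".
func (s SocketOpts) Empty() bool {
	return !s.ReuseAddress && !s.ReusePort
}

func main() {
	fmt.Println(SocketOpts{}.Empty())                // true
	fmt.Println(SocketOpts{ReusePort: true}.Empty()) // false
}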


@@ -461,7 +461,7 @@ func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.
 		CheckQuorum: true,
 		PreVote:     cfg.PreVote,
 	}
-	c.Logger, err = getRaftLogger(cfg)
+	c.Logger, _ = getRaftLogger(cfg)

 	if len(peers) == 0 {
 		n = raft.RestartNode(c)


@@ -268,7 +268,7 @@ func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, err
 		keep := s.kvindex.Compact(rev)
 		indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
 		if !s.scheduleCompaction(rev, keep) {
-			s.compactBarrier(nil, ch)
+			s.compactBarrier(context.TODO(), ch)
 			return
 		}
 		close(ch)
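Replacing nil with context.TODO() addresses staticcheck SA1012: calling a method on a nil Context panics, and context.TODO() is the documented placeholder when no context is plumbed through yet. A sketch under that assumption; barrier here only stands in for compactBarrier and is not its real implementation:

package main

import (
	"context"
	"fmt"
)

// barrier blocks until the context is cancelled or the channel is
// released; with a nil ctx, ctx.Done() would panic at runtime.
func barrier(ctx context.Context, ch chan struct{}) {
	select {
	case <-ctx.Done():
		fmt.Println("cancelled:", ctx.Err())
	case <-ch:
		fmt.Println("released")
	}
}

func main() {
	ch := make(chan struct{})
	close(ch)
	barrier(context.TODO(), ch) // context.TODO(), never nil
}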


@@ -356,8 +356,7 @@ func (s *watchableStore) syncWatchers() int {
 	tx.RLock()
 	revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
 	tx.RUnlock()
-	var evs []mvccpb.Event
-	evs = kvsToEvents(s.store.lg, wg, revs, vs)
+	evs := kvsToEvents(s.store.lg, wg, revs, vs)

 	var victims watcherBatch
 	wb := newWatcherBatch(wg, evs)


@@ -74,7 +74,7 @@ func NewLeaseProxy(ctx context.Context, c *clientv3.Client) (pb.LeaseServer, <-c
 }

 func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
-	rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.FailFast(false))
+	rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}


@@ -186,7 +186,7 @@ func (m *Member) RevHash() (int64, int64, error) {
 	mt := pb.NewMaintenanceClient(conn)
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	resp, err := mt.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
+	resp, err := mt.Hash(ctx, &pb.HashRequest{}, grpc.WaitForReady(true))
 	cancel()
 	if err != nil {


@@ -189,7 +189,7 @@ func (lc *leaseExpireChecker) check(expired bool, leases map[int64]time.Time) er
 	return nil
 }

-// TODO: handle failures from "grpc.FailFast(false)"
+// TODO: handle failures from "grpc.WaitForReady(true)"
 func (lc *leaseExpireChecker) getLeaseByID(ctx context.Context, leaseID int64) (*clientv3.LeaseTimeToLiveResponse, error) {
 	return lc.cli.TimeToLive(
 		ctx,


@@ -122,7 +122,7 @@ func (s *keyStresser) run() {
 		// and immediate leader election. Find out what other cases this
 		// could be timed out.
 		sctx, scancel := context.WithTimeout(s.ctx, 10*time.Second)
-		err, modifiedKeys := s.stressTable.choose()(sctx)
+		modifiedKeys, err := s.stressTable.choose()(sctx)
 		scancel()
 		if err == nil {
 			atomic.AddInt64(&s.atomicModifiedKeys, modifiedKeys)
@@ -221,7 +221,7 @@ func (s *keyStresser) ModifiedKeys() int64 {
 	return atomic.LoadInt64(&s.atomicModifiedKeys)
 }

-type stressFunc func(ctx context.Context) (err error, modifiedKeys int64)
+type stressFunc func(ctx context.Context) (modifiedKeys int64, err error)

 type stressEntry struct {
 	weight float64
@@ -256,13 +256,13 @@ func (st *stressTable) choose() stressFunc {
 }

 func newStressPut(cli *clientv3.Client, keySuffixRange, keySize int) stressFunc {
-	return func(ctx context.Context) (error, int64) {
+	return func(ctx context.Context) (int64, error) {
 		_, err := cli.Put(
 			ctx,
 			fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)),
 			string(randBytes(keySize)),
 		)
-		return err, 1
+		return 1, err
 	}
 }
@@ -275,7 +275,7 @@ func newStressTxn(cli *clientv3.Client, keyTxnSuffixRange, txnOps int) stressFun
 }

 func writeTxn(cli *clientv3.Client, keys []string, txnOps int) stressFunc {
-	return func(ctx context.Context) (error, int64) {
+	return func(ctx context.Context) (int64, error) {
 		ks := make(map[string]struct{}, txnOps)
 		for len(ks) != txnOps {
 			ks[keys[rand.Intn(len(keys))]] = struct{}{}
@@ -303,7 +303,7 @@ func writeTxn(cli *clientv3.Client, keys []string, txnOps int) stressFunc {
 			Then(thenOps...).
 			Else(elseOps...).
 			Commit()
-		return err, int64(txnOps)
+		return int64(txnOps), err
 	}
 }
@@ -319,14 +319,14 @@ func getTxnOps(k, v string) (
 }

 func newStressRange(cli *clientv3.Client, keySuffixRange int) stressFunc {
-	return func(ctx context.Context) (error, int64) {
+	return func(ctx context.Context) (int64, error) {
 		_, err := cli.Get(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)))
-		return err, 0
+		return 0, err
 	}
 }

 func newStressRangeInterval(cli *clientv3.Client, keySuffixRange int) stressFunc {
-	return func(ctx context.Context) (error, int64) {
+	return func(ctx context.Context) (int64, error) {
 		start := rand.Intn(keySuffixRange)
 		end := start + 500
 		_, err := cli.Get(
@@ -334,19 +334,19 @@ func newStressRangeInterval(cli *clientv3.Client, keySuffixRange int) stressFunc
 			fmt.Sprintf("foo%016x", start),
 			clientv3.WithRange(fmt.Sprintf("foo%016x", end)),
 		)
-		return err, 0
+		return 0, err
 	}
 }

 func newStressDelete(cli *clientv3.Client, keySuffixRange int) stressFunc {
-	return func(ctx context.Context) (error, int64) {
+	return func(ctx context.Context) (int64, error) {
 		_, err := cli.Delete(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)))
-		return err, 1
+		return 1, err
 	}
 }

 func newStressDeleteInterval(cli *clientv3.Client, keySuffixRange int) stressFunc {
-	return func(ctx context.Context) (error, int64) {
+	return func(ctx context.Context) (int64, error) {
 		start := rand.Intn(keySuffixRange)
 		end := start + 500
 		resp, err := cli.Delete(ctx,
@@ -354,8 +354,8 @@ func newStressDeleteInterval(cli *clientv3.Client, keySuffixRange int) stressFun
 			clientv3.WithRange(fmt.Sprintf("foo%016x", end)),
 		)
 		if err == nil {
-			return nil, resp.Deleted
+			return resp.Deleted, nil
 		}
-		return err, 0
+		return 0, err
 	}
 }
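Most of this file is staticcheck ST1008: by convention the error is a function's last return value, so signatures change from (error, int64) to (int64, error). A compact sketch of the corrected signature shape, with toy stress functions in place of the real etcd client calls:

package main

import (
	"errors"
	"fmt"
)

// stressFunc puts error last, so call sites read (modifiedKeys, err)
// rather than the unconventional (err, modifiedKeys).
type stressFunc func() (modifiedKeys int64, err error)

func main() {
	put := stressFunc(func() (int64, error) { return 1, nil })
	fail := stressFunc(func() (int64, error) { return 0, errors.New("timeout") })

	for _, f := range []stressFunc{put, fail} {
		if n, err := f(); err != nil {
			fmt.Println("stress error:", err)
		} else {
			fmt.Println("modified", n, "keys")
		}
	}
}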


@@ -68,20 +68,15 @@ func TestResumeElection(t *testing.T) {
 		defer close(respChan)
 		o := e.Observe(ctx)
 		respChan <- nil
-		for {
-			select {
-			case resp, ok := <-o:
-				if !ok {
-					t.Error("Observe() channel closed prematurely")
-				}
-				// Ignore any observations that candidate1 was elected
-				if string(resp.Kvs[0].Value) == "candidate1" {
-					continue
-				}
-				respChan <- &resp
-				return
-			}
-		}
+		for resp := range o {
+			// Ignore any observations that candidate1 was elected
+			if string(resp.Kvs[0].Value) == "candidate1" {
+				continue
+			}
+			respChan <- &resp
+			return
+		}
+		t.Error("Observe() channel closed prematurely")
 	}()

 	// wait until observe goroutine is running
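This is staticcheck S1000: a for/select with a single receive case is equivalent to ranging over the channel, and range also exits cleanly when the channel is closed, which replaces the manual ok check. A runnable illustration of the same shape:

package main

import "fmt"

func main() {
	o := make(chan string, 2)
	o <- "candidate1"
	o <- "candidate2"
	close(o)

	// range receives until the channel is closed; no select, no ok flag.
	for v := range o {
		if v == "candidate1" {
			continue // ignore, as the test does
		}
		fmt.Println("observed:", v)
	}
}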


@@ -32,6 +32,7 @@ import (
 	"go.etcd.io/etcd/tests/v3/integration"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )

 func TestKVPutError(t *testing.T) {
@@ -951,7 +952,7 @@ func TestKVLargeRequests(t *testing.T) {
 			maxCallSendBytesClient: 10 * 1024 * 1024,
 			maxCallRecvBytesClient: 0,
 			valueSize:              10 * 1024 * 1024,
-			expectError:            grpc.Errorf(codes.ResourceExhausted, "trying to send message larger than max "),
+			expectError:            status.Errorf(codes.ResourceExhausted, "trying to send message larger than max "),
 		},
 		{
 			maxRequestBytesServer:  10 * 1024 * 1024,
@@ -965,7 +966,7 @@ func TestKVLargeRequests(t *testing.T) {
 			maxCallSendBytesClient: 10 * 1024 * 1024,
 			maxCallRecvBytesClient: 0,
 			valueSize:              10*1024*1024 + 5,
-			expectError:            grpc.Errorf(codes.ResourceExhausted, "trying to send message larger than max "),
+			expectError:            status.Errorf(codes.ResourceExhausted, "trying to send message larger than max "),
 		},
 	}

 	for i, test := range tests {
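grpc.Errorf is deprecated (flagged by staticcheck SA1019); status.Errorf from google.golang.org/grpc/status is its drop-in replacement and produces the same gRPC status error. A minimal example:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// status.Errorf builds a *status.Status-backed error, just as the
	// deprecated grpc.Errorf did.
	err := status.Errorf(codes.ResourceExhausted, "trying to send message larger than max")
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message())
	}
}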


@@ -103,7 +103,6 @@ func TestDetectTxnOrderViolation(t *testing.T) {
 		},
 	}
 	cli, err := clientv3.New(cfg)
-	defer cli.Close()
 	if err != nil {
 		t.Fatal(err)
 	}


@@ -528,10 +528,6 @@ func (c *cluster) waitVersion() {
 	}
 }

-func (c *cluster) name(i int) string {
-	return fmt.Sprint(i)
-}
-
 // isMembersEqual checks whether two members equal except ID field.
 // The given wmembs should always set ID field to empty string.
 func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {


@@ -97,7 +97,6 @@ func testSetKV(t testing.TB, endpoints []string) {
 	testCases := []struct {
 		key            string
 		value          string
-		dir            bool
 		wantIndexMatch bool
 	}{
 		{key: "/sdir/set", value: "1", wantIndexMatch: true},
@@ -147,7 +146,7 @@ func testCreateSetDir(t testing.TB, endpoints []string) {
 	v2 := v2v3.NewStore(cli, "")
 	for ti, tc := range testCases {
-		ev, err := v2.Create(tc.dir, true, "", false, v2store.TTLOptionSet{})
+		_, err := v2.Create(tc.dir, true, "", false, v2store.TTLOptionSet{})
 		if err != nil {
 			t.Skipf("%d: got err %v", ti, err)
 		}
@@ -156,7 +155,7 @@ func testCreateSetDir(t testing.TB, endpoints []string) {
 			t.Skipf("%d: expected err got nil", ti)
 		}

-		ev, err = v2.Delete("ddir", true, true)
+		ev, err := v2.Delete("ddir", true, true)
 		if err != nil {
 			t.Skipf("%d: got err %v", ti, err)
 		}


@@ -79,7 +79,7 @@ func TestV3KVInflightRangeRequests(t *testing.T) {
 	for i := 0; i < reqN; i++ {
 		go func() {
 			defer wg.Done()
-			_, err := kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo"), Serializable: true}, grpc.FailFast(false))
+			_, err := kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo"), Serializable: true}, grpc.WaitForReady(true))
 			if err != nil {
 				errCode := status.Convert(err).Code()
 				errDesc := rpctypes.ErrorDesc(err)


@@ -1943,7 +1943,7 @@ func waitForRestart(t *testing.T, kvc pb.KVClient) {
 	// TODO: Remove retry loop once the new grpc load balancer provides retry.
 	var err error
 	for i := 0; i < 10; i++ {
-		if _, err = kvc.Range(context.TODO(), req, grpc.FailFast(false)); err != nil {
+		if _, err = kvc.Range(context.TODO(), req, grpc.WaitForReady(true)); err != nil {
 			if status, ok := status.FromError(err); ok && status.Code() == codes.Unavailable {
 				time.Sleep(time.Millisecond * 250)
 			} else {


@@ -50,13 +50,13 @@ func TestKVWithEmptyValue(t *testing.T) {
 	}

 	//Remove all keys without WithFromKey/WithPrefix func
-	respDel, err := client.Delete(context.Background(), "")
+	_, err = client.Delete(context.Background(), "")
 	if err == nil {
 		// fatal error duo to without WithFromKey/WithPrefix func called.
 		t.Fatal(err)
 	}

-	respDel, err = client.Delete(context.Background(), "", clientv3.WithFromKey())
+	respDel, err := client.Delete(context.Background(), "", clientv3.WithFromKey())
 	if err != nil {
 		// fatal error duo to with WithFromKey/WithPrefix func called.
 		t.Fatal(err)
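The remaining test fixes clear unused and ineffectually assigned variables flagged by staticcheck: bind _ when only the error matters, and declare a variable only where its value is actually read. A standalone sketch of both shapes (del is a hypothetical helper, not the etcd client):

package main

import (
	"errors"
	"fmt"
)

// del pretends to delete keys, failing unless a range is requested.
func del(all bool) (int, error) {
	if !all {
		return 0, errors.New("refusing to delete without a range")
	}
	return 3, nil
}

func main() {
	// Only the error matters here, so discard the count with _ instead
	// of binding a variable that would sit unused.
	_, err := del(false)
	fmt.Println("expected failure:", err)

	// Here the count is used, so it earns a name.
	deleted, err := del(true)
	if err != nil {
		panic(err)
	}
	fmt.Println("deleted", deleted, "keys")
}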