*: fix govet-shadow lint

Signed-off-by: Wei Fu <fuweid89@gmail.com>
Wei Fu 2023-09-18 19:58:29 +08:00
parent c70ac642ae
commit 5e3910d96c
44 changed files with 184 additions and 141 deletions


@ -64,7 +64,8 @@ fuzz:
verify: verify-gofmt verify-bom verify-lint verify-dep verify-shellcheck verify-goword \
verify-govet verify-license-header verify-receiver-name verify-mod-tidy verify-shellcheck \
verify-shellws verify-proto-annotations verify-genproto verify-goimport verify-yamllint
verify-shellws verify-proto-annotations verify-genproto verify-goimport verify-yamllint \
verify-govet-shadow
fix: fix-goimports fix-bom fix-lint fix-yamllint
./scripts/fix.sh
@ -140,6 +141,10 @@ fix-goimports:
verify-yamllint:
yamllint --config-file tools/.yamllint .
.PHONY: verify-govet-shadow
verify-govet-shadow:
PASSES="govet_shadow" ./scripts/test.sh
YAMLFMT_VERSION = $(shell cd tools/mod && go list -m -f '{{.Version}}' github.com/google/yamlfmt)
.PHONY: fix-yamllint


@ -409,9 +409,9 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKe
}
defer func() {
if err := stream.CloseSend(); err != nil {
if cerr := stream.CloseSend(); cerr != nil {
if ferr == nil {
ferr = toErr(ctx, err)
ferr = toErr(ctx, cerr)
}
return
}


@ -244,18 +244,19 @@ func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotRespons
}
go func() {
// Saving response is blocking
err = m.save(resp, pw)
err := m.save(resp, pw)
if err != nil {
m.logAndCloseWithError(err, pw)
return
}
for {
resp, err := ss.Recv()
sresp, err := ss.Recv()
if err != nil {
m.logAndCloseWithError(err, pw)
return
}
err = m.save(resp, pw)
err = m.save(sresp, pw)
if err != nil {
m.logAndCloseWithError(err, pw)
return
@ -267,7 +268,7 @@ func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotRespons
Header: resp.GetHeader(),
Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr},
Version: resp.GetVersion(),
}, err
}, nil
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
@ -293,7 +294,7 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
}
}
}()
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, err
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}
func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) {
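The `sresp` rename and the switch to returning `nil` deal with shadowing inside the function literal: the goroutine's `resp, err := ss.Recv()` shadowed the outer `resp`, which is still read for `Header: resp.GetHeader()`, and the shared `err` used to be written from inside the goroutine, so returning it was racy at best. Now the goroutine owns its error handling through `logAndCloseWithError` and the enclosing functions return `nil`. A hypothetical sketch (invented names) of how the analyzer treats closures:

```go
// Hypothetical sketch; not part of this change.
package example

type chunk struct{ header string }

func recvChunk() (*chunk, error) { return &chunk{header: "h"}, nil }
func saveChunk(c *chunk) error   { return nil }

func streamHeader() (string, error) {
	first, err := recvChunk()
	if err != nil {
		return "", err
	}
	go func() {
		for {
			// ':=' declares a goroutine-local `first` (and `err`), shadowing
			// the outer variables; the shadow analyzer flags `first` because
			// it is still read after the closure, in the return statement
			// below. Renaming the inner variable, as done with sresp above,
			// resolves the report.
			first, err := recvChunk()
			if err != nil {
				return
			}
			if err := saveChunk(first); err != nil {
				return
			}
		}
	}()
	return first.header, nil
}
```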


@ -144,16 +144,16 @@ func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
if b == nil {
return fmt.Errorf("cannot get hash of bucket %s", string(next))
}
if _, err := h.Write(next); err != nil {
if _, err = h.Write(next); err != nil {
return fmt.Errorf("cannot write bucket %s : %v", string(next), err)
}
iskeyb := (string(next) == "key")
if err := b.ForEach(func(k, v []byte) error {
if _, err := h.Write(k); err != nil {
return fmt.Errorf("cannot write to bucket %s", err.Error())
if err = b.ForEach(func(k, v []byte) error {
if _, herr := h.Write(k); herr != nil {
return fmt.Errorf("cannot write to bucket %s", herr.Error())
}
if _, err := h.Write(v); err != nil {
return fmt.Errorf("cannot write to bucket %s", err.Error())
if _, herr := h.Write(v); herr != nil {
return fmt.Errorf("cannot write to bucket %s", herr.Error())
}
if iskeyb {
rev := bytesToRev(k)


@ -53,8 +53,7 @@ func TestExpectFuncTimeout(t *testing.T) {
go func() {
// It's enough to have "talkative" process to stuck in the infinite loop of reading
for {
err := ep.Send("new line\n")
if err != nil {
if serr := ep.Send("new line\n"); serr != nil {
return
}
}
@ -67,7 +66,7 @@ func TestExpectFuncTimeout(t *testing.T) {
require.ErrorAs(t, err, &context.DeadlineExceeded)
if err := ep.Stop(); err != nil {
if err = ep.Stop(); err != nil {
t.Fatal(err)
}
@ -111,7 +110,7 @@ func TestExpectFuncExitFailureStop(t *testing.T) {
require.Equal(t, 1, exitCode)
require.NoError(t, err)
if err := ep.Stop(); err != nil {
if err = ep.Stop(); err != nil {
t.Fatal(err)
}
err = ep.Close()


@ -181,33 +181,33 @@ func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) {
var steps []string
lastStepTime := t.startTime
for i := 0; i < len(t.steps); i++ {
step := t.steps[i]
tstep := t.steps[i]
// add subtrace common fields which defined at the beginning to each sub-steps
if step.isSubTraceStart {
if tstep.isSubTraceStart {
for j := i + 1; j < len(t.steps) && !t.steps[j].isSubTraceEnd; j++ {
t.steps[j].fields = append(step.fields, t.steps[j].fields...)
t.steps[j].fields = append(tstep.fields, t.steps[j].fields...)
}
continue
}
// add subtrace common fields which defined at the end to each sub-steps
if step.isSubTraceEnd {
if tstep.isSubTraceEnd {
for j := i - 1; j >= 0 && !t.steps[j].isSubTraceStart; j-- {
t.steps[j].fields = append(step.fields, t.steps[j].fields...)
t.steps[j].fields = append(tstep.fields, t.steps[j].fields...)
}
continue
}
}
for i := 0; i < len(t.steps); i++ {
step := t.steps[i]
if step.isSubTraceStart || step.isSubTraceEnd {
tstep := t.steps[i]
if tstep.isSubTraceStart || tstep.isSubTraceEnd {
continue
}
stepDuration := step.time.Sub(lastStepTime)
stepDuration := tstep.time.Sub(lastStepTime)
if stepDuration > threshold {
steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)",
traceNum, step.msg, writeFields(step.fields), stepDuration))
traceNum, tstep.msg, writeFields(tstep.fields), stepDuration))
}
lastStepTime = step.time
lastStepTime = tstep.time
}
fs := []zap.Field{zap.String("detail", writeFields(t.fields)),


@ -379,13 +379,45 @@ function govet_pass {
run_for_modules generic_checker run go vet
}
function govet_shadow_pass {
# TODO: we should ignore the generated packages?
function govet_shadow_per_package {
local shadow
shadow=$1
# skip grpc_gateway packages because
#
# stderr: etcdserverpb/gw/rpc.pb.gw.go:2100:3: declaration of "ctx" shadows declaration at line 2005
local skip_pkgs=(
"go.etcd.io/etcd/api/v3/etcdserverpb/gw"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw"
)
local pkgs=()
while IFS= read -r line; do
local in_skip_pkgs="false"
for pkg in "${skip_pkgs[@]}"; do
if [ "${pkg}" == "${line}" ]; then
in_skip_pkgs="true"
break
fi
done
if [ "${in_skip_pkgs}" == "true" ]; then
continue
fi
pkgs+=("${line}")
done < <(go list ./...)
run go vet -all -vettool="${shadow}" "${pkgs[@]}"
}
function govet_shadow_pass {
local shadow
shadow=$(tool_get_bin "golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow")
run_for_modules generic_checker run go vet -all -vettool="${shadow}"
run_for_modules generic_checker govet_shadow_per_package "${shadow}"
}
function unparam_pass {
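For readers unfamiliar with the check: the new `verify-govet-shadow` target runs `PASSES="govet_shadow" ./scripts/test.sh`, which invokes `go vet -all -vettool=<shadow>` on the module's packages while skipping the generated grpc-gateway packages quoted in the comment above. The sketch below is hypothetical (the names are invented, not taken from etcd) and shows the pattern the analyzer reports plus the two fix styles used throughout this commit: reuse the outer variable with `=`, or rename the inner one.

```go
// Hypothetical sketch; not part of this change.
package example

import (
	"fmt"
	"io"
	"os"
)

func fileSize(path string) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// The ':=' below declares a new err scoped to the if statement, shadowing
	// the err above. Because the outer err is mentioned again afterwards, the
	// shadow analyzer reports something like:
	//   declaration of "err" shadows declaration at line ...
	// The fixes in this commit either drop the ':=' (if _, err = f.Seek(...))
	// or rename the inner variable (cerr, serr, herr, ...).
	if _, err := f.Seek(0, io.SeekEnd); err != nil {
		return 0, fmt.Errorf("seek: %w", err)
	}

	fi, err := f.Stat()
	if err != nil {
		return 0, err
	}
	return fi.Size(), nil
}
```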


@ -246,7 +246,7 @@ func (logRotationConfig) Sync() error { return nil }
// setupLogRotation initializes log rotation for a single file path target.
func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
var logRotationConfig logRotationConfig
var logRotationCfg logRotationConfig
outputFilePaths := 0
for _, v := range logOutputs {
switch v {
@ -265,7 +265,7 @@ func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
return ErrLogRotationInvalidLogOutput
}
if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationConfig); err != nil {
if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationCfg); err != nil {
var unmarshalTypeError *json.UnmarshalTypeError
var syntaxError *json.SyntaxError
switch {
@ -278,8 +278,8 @@ func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
}
}
zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
logRotationConfig.Filename = u.Path[1:]
return &logRotationConfig, nil
logRotationCfg.Filename = u.Path[1:]
return &logRotationCfg, nil
})
return nil
}
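The `logRotationCfg` rename removes a variable that reused the name of its own type: `var logRotationConfig logRotationConfig` shadows the type identifier (presumably what the analyzer objected to, since the variable's type and the shadowed declaration are identical), and inside that scope the name can no longer refer to the type at all. A hypothetical sketch with invented names:

```go
// Hypothetical sketch; not part of this change.
package example

import "encoding/json"

type rotateConfig struct {
	Filename string `json:"filename"`
	MaxSize  int    `json:"maxsize"`
}

func parseRotateConfig(raw string) (*rotateConfig, error) {
	// This declaration shadows the type identifier rotateConfig; from here to
	// the end of the function the name refers to the variable, so e.g.
	// new(rotateConfig) would no longer compile in this scope. Renaming the
	// variable keeps both usable and satisfies the shadow check.
	var rotateConfig rotateConfig
	if err := json.Unmarshal([]byte(raw), &rotateConfig); err != nil {
		return nil, err
	}
	return &rotateConfig, nil
}
```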


@ -231,9 +231,9 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
if srvcfg.ExperimentalEnableDistributedTracing {
tctx := context.Background()
tracingExporter, err := newTracingExporter(tctx, cfg)
if err != nil {
return e, err
tracingExporter, terr := newTracingExporter(tctx, cfg)
if terr != nil {
return e, terr
}
e.tracingExporterShutdown = func() {
tracingExporter.Close(tctx)


@ -153,7 +153,7 @@ func (sctx *serveCtx) serve(
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
if err := configureHttpServer(srv, s.Cfg); err != nil {
if err = configureHttpServer(srv, s.Cfg); err != nil {
sctx.lg.Error("Configure http server failed", zap.Error(err))
return err
}


@ -216,7 +216,7 @@ func TestPeriodicSkipRevNotChange(t *testing.T) {
fc.Advance(tb.getRetryInterval())
}
_, err := compactable.Wait(1)
_, err = compactable.Wait(1)
if err == nil {
t.Fatal(errors.New("should not compact since the revision not change"))
}


@ -211,7 +211,7 @@ func (d *discovery) joinCluster(config string) (string, error) {
return "", err
}
if err := d.registerSelf(config); err != nil {
if err = d.registerSelf(config); err != nil {
return "", err
}


@ -190,7 +190,7 @@ func TestBootstrapBackend(t *testing.T) {
}
if tt.prepareData != nil {
if err := tt.prepareData(cfg); err != nil {
if err = tt.prepareData(cfg); err != nil {
t.Fatalf("failed to prepare data, unexpected error: %v", err)
}
}


@ -477,8 +477,10 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
respsLen := len(resps)
var lastErr error
for _, ep := range p.eps {
var resp *pb.HashKVResponse
ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
resp, lastErr := HashByRev(ctx, s.cluster.ID(), cc, ep, rev)
resp, lastErr = HashByRev(ctx, s.cluster.ID(), cc, ep, rev)
cancel()
if lastErr == nil {
resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
@ -535,7 +537,7 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
req := &pb.HashKVRequest{}
if err := json.Unmarshal(b, req); err != nil {
if err = json.Unmarshal(b, req); err != nil {
h.lg.Warn("failed to unmarshal request", zap.Error(err))
http.Error(w, "error unmarshalling request", http.StatusBadRequest)
return
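The added `var resp *pb.HashKVResponse` is what lets the loop keep `=` instead of `:=`: in the original `resp, lastErr := HashByRev(...)`, one new name on the left made `:=` create fresh variables for both, so each iteration assigned a loop-local `lastErr` while the outer one, still referenced later (which is why the shadow analyzer flags it), stayed nil. A hypothetical sketch of the same trap, with invented names:

```go
// Hypothetical sketch; not part of this change.
package example

import "errors"

func fetch(ep string) (string, error) {
	if ep == "good" {
		return "data", nil
	}
	return "", errors.New("unreachable: " + ep)
}

// Buggy: ':=' creates a new resp AND a new lastErr scoped to the loop body,
// so the outer lastErr is never assigned and the function returns a nil error
// even when every endpoint fails. The shadow analyzer points at the ':='.
func firstReachable(eps []string) (string, error) {
	var lastErr error
	for _, ep := range eps {
		resp, lastErr := fetch(ep)
		if lastErr == nil {
			return resp, nil
		}
	}
	return "", lastErr
}

// Fixed, mirroring the change above: declare the response with var and
// assign the outer error with '='.
func firstReachableFixed(eps []string) (string, error) {
	var lastErr error
	for _, ep := range eps {
		var resp string
		resp, lastErr = fetch(ep)
		if lastErr == nil {
			return resp, nil
		}
	}
	return "", lastErr
}
```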


@ -49,7 +49,9 @@ func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL,
if err != nil {
t.Fatalf("Failed to open WAL: %v", err)
}
_, state, _, err := w.ReadAll()
var state raftpb.HardState
_, state, _, err = w.ReadAll()
if err != nil {
t.Fatalf("Failed to read WAL: %v", err)
}


@ -161,7 +161,7 @@ func visitMessageDescriptor(md protoreflect.MessageDescriptor, visitor Visitor)
enums := md.Enums()
for i := 0; i < enums.Len(); i++ {
err := visitEnumDescriptor(enums.Get(i), visitor)
err = visitEnumDescriptor(enums.Get(i), visitor)
if err != nil {
return err
}


@ -61,7 +61,7 @@ func TestAlarm(t *testing.T) {
}
// check that Put is rejected when alarm is on
if err := cc.Put(ctx, "3rd_test", smallbuf, config.PutOptions{}); err != nil {
if err = cc.Put(ctx, "3rd_test", smallbuf, config.PutOptions{}); err != nil {
if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") {
t.Fatal(err)
}


@ -128,7 +128,7 @@ func TestInPlaceRecovery(t *testing.T) {
oldCc, err := e2e.NewEtcdctl(epcOld.Cfg.Client, epcOld.EndpointsGRPC())
assert.NoError(t, err)
for i := 0; i < 10; i++ {
err := oldCc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
err = oldCc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
assert.NoError(t, err, "error on put")
}
@ -203,7 +203,7 @@ func TestPeriodicCheckDetectsCorruption(t *testing.T) {
cc := epc.Etcdctl()
for i := 0; i < 10; i++ {
err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
assert.NoError(t, err, "error on put")
}
@ -249,7 +249,7 @@ func TestCompactHashCheckDetectCorruption(t *testing.T) {
cc := epc.Etcdctl()
for i := 0; i < 10; i++ {
err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
assert.NoError(t, err, "error on put")
}
members, err := cc.MemberList(ctx, false)
@ -318,7 +318,7 @@ func TestCompactHashCheckDetectCorruptionInterrupt(t *testing.T) {
t.Log("putting 10 values to the identical key...")
cc := epc.Etcdctl()
for i := 0; i < 10; i++ {
err := cc.Put(ctx, "key", fmt.Sprint(i), config.PutOptions{})
err = cc.Put(ctx, "key", fmt.Sprint(i), config.PutOptions{})
require.NoError(t, err, "error on put")
}


@ -405,8 +405,8 @@ func TestRestoreCompactionRevBump(t *testing.T) {
t.Log("Ensuring the restored member has the correct data...")
hasKVs(t, ctl, kvs, currentRev, baseRev)
for i := range unsnappedKVs {
v, err := ctl.Get(context.Background(), unsnappedKVs[i].Key, config.GetOptions{})
require.NoError(t, err)
v, gerr := ctl.Get(context.Background(), unsnappedKVs[i].Key, config.GetOptions{})
require.NoError(t, gerr)
require.Equal(t, int64(0), v.Count)
}


@ -57,7 +57,7 @@ func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) {
dcc := MustNewHTTPClient(t, dc.EndpointsHTTP(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
if _, err = dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
t.Fatal(err)
}
cancel()


@ -64,7 +64,7 @@ func testClusterUsingV3Discovery(t *testing.T, discoveryClusterSize, targetClust
discoveryToken := "8A591FAB-1D72-41FA-BDF2-A27162FDA1E0"
configSizeKey := fmt.Sprintf("/_etcd/registry/%s/_config/size", discoveryToken)
configSizeValStr := strconv.Itoa(targetClusterSize)
if err := ctlV3Put(ctlCtx{epc: ds}, configSizeKey, configSizeValStr, ""); err != nil {
if err = ctlV3Put(ctlCtx{epc: ds}, configSizeKey, configSizeValStr, ""); err != nil {
t.Errorf("failed to configure cluster size to discovery serivce, error: %v", err)
}


@ -74,8 +74,8 @@ func mixVersionsSnapshotTestByAddingMember(t *testing.T, clusterVersion, newInst
)
require.NoError(t, err, "failed to start etcd cluster: %v", err)
defer func() {
err := epc.Close()
require.NoError(t, err, "failed to close etcd cluster: %v", err)
derr := epc.Close()
require.NoError(t, derr, "failed to close etcd cluster: %v", derr)
}()
// Write more than SnapshotCount entries to trigger at least a snapshot.
@ -83,7 +83,7 @@ func mixVersionsSnapshotTestByAddingMember(t *testing.T, clusterVersion, newInst
for i := 0; i < 20; i++ {
key := fmt.Sprintf("key-%d", i)
value := fmt.Sprintf("value-%d", i)
err := epc.Etcdctl().Put(context.TODO(), key, value, config.PutOptions{})
err = epc.Etcdctl().Put(context.TODO(), key, value, config.PutOptions{})
require.NoError(t, err, "failed to put %q, error: %v", key, err)
}
@ -156,8 +156,8 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, clusterVersion e2e.Clu
)
require.NoError(t, err, "failed to start etcd cluster: %v", err)
defer func() {
err := epc.Close()
require.NoError(t, err, "failed to close etcd cluster: %v", err)
derr := epc.Close()
require.NoError(t, derr, "failed to close etcd cluster: %v", derr)
}()
toPartitionedMember := epc.Procs[mockPartitionNodeIndex]
@ -170,7 +170,7 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, clusterVersion e2e.Clu
for i := 0; i < 20; i++ {
key := fmt.Sprintf("key-%d", i)
value := fmt.Sprintf("value-%d", i)
err := epc.Etcdctl().Put(context.TODO(), key, value, config.PutOptions{})
err = epc.Etcdctl().Put(context.TODO(), key, value, config.PutOptions{})
require.NoError(t, err, "failed to put %q, error: %v", key, err)
}


@ -62,7 +62,7 @@ func TestReleaseUpgrade(t *testing.T) {
kvs = append(kvs, kv{key: fmt.Sprintf("foo%d", i), val: "bar"})
}
for i := range kvs {
if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
if err = ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
cx.t.Fatalf("#%d: ctlV3Put error (%v)", i, err)
}
}
@ -71,7 +71,7 @@ func TestReleaseUpgrade(t *testing.T) {
for i := range epc.Procs {
t.Logf("Stopping node: %v", i)
if err := epc.Procs[i].Stop(); err != nil {
if err = epc.Procs[i].Stop(); err != nil {
t.Fatalf("#%d: error closing etcd process (%v)", i, err)
}
t.Logf("Stopped node: %v", i)
@ -79,13 +79,13 @@ func TestReleaseUpgrade(t *testing.T) {
epc.Procs[i].Config().KeepDataDir = true
t.Logf("Restarting node in the new version: %v", i)
if err := epc.Procs[i].Restart(context.TODO()); err != nil {
if err = epc.Procs[i].Restart(context.TODO()); err != nil {
t.Fatalf("error restarting etcd process (%v)", err)
}
t.Logf("Testing reads after node restarts: %v", i)
for j := range kvs {
if err := ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...); err != nil {
if err = ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...); err != nil {
cx.t.Fatalf("#%d-%d: ctlV3Get error (%v)", i, j, err)
}
}


@ -44,11 +44,10 @@ func newClient(t *testing.T, entpoints []string, cfg e2e.ClientConfig) *clientv3
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
if tlscfg != nil {
tls, err := tlscfg.ClientConfig()
ccfg.TLS, err = tlscfg.ClientConfig()
if err != nil {
t.Fatal(err)
}
ccfg.TLS = tls
}
c, err := clientv3.New(ccfg)
if err != nil {


@ -91,8 +91,8 @@ func testCurlV3Auth(cx ctlCtx) {
//grant root role
for i := 0; i < len(usernames); i++ {
grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"})
testutil.AssertNil(cx.t, err)
grantroleroot, merr := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"})
testutil.AssertNil(cx.t, merr)
if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
Endpoint: "/v3/auth/user/grant",
@ -188,7 +188,7 @@ func testCurlV3AuthUserBasicOperations(cx ctlCtx) {
// change password
user, err := json.Marshal(&pb.AuthUserChangePasswordRequest{Name: "user1", Password: "456"})
require.NoError(cx.t, err)
if err := e2e.CURLPost(cx.epc, e2e.CURLReq{
if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
Endpoint: "/v3/auth/user/changepw",
Value: string(user),
Expected: expect.ExpectedResponse{Value: "revision"},
@ -200,11 +200,12 @@ func testCurlV3AuthUserBasicOperations(cx ctlCtx) {
usernames = []string{"user1", "userX"}
expectedResponse := []string{"revision", "etcdserver: user name not found"}
for i := 0; i < len(usernames); i++ {
user, err := json.Marshal(&pb.AuthUserGetRequest{
user, err = json.Marshal(&pb.AuthUserGetRequest{
Name: usernames[i],
})
require.NoError(cx.t, err)
if err := e2e.CURLPost(cx.epc, e2e.CURLReq{
if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
Endpoint: "/v3/auth/user/get",
Value: string(user),
Expected: expect.ExpectedResponse{Value: expectedResponse[i]},
@ -217,11 +218,11 @@ func testCurlV3AuthUserBasicOperations(cx ctlCtx) {
usernames = []string{"user2", "userX"}
expectedResponse = []string{"revision", "etcdserver: user name not found"}
for i := 0; i < len(usernames); i++ {
user, err := json.Marshal(&pb.AuthUserDeleteRequest{
user, err = json.Marshal(&pb.AuthUserDeleteRequest{
Name: usernames[i],
})
require.NoError(cx.t, err)
if err := e2e.CURLPost(cx.epc, e2e.CURLReq{
if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
Endpoint: "/v3/auth/user/delete",
Value: string(user),
Expected: expect.ExpectedResponse{Value: expectedResponse[i]},
@ -403,7 +404,7 @@ func testCurlV3AuthRoleManagePermission(cx ctlCtx) {
})
require.NoError(cx.t, err)
if err := e2e.CURLPost(cx.epc, e2e.CURLReq{
if err = e2e.CURLPost(cx.epc, e2e.CURLReq{
Endpoint: "/v3/auth/role/grant",
Value: string(grantPermissionReq),
Expected: expect.ExpectedResponse{Value: "revision"},


@ -216,7 +216,7 @@ func (ep *EtcdServerProcess) Stop() (err error) {
ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.Name))
if ep.proxy != nil {
ep.cfg.lg.Info("stopping proxy...", zap.String("name", ep.cfg.Name))
err := ep.proxy.Close()
err = ep.proxy.Close()
ep.proxy = nil
if err != nil {
return err


@ -195,11 +195,11 @@ func (ctl *EtcdctlV3) Txn(ctx context.Context, compares, ifSucess, ifFail []stri
return nil, err
}
for _, cmp := range compares {
if err := cmd.Send(cmp + "\r"); err != nil {
if err = cmd.Send(cmp + "\r"); err != nil {
return nil, err
}
}
if err := cmd.Send("\r"); err != nil {
if err = cmd.Send("\r"); err != nil {
return nil, err
}
_, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "success requests (get, put, del):"})
@ -561,7 +561,7 @@ func (ctl *EtcdctlV3) UserAdd(ctx context.Context, name, password string, opts c
// If no password is provided, and NoPassword isn't set, the CLI will always
// wait for a password, send an enter in this case for an "empty" password.
if !opts.NoPassword && password == "" {
err := cmd.Send("\n")
err = cmd.Send("\n")
if err != nil {
return nil, err
}


@ -451,9 +451,9 @@ func (c *Cluster) waitMembersForLeader(ctx context.Context, t testing.TB, membs
}
// ensure leader is up via linearizable get
for {
ctx, cancel := context.WithTimeout(ctx, 10*framecfg.TickDuration+time.Second)
_, err := cc.Get(ctx, "0")
cancel()
fctx, fcancel := context.WithTimeout(ctx, 10*framecfg.TickDuration+time.Second)
_, err := cc.Get(fctx, "0")
fcancel()
if err == nil || strings.Contains(err.Error(), "Key not found") {
break
}
@ -1087,9 +1087,9 @@ func (m *Member) Launch() error {
// different SAN fields (e.g. example.com). To work around,
// re-overwrite (*tls.Config).Certificates before starting
// test server.
tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
if err != nil {
return err
tlsCert, nerr := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
if nerr != nil {
return nerr
}
hs.TLS.Certificates = []tls.Certificate{*tlsCert}


@ -266,7 +266,7 @@ func TestMemberPromote(t *testing.T) {
// (the response has information on peer urls of the existing members in cluster)
learnerMember := clus.MustNewMember(t, memberAddResp)
if err := learnerMember.Launch(); err != nil {
if err = learnerMember.Launch(); err != nil {
t.Fatal(err)
}


@ -46,7 +46,7 @@ func TestMutexLockSessionExpired(t *testing.T) {
m2 := concurrency.NewMutex(s2, "/my-lock/")
// acquire lock for s1
if err := m1.Lock(context.TODO()); err != nil {
if err = m1.Lock(context.TODO()); err != nil {
t.Fatal(err)
}
@ -94,11 +94,11 @@ func TestMutexUnlock(t *testing.T) {
t.Fatal(err)
}
if err := m1.Lock(context.TODO()); err != nil {
if err = m1.Lock(context.TODO()); err != nil {
t.Fatal(err)
}
if err := m1.Unlock(context.TODO()); err != nil {
if err = m1.Unlock(context.TODO()); err != nil {
t.Fatal(err)
}


@ -126,20 +126,21 @@ func TestDoubleBarrierTooManyClients(t *testing.T) {
for i := 0; i < waiters; i++ {
go func() {
defer wgDone.Done()
session, err := concurrency.NewSession(clus.RandClient())
if err != nil {
t.Error(err)
gsession, gerr := concurrency.NewSession(clus.RandClient())
if gerr != nil {
t.Error(gerr)
}
defer session.Orphan()
defer gsession.Orphan()
bb := recipe.NewDoubleBarrier(session, "test-barrier", waiters)
if err := bb.Enter(); err != nil {
t.Errorf("could not enter on barrier (%v)", err)
if gerr = bb.Enter(); gerr != nil {
t.Errorf("could not enter on barrier (%v)", gerr)
}
wgEntered.Done()
<-donec
if err := bb.Leave(); err != nil {
t.Errorf("could not leave on barrier (%v)", err)
if gerr = bb.Leave(); gerr != nil {
t.Errorf("could not leave on barrier (%v)", gerr)
}
}()
}
@ -148,7 +149,7 @@ func TestDoubleBarrierTooManyClients(t *testing.T) {
// no any other client can enter the barrier.
wgEntered.Wait()
t.Log("Try to enter into double barrier")
if err := b.Enter(); err != recipe.ErrTooManyClients {
if err = b.Enter(); err != recipe.ErrTooManyClients {
t.Errorf("Unexcepted error, expected: ErrTooManyClients, got: %v", err)
}


@ -90,7 +90,7 @@ func TestKVPutWithLease(t *testing.T) {
key := "hello"
val := "world"
if _, err := kv.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil {
if _, err = kv.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil {
t.Fatalf("couldn't put %q (%v)", key, err)
}
resp, err := kv.Get(ctx, key)
@ -885,7 +885,7 @@ func TestBalancerSupportLearner(t *testing.T) {
// wait until learner member is ready
<-clus.Members[3].ReadyNotify()
if _, err := cli.Get(context.Background(), "foo"); err == nil {
if _, err = cli.Get(context.Background(), "foo"); err == nil {
t.Fatalf("expect Get request to learner to fail, got no error")
}
t.Logf("Expected: Read from learner error: %v", err)


@ -1287,7 +1287,7 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
defer close(donec)
for i := 0; ctx.Err() == nil; i++ {
key := fmt.Sprintf("key/%d", i%maxKey)
if _, err := putkv.Put(context.TODO(), key, "123"); err != nil {
if _, err = putkv.Put(context.TODO(), key, "123"); err != nil {
t.Errorf("fail putting key %s: %v", key, err)
}
if _, err = putkv.Get(context.TODO(), key); err != nil {


@ -59,13 +59,11 @@ func TestV3ClientMetrics(t *testing.T) {
// listen for all Prometheus metrics
go func() {
var err error
defer close(donec)
err = srv.Serve(ln)
if err != nil && !transport.IsClosedConnError(err) {
t.Errorf("Err serving http requests: %v", err)
serr := srv.Serve(ln)
if serr != nil && !transport.IsClosedConnError(serr) {
t.Errorf("Err serving http requests: %v", serr)
}
}()


@ -77,7 +77,7 @@ func TestPeriodicCheckDetectsCorruption(t *testing.T) {
ctx := context.Background()
for i := 0; i < 10; i++ {
_, err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
_, err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
assert.NoError(t, err, "error on put")
}
@ -152,7 +152,7 @@ func TestCompactHashCheckDetectCorruption(t *testing.T) {
ctx := context.Background()
for i := 0; i < 10; i++ {
_, err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
_, err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
assert.NoError(t, err, "error on put")
}
@ -189,7 +189,7 @@ func TestCompactHashCheckDetectMultipleCorruption(t *testing.T) {
ctx := context.Background()
for i := 0; i < 10; i++ {
_, err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
_, err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
assert.NoError(t, err, "error on put")
}


@ -102,18 +102,19 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
validateAfterCompactionInUse := func() error {
// Put to move PendingPages to FreePages
if _, err = kvc.Put(context.TODO(), putreq); err != nil {
t.Fatal(err)
_, verr := kvc.Put(context.TODO(), putreq)
if verr != nil {
t.Fatal(verr)
}
time.Sleep(500 * time.Millisecond)
afterCompactionInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
if err != nil {
t.Fatal(err)
afterCompactionInUse, verr := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
if verr != nil {
t.Fatal(verr)
}
aciu, err := strconv.Atoi(afterCompactionInUse)
if err != nil {
t.Fatal(err)
aciu, verr := strconv.Atoi(afterCompactionInUse)
if verr != nil {
t.Fatal(verr)
}
if biu <= aciu {
return fmt.Errorf("expected less than %d, got %d after compaction", biu, aciu)
@ -125,7 +126,7 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
// which causes the result to be flaky. Retry 3 times.
maxRetry, retry := 3, 0
for {
err := validateAfterCompactionInUse()
err = validateAfterCompactionInUse()
if err == nil {
break
}


@ -50,7 +50,7 @@ func TestEtcdVersionFromWAL(t *testing.T) {
// with the cluster's minimum version. As it's updated asynchronously,
// it could not be updated in time before close. Wait for it to become
// ready.
if err := waitForClusterVersionReady(srv); err != nil {
if err = waitForClusterVersionReady(srv); err != nil {
srv.Close()
t.Fatalf("failed to wait for cluster version to become ready: %v", err)
}


@ -284,13 +284,13 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}
// Trigger snapshot from the leader to new member
for i := 0; i < 15; i++ {
_, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr)
_, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr)
if err != nil {
t.Errorf("#%d: couldn't put key (%v)", i, err)
}
}
if err := clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil {
if err = clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil {
t.Fatal(err)
}
clus.WaitMembersForLeader(t, clus.Members)
@ -314,11 +314,11 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
schema.MustUnsafePutLease(tx, &lpb)
tx.Commit()
if err := be.Close(); err != nil {
if err = be.Close(); err != nil {
t.Fatal(err)
}
if err := clus.Members[2].Restart(t); err != nil {
if err = clus.Members[2].Restart(t); err != nil {
t.Fatal(err)
}


@ -212,7 +212,7 @@ func TestFirstCommitNotification(t *testing.T) {
t.Logf("Submitting write to make sure empty and 'foo' index entry was already flushed")
cli := cluster.RandClient()
if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
if _, err = cli.Put(ctx, "foo", "bar"); err != nil {
t.Fatalf("Failed to put kv pair.")
}
@ -225,7 +225,8 @@ func TestFirstCommitNotification(t *testing.T) {
group, groupContext := errgroup.WithContext(ctx)
for i, notifier := range notifiers {
member, notifier := cluster.Members[i], notifier
member := cluster.Members[i]
notifier := notifier
group.Go(func() error {
return checkFirstCommitNotification(groupContext, t, member, leaderAppliedIndex, notifier)
})


@ -199,7 +199,7 @@ func TestV3LeaseNegativeID(t *testing.T) {
time.Sleep(100 * time.Millisecond)
// restore lessor from db file
clus.Members[2].Stop(t)
if err := clus.Members[2].Restart(t); err != nil {
if err = clus.Members[2].Restart(t); err != nil {
t.Fatal(err)
}


@ -1335,9 +1335,9 @@ func TestV3WatchCancellation(t *testing.T) {
cli.Watch(ctx, "/foo")
for i := 0; i < 1000; i++ {
ctx, cancel := context.WithCancel(ctx)
cli.Watch(ctx, "/foo")
cancel()
wctx, wcancel := context.WithCancel(ctx)
cli.Watch(wctx, "/foo")
wcancel()
}
// Wait a little for cancellations to take hold
@ -1374,9 +1374,9 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
cli := clus.RandClient()
for i := 0; i < 1000; i++ {
ctx, cancel := context.WithCancel(ctx)
cli.Watch(ctx, "/foo")
cancel()
wctx, wcancel := context.WithCancel(ctx)
cli.Watch(wctx, "/foo")
wcancel()
}
// Wait a little for cancellations to take hold
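The `wctx`/`wcancel` renames target the pattern the test.sh skip-list comment already quotes (`declaration of "ctx" shadows declaration at line ...`): calling `context.WithCancel(ctx)` inside a loop and assigning the result back to `ctx` with `:=` shadows the enclosing context, which the test keeps using afterwards. Giving the derived context its own name, as sketched below with assumed names, keeps both visible:

```go
// Hypothetical sketch; not part of this change.
package example

import (
	"context"
	"time"
)

func spamWatches(ctx context.Context, watch func(context.Context)) error {
	for i := 0; i < 1000; i++ {
		// Before the fix this read: ctx, cancel := context.WithCancel(ctx),
		// declaring a loop-local ctx that shadows the parameter used below.
		wctx, wcancel := context.WithCancel(ctx)
		watch(wctx)
		wcancel()
	}
	// The outer ctx is still used after the loop, which is what makes the
	// shadowed declaration worth reporting.
	time.Sleep(10 * time.Millisecond)
	return ctx.Err()
}
```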


@ -370,9 +370,9 @@ func (t triggerCompact) Trigger(_ *testing.T, ctx context.Context, member e2e.Et
var rev int64
for {
resp, err := cc.Get(ctx, "/")
if err != nil {
return err
resp, gerr := cc.Get(ctx, "/")
if gerr != nil {
return gerr
}
rev = resp.Header.Revision


@ -69,9 +69,9 @@ func SimulateTraffic(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2
nonUniqueWriteLimiter := NewConcurrencyLimiter(profile.MaxNonUniqueRequestConcurrency)
for i := 0; i < profile.ClientCount; i++ {
wg.Add(1)
c, err := NewClient([]string{endpoints[i%len(endpoints)]}, ids, baseTime)
if err != nil {
t.Fatal(err)
c, nerr := NewClient([]string{endpoints[i%len(endpoints)]}, ids, baseTime)
if nerr != nil {
t.Fatal(nerr)
}
go func(c *RecordingClient) {
defer wg.Done()


@ -123,9 +123,10 @@ func readUsingReadAll(lg *zap.Logger, index *uint64, snapfile *string, dataDir s
case nil:
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
nodes := genIDSlice(snapshot.Metadata.ConfState.Voters)
confStateJSON, err := json.Marshal(snapshot.Metadata.ConfState)
if err != nil {
confStateJSON = []byte(fmt.Sprintf("confstate err: %v", err))
confStateJSON, merr := json.Marshal(snapshot.Metadata.ConfState)
if merr != nil {
confStateJSON = []byte(fmt.Sprintf("confstate err: %v", merr))
}
fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s confstate=%s\n",
walsnap.Term, walsnap.Index, nodes, confStateJSON)