Mirror of https://github.com/etcd-io/etcd.git, synced 2024-09-27 06:25:44 +00:00
*: fix revive linter
Remove the old revive_pass from the bash scripts and migrate revive.toml into the golangci-lint linters-settings.

Signed-off-by: Wei Fu <fuweid89@gmail.com>
Parent: e1ebc260a9
Commit: 07effc4d0a
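Most of the Go changes below are mechanical applications of the revive rules that the migrated configuration enables: context-as-argument (context.Context moves to the first parameter), indent-error-flow and if-return (no else branch after a terminating return), var-declaration, increment-decrement, error-naming, receiver-naming, and range. As a reference for the most frequent change, here is a minimal, self-contained sketch of the context-as-argument convention; the function and names are illustrative only and not taken from the etcd codebase.

package main

import (
    "context"
    "fmt"
    "time"
)

// Flagged by revive's context-as-argument rule: ctx is not the first parameter.
// func doWork(name string, ctx context.Context) error { ... }

// Preferred: context.Context is always the first parameter.
func doWork(ctx context.Context, name string) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case <-time.After(10 * time.Millisecond):
        fmt.Println("finished", name)
        return nil
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    if err := doWork(ctx, "example"); err != nil {
        fmt.Println("error:", err)
    }
}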
@@ -433,11 +433,6 @@ function lint_fix_pass {
   run_for_modules generic_checker run golangci-lint run --config "${ETCD_ROOT_DIR}/tools/.golangci.yaml" --fix
 }
 
-function revive_pass {
-  # TODO: etcdserverpb/raft_internal_stringer.go:15:1: should have a package comment
-  run_for_modules generic_checker run_go_tool "github.com/mgechev/revive" -config "${ETCD_ROOT_DIR}/tests/revive.toml" -exclude "vendor/..." -exclude "out/..."
-}
-
 function unconvert_pass {
   # TODO: pb package should be filtered out.
   run_for_modules generic_checker run_go_tool "github.com/mdempsky/unconvert" unconvert -v
@@ -873,7 +873,7 @@ func (c *RaftCluster) Store(store v2store.Store) {
             zap.Bool("is-learner", m.IsLearner),
         )
     }
-    for id, _ := range c.removed {
+    for id := range c.removed {
         //We do not need to delete the member since the store is empty.
         mustAddToRemovedMembersInStore(c.lg, store, id)
     }
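The hunk above is revive's range rule: a blank second variable in a range clause is redundant. A minimal standalone sketch with a hypothetical map (not etcd code):

package main

import "fmt"

func main() {
    removed := map[uint64]bool{1: true, 2: true}

    // Flagged by revive's range rule: the blank value variable is redundant.
    // for id, _ := range removed { ... }

    // Preferred: omit the unused value entirely.
    for id := range removed {
        fmt.Println("removed member:", id)
    }
}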
@@ -104,11 +104,7 @@ func setupAuth(c interfaces.Client, roles []authRole, users []authUser) error {
     }
 
     // enable auth
-    if err := c.AuthEnable(context.TODO()); err != nil {
-        return err
-    }
-
-    return nil
+    return c.AuthEnable(context.TODO())
 }
 
 func requireRolePermissionEqual(t *testing.T, expectRole authRole, actual []*authpb.Permission) {
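The setupAuth change is revive's if-return rule: an `if err := f(); err != nil { return err }` followed by `return nil` collapses into returning the call directly. A minimal sketch with a hypothetical enable function (stand-in only, not the etcd client API):

package main

import (
    "context"
    "fmt"
)

// enableAuth is a hypothetical stand-in for a call such as c.AuthEnable(ctx).
func enableAuth(ctx context.Context) error { return nil }

// Flagged by revive's if-return rule:
//
//    if err := enableAuth(ctx); err != nil {
//        return err
//    }
//    return nil
//
// Preferred: return the call's error directly.
func setup(ctx context.Context) error {
    return enableAuth(ctx)
}

func main() {
    fmt.Println(setup(context.Background()))
}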
@@ -91,14 +91,14 @@ func TestConnectionMultiplexing(t *testing.T) {
                 name = "ClientTLS"
             }
             t.Run(name, func(t *testing.T) {
-                testConnectionMultiplexing(t, ctx, clus.Procs[0], clientTLS)
+                testConnectionMultiplexing(ctx, t, clus.Procs[0], clientTLS)
             })
         }
     })
 }
 
-func testConnectionMultiplexing(t *testing.T, ctx context.Context, member e2e.EtcdProcess, connType e2e.ClientConnType) {
+func testConnectionMultiplexing(ctx context.Context, t *testing.T, member e2e.EtcdProcess, connType e2e.ClientConnType) {
     httpEndpoint := member.EndpointsHTTP()[0]
     grpcEndpoint := member.EndpointsGRPC()[0]
     switch connType {
@@ -71,9 +71,8 @@ func tlsInfo(t testing.TB, cfg e2e.ClientConfig) (*transport.TLSInfo, error) {
             return nil, fmt.Errorf("failed to generate cert: %s", err)
         }
         return &tls, nil
-    } else {
-        return &integration.TestTLSInfo, nil
-    }
+    }
+    return &integration.TestTLSInfo, nil
     default:
         return nil, fmt.Errorf("config %v not supported", cfg)
     }
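This hunk and the three that follow are revive's indent-error-flow rule: when the if branch ends in a terminating return, the else is dropped and the alternative path continues at the outer indentation level. A standalone sketch with hypothetical names:

package main

import (
    "errors"
    "fmt"
    "strings"
)

// Flagged by revive's indent-error-flow rule:
//
//    if strings.Contains(err.Error(), "context deadline exceeded") {
//        return nil
//    } else {
//        return err
//    }
//
// Preferred: return in the if block and fall through without an else.
func classify(err error) error {
    if strings.Contains(err.Error(), "context deadline exceeded") {
        return nil
    }
    return err
}

func main() {
    fmt.Println(classify(errors.New("context deadline exceeded")))
    fmt.Println(classify(errors.New("connection refused")))
}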
@@ -122,9 +122,8 @@ func TestWatchDelayForManualProgressNotification(t *testing.T) {
         if err != nil {
             if strings.Contains(err.Error(), "context deadline exceeded") {
                 return nil
-            } else {
-                return err
             }
+            return err
         }
         time.Sleep(watchResponsePeriod)
     }
@@ -155,9 +154,8 @@ func TestWatchDelayForEvent(t *testing.T) {
         if err != nil {
             if strings.Contains(err.Error(), "context deadline exceeded") {
                 return nil
-            } else {
-                return err
             }
+            return err
         }
         time.Sleep(watchResponsePeriod)
     }
@@ -204,9 +202,8 @@ func continuouslyExecuteGetAll(ctx context.Context, t *testing.T, g *errgroup.Gr
         if err != nil {
             if strings.Contains(err.Error(), "context deadline exceeded") {
                 return nil
-            } else {
-                return err
             }
+            return err
         }
         respSize := 0
         for _, kv := range resp.Kvs {
@@ -133,8 +133,8 @@ func (ep *EtcdServerProcess) EndpointsHTTP() []string {
 }
 func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.MetricsURL} }
 
-func (epc *EtcdServerProcess) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
-    etcdctl, err := NewEtcdctl(epc.Config().Client, epc.EndpointsGRPC(), opts...)
+func (ep *EtcdServerProcess) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
+    etcdctl, err := NewEtcdctl(ep.Config().Client, ep.EndpointsGRPC(), opts...)
     if err != nil {
         panic(err)
     }
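The Etcdctl hunk renames the method receiver from epc to ep so it matches the other EtcdServerProcess methods; revive's receiver-naming rule requires a consistent receiver name across a type's methods. A small sketch with a hypothetical type (not the etcd framework code):

package main

import "fmt"

type server struct{ name string }

// Flagged by revive's receiver-naming rule if the other methods use "s":
// func (srv *server) Endpoint() string { ... }

// Preferred: use the same receiver name on every method of the type.
func (s *server) Name() string     { return s.name }
func (s *server) Endpoint() string { return "http://" + s.name + ":2379" }

func main() {
    s := &server{name: "member-0"}
    fmt.Println(s.Name(), s.Endpoint())
}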
@@ -58,7 +58,7 @@ func TestSessionTTLOptions(t *testing.T) {
     }
     defer cli.Close()
 
-    var setTTL int = 90
+    var setTTL = 90
     s, err := concurrency.NewSession(cli, concurrency.WithTTL(setTTL))
     if err != nil {
         t.Fatal(err)
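The `var setTTL int = 90` change, and the `var ... = 0` changes further down, follow revive's var-declaration rule, which flags types and zero values that the declaration already implies. A minimal sketch:

package main

import "fmt"

func main() {
    // Flagged by revive's var-declaration rule:
    //   var setTTL int = 90   // the type is implied by the literal
    //   var eidx uint64 = 0   // "= 0" restates the zero value
    var setTTL = 90
    var eidx uint64

    fmt.Println(setTTL, eidx)
}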
@@ -87,7 +87,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration,
         wg.Add(1)
         go func() {
             defer wg.Done()
-            putWorker(t, ctx, clus)
+            putWorker(ctx, t, clus)
         }()
     }
 
@@ -95,7 +95,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration,
         wg.Add(1)
         go func() {
             defer wg.Done()
-            getWorker(t, ctx, clus)
+            getWorker(ctx, t, clus)
         }()
     }
 
@@ -109,7 +109,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration,
     t.Logf("Revision %d", resp.Header.Revision)
 }
 
-func putWorker(t *testing.T, ctx context.Context, clus *integration.Cluster) {
+func putWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
     for i := 0; ; i++ {
         kv := clus.Client(i % 3)
         _, err := kv.Put(ctx, "foo", fmt.Sprintf("%d", i))
@@ -122,7 +122,7 @@ func putWorker(t *testing.T, ctx context.Context, clus *integration.Cluster) {
     }
 }
 
-func getWorker(t *testing.T, ctx context.Context, clus *integration.Cluster) {
+func getWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
     var prevRev int64
     for i := 0; ; i++ {
         kv := clus.Client(i % 3)
@@ -543,7 +543,7 @@ func TestStoreCompareAndSwapPrevIndexFailsIfNotMatch(t *testing.T) {
 // TestStoreWatchCreate ensures that the store can watch for key creation.
 func TestStoreWatchCreate(t *testing.T) {
     s := v2store.New()
-    var eidx uint64 = 0
+    var eidx uint64
     w, _ := s.Watch("/foo", false, false, 0)
     c := w.EventChan()
     assert.Equal(t, w.StartIndex(), eidx)
@@ -564,7 +564,7 @@ func TestStoreWatchCreate(t *testing.T) {
 // can watch for recursive key creation.
 func TestStoreWatchRecursiveCreate(t *testing.T) {
     s := v2store.New()
-    var eidx uint64 = 0
+    var eidx uint64
     w, err := s.Watch("/foo", true, false, 0)
     testutil.AssertNil(t, err)
     assert.Equal(t, w.StartIndex(), eidx)
@@ -1,38 +0,0 @@
-ignoreGeneratedHeader = false
-severity = "warning"
-confidence = 0.8
-errorCode = 0
-warningCode = 0
-
-[rule.blank-imports]
-[rule.context-as-argument]
-[rule.dot-imports]
-[rule.error-return]
-[rule.error-naming]
-[rule.if-return]
-[rule.increment-decrement]
-[rule.var-declaration]
-[rule.package-comments]
-[rule.range]
-[rule.receiver-naming]
-[rule.time-naming]
-[rule.indent-error-flow]
-[rule.errorf]
-
-
-# TODO: enable following
-
-# grpcproxy context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token)
-# [rule.context-keys-type]
-
-# punctuation in error value
-# [rule.error-strings]
-
-# underscore variables
-# [rule.var-naming]
-
-# godoc
-# [rule.exported]
-
-# return unexported type
-# [rule.unexported-return]
@@ -237,7 +237,7 @@ type goPanicFailpoint struct {
 }
 
 type trigger interface {
-    Trigger(t *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error
+    Trigger(ctx context.Context, t *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error
     AvailabilityChecker
 }
 
@@ -270,7 +270,7 @@ func (f goPanicFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logg
     }
     if f.trigger != nil {
         lg.Info("Triggering gofailpoint", zap.String("failpoint", f.Name()))
-        err = f.trigger.Trigger(t, ctx, member, clus)
+        err = f.trigger.Trigger(ctx, t, member, clus)
         if err != nil {
             lg.Info("gofailpoint trigger failed", zap.String("failpoint", f.Name()), zap.Error(err))
         }
@@ -330,7 +330,7 @@ func (f goPanicFailpoint) Name() string {
 
 type triggerDefrag struct{}
 
-func (t triggerDefrag) Trigger(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _ *e2e.EtcdProcessCluster) error {
+func (t triggerDefrag) Trigger(ctx context.Context, _ *testing.T, member e2e.EtcdProcess, _ *e2e.EtcdProcessCluster) error {
     cc, err := clientv3.New(clientv3.Config{
         Endpoints: member.EndpointsGRPC(),
         Logger:    zap.NewNop(),
@@ -356,7 +356,7 @@ type triggerCompact struct {
     multiBatchCompaction bool
 }
 
-func (t triggerCompact) Trigger(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error {
+func (t triggerCompact) Trigger(ctx context.Context, _ *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error {
     cc, err := clientv3.New(clientv3.Config{
         Endpoints: member.EndpointsGRPC(),
         Logger:    zap.NewNop(),
@@ -399,7 +399,7 @@ type blackholePeerNetworkFailpoint struct {
 
 func (f blackholePeerNetworkFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error {
     member := clus.Procs[rand.Int()%len(clus.Procs)]
-    return f.Trigger(t, ctx, member, clus)
+    return f.Trigger(ctx, t, member, clus)
 }
 
 func (f blackholePeerNetworkFailpoint) Name() string {
@@ -410,8 +410,8 @@ type triggerBlackhole struct {
     waitTillSnapshot bool
 }
 
-func (tb triggerBlackhole) Trigger(t *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error {
-    return blackhole(t, ctx, member, clus, tb.waitTillSnapshot)
+func (tb triggerBlackhole) Trigger(ctx context.Context, t *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error {
+    return blackhole(ctx, t, member, clus, tb.waitTillSnapshot)
 }
 
 func (tb triggerBlackhole) Available(config e2e.EtcdProcessClusterConfig, process e2e.EtcdProcess) bool {
@@ -421,7 +421,7 @@ func (tb triggerBlackhole) Available(config e2e.EtcdProcessClusterConfig, proces
     return config.ClusterSize > 1 && process.PeerProxy() != nil
 }
 
-func blackhole(t *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, shouldWaitTillSnapshot bool) error {
+func blackhole(ctx context.Context, t *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, shouldWaitTillSnapshot bool) error {
     proxy := member.PeerProxy()
 
     // Blackholing will cause peers to not be able to use streamWriters registered with member
@@ -437,10 +437,9 @@ func blackhole(t *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *
     }()
     if shouldWaitTillSnapshot {
         return waitTillSnapshot(ctx, t, clus, member)
-    } else {
-        time.Sleep(time.Second)
-        return nil
     }
+    time.Sleep(time.Second)
+    return nil
 }
 
 func waitTillSnapshot(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, blackholedMember e2e.EtcdProcess) error {
@@ -99,9 +99,8 @@ func describeTxnResponse(request *TxnRequest, response *TxnResponse) string {
     }
     if response.Failure {
         return fmt.Sprintf("failure(%s)", description)
-    } else {
-        return fmt.Sprintf("success(%s)", description)
     }
+    return fmt.Sprintf("success(%s)", description)
 }
 
 func describeEtcdOperation(op EtcdOperation) string {
@@ -162,13 +161,12 @@ func describeRangeResponse(request RangeOptions, response RangeResponse) string
             kvs[i] = describeValueOrHash(kv.Value)
         }
         return fmt.Sprintf("[%s], count: %d", strings.Join(kvs, ","), response.Count)
-    } else {
-        if len(response.KVs) == 0 {
-            return "nil"
-        } else {
-            return describeValueOrHash(response.KVs[0].Value)
-        }
-    }
+    }
+
+    if len(response.KVs) == 0 {
+        return "nil"
+    }
+    return describeValueOrHash(response.KVs[0].Value)
 }
 
 func describeValueOrHash(value ValueOrHash) string {
@@ -96,12 +96,11 @@ func (s EtcdState) Step(request EtcdRequest) (EtcdState, MaybeEtcdResponse) {
         if request.Range.Revision == 0 || request.Range.Revision == s.Revision {
             resp := s.getRange(request.Range.RangeOptions)
             return s, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Range: &resp, Revision: s.Revision}}
-        } else {
-            if request.Range.Revision > s.Revision {
-                return s, MaybeEtcdResponse{Error: EtcdFutureRevErr.Error()}
-            }
-            return s, MaybeEtcdResponse{PartialResponse: true, EtcdResponse: EtcdResponse{Revision: s.Revision}}
-        }
+        }
+        if request.Range.Revision > s.Revision {
+            return s, MaybeEtcdResponse{Error: ErrEtcdFutureRev.Error()}
+        }
+        return s, MaybeEtcdResponse{PartialResponse: true, EtcdResponse: EtcdResponse{Revision: s.Revision}}
     case Txn:
         failure := false
         for _, cond := range request.Txn.Conditions {
@@ -148,7 +147,7 @@ func (s EtcdState) Step(request EtcdRequest) (EtcdState, MaybeEtcdResponse) {
             }
         }
         if increaseRevision {
-            s.Revision += 1
+            s.Revision++
         }
         return s, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Txn: &TxnResponse{Failure: failure, Results: opResp}, Revision: s.Revision}}
     case LeaseGrant:
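The `s.Revision += 1` and `count += 1` changes in this file correspond to revive's increment-decrement rule, which prefers ++ and -- when adding or subtracting exactly one. A trivial sketch:

package main

import "fmt"

func main() {
    revision := int64(0)
    count := 0

    // Flagged by revive's increment-decrement rule:
    //   revision += 1
    //   count += 1
    revision++
    count++

    fmt.Println(revision, count)
}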
@@ -174,7 +173,7 @@ func (s EtcdState) Step(request EtcdRequest) (EtcdState, MaybeEtcdResponse) {
         //delete the lease
         delete(s.Leases, request.LeaseRevoke.LeaseID)
         if keyDeleted {
-            s.Revision += 1
+            s.Revision++
         }
         return s, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Revision: s.Revision, LeaseRevoke: &LeaseRevokeResponse{}}}
     case Defragment:
@@ -193,7 +192,7 @@ func (s EtcdState) getRange(options RangeOptions) RangeResponse {
     for k, v := range s.KeyValues {
         if k >= options.Start && k < options.End {
             response.KVs = append(response.KVs, KeyValue{Key: k, ValueRevision: v})
-            count += 1
+            count++
         }
     }
     sort.Slice(response.KVs, func(j, k int) bool {
@@ -315,7 +314,7 @@ type MaybeEtcdResponse struct {
     Error string
 }
 
-var EtcdFutureRevErr = errors.New("future rev")
+var ErrEtcdFutureRev = errors.New("future rev")
 
 type EtcdResponse struct {
     Txn *TxnResponse
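Renaming EtcdFutureRevErr to ErrEtcdFutureRev follows revive's error-naming rule: exported error variables carry the Err prefix. A minimal sketch:

package main

import (
    "errors"
    "fmt"
)

// Flagged by revive's error-naming rule:
//   var EtcdFutureRevErr = errors.New("future rev")

// Preferred: the Err prefix marks the variable as a sentinel error.
var ErrEtcdFutureRev = errors.New("future rev")

func main() {
    wrapped := fmt.Errorf("wrapped: %w", ErrEtcdFutureRev)
    fmt.Println(errors.Is(wrapped, ErrEtcdFutureRev))
}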
@@ -111,9 +111,8 @@ type Event struct {
 func (e Event) Match(request WatchRequest) bool {
     if request.WithPrefix {
         return strings.HasPrefix(e.Key, request.Key)
-    } else {
-        return e.Key == request.Key
     }
+    return e.Key == request.Key
 }
 
 type WatchRequest struct {
@@ -161,7 +161,7 @@ func (c etcdTrafficClient) Request(ctx context.Context, request etcdRequestType,
     opCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
     defer cancel()
 
-    var limit int64 = 0
+    var limit int64
     switch request {
     case StaleGet:
         _, rev, err = c.client.Get(opCtx, c.randomKey(), lastRev)
@@ -114,7 +114,7 @@ func (t kubernetesTraffic) Read(ctx context.Context, kc *kubernetesClient, s *st
     hasMore := true
     rangeStart := keyPrefix
     var kvs []*mvccpb.KeyValue
-    var revision int64 = 0
+    var revision int64
 
     for hasMore {
         readCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
@@ -43,7 +43,7 @@ func validateWatch(t *testing.T, cfg Config, reports []report.ClientReport) []mo
 }
 
 func validateBookmarkable(t *testing.T, report report.ClientReport) {
-    var lastProgressNotifyRevision int64 = 0
+    var lastProgressNotifyRevision int64
     for _, op := range report.Watch {
         for _, resp := range op.Responses {
             for _, event := range resp.Events {
@@ -175,7 +175,7 @@ func mergeWatchEventHistory(t *testing.T, reports []report.ClientReport) []model
     }
     revisionToEvents := map[int64]revisionEvents{}
     var lastClientId = 0
-    var lastRevision int64 = 0
+    var lastRevision int64
     events := []model.WatchEvent{}
     for _, r := range reports {
         for _, op := range r.Watch {
|
@ -66,8 +66,8 @@ type watchConfig struct {
|
||||
|
||||
// watchUntilRevision watches all changes until context is cancelled, it has observed revision provided via maxRevisionChan or maxRevisionChan was closed.
|
||||
func watchUntilRevision(ctx context.Context, t *testing.T, c *traffic.RecordingClient, maxRevisionChan <-chan int64, cfg watchConfig) {
|
||||
var maxRevision int64 = 0
|
||||
var lastRevision int64 = 0
|
||||
var maxRevision int64
|
||||
var lastRevision int64
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
watch := c.Watch(ctx, "", 1, true, true)
|
||||
|
@ -19,7 +19,7 @@ linters:
|
||||
# - varcheck
|
||||
- goimports
|
||||
- ineffassign
|
||||
# - revive # TODO: enable by #16610
|
||||
- revive
|
||||
- staticcheck
|
||||
# - stylecheck # TODO: enable by #16610
|
||||
# - unused # TODO: enable by #16610
|
||||
@@ -27,6 +27,66 @@ linters:
 linters-settings: # please keep this alphabetized
   goimports:
     local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages.
+  revive:
+    ignore-generated-header: false
+    severity: error
+    confidence: 0.8
+    enable-all-rules: false
+    rules:
+      - name: blank-imports
+        severity: error
+        disabled: false
+      - name: context-as-argument
+        severity: error
+        disabled: false
+      - name: dot-imports
+        severity: error
+        disabled: false
+      - name: error-return
+        severity: error
+        disabled: false
+      - name: error-naming
+        severity: error
+        disabled: false
+      - name: if-return
+        severity: error
+        disabled: false
+      - name: increment-decrement
+        severity: error
+        disabled: false
+      - name: var-declaration
+        severity: error
+        disabled: false
+      - name: package-comments
+        severity: error
+        disabled: false
+      - name: range
+        severity: error
+        disabled: false
+      - name: receiver-naming
+        severity: error
+        disabled: false
+      - name: time-naming
+        severity: error
+        disabled: false
+      - name: indent-error-flow
+        severity: error
+        disabled: false
+      - name: errorf
+        severity: error
+        disabled: false
+      - name: context-keys-type
+        severity: error
+        disabled: false
+      # TODO: enable the following rules
+      - name: error-strings
+        disabled: true
+      - name: var-naming
+        disabled: true
+      - name: exported
+        disabled: true
+      - name: unexported-return
+        disabled: true
   staticcheck:
     checks:
       - all