etcd: use var to declare empty slices

Declare empty slices with "var s []int" instead of "s := []int{}", per https://github.com/golang/go/wiki/CodeReviewComments#declaring-empty-slices

Signed-off-by: demoManito <1430482733@qq.com>
demoManito 2022-09-16 14:30:00 +08:00
parent b7ba0542f6
commit 72cf0cc04a
55 changed files with 106 additions and 104 deletions
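
For context, a minimal sketch (not part of the commit) of what the style rule changes. Both forms declare a zero-length slice that behaves identically with append, len, and range; the difference is that var yields a nil slice while the composite literal yields a non-nil empty one, which is visible to reflect.DeepEqual and encoding/json:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	var a []int  // preferred style: a nil slice
	b := []int{} // composite literal: a non-nil empty slice

	fmt.Println(len(a), len(b))          // 0 0
	fmt.Println(a == nil, b == nil)      // true false
	fmt.Println(reflect.DeepEqual(a, b)) // false: DeepEqual tells nil from empty

	ja, _ := json.Marshal(a)
	jb, _ := json.Marshal(b)
	fmt.Println(string(ja), string(jb)) // null []

	// append treats both the same way
	a, b = append(a, 1), append(b, 1)
	fmt.Println(a, b) // [1] [1]
}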


@@ -45,7 +45,7 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]
tcp2ap[tcpAddr.String()] = url
}
- stringParts := []string{}
+ var stringParts []string
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {


@@ -52,7 +52,7 @@ func TestSRVGetCluster(t *testing.T) {
{Target: "2.example.com.", Port: 2480},
{Target: "3.example.com.", Port: 2480},
}
- srvNone := []*net.SRV{}
+ var srvNone []*net.SRV
tests := []struct {
service string


@@ -222,7 +222,8 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string
func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
// reverse lookup
- wildcards, names := []string{}, []string{}
+ var names []string
+ var wildcards []string
for _, dns := range dnsNames {
if strings.HasPrefix(dns, "*.") {
wildcards = append(wildcards, dns[1:])


@@ -39,7 +39,7 @@ func equal(a, b []string) bool {
func driveSetTests(t *testing.T, s Set) {
// Verify operations on an empty set
- eValues := []string{}
+ var eValues []string
values := s.Values()
if !reflect.DeepEqual(values, eValues) {
t.Fatalf("Expect values=%v got %v", eValues, values)


@@ -20,7 +20,7 @@ import (
"time"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -67,7 +67,7 @@ func getDelOp(args []string) (string, []clientv3.OpOption) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
}
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
key := args[0]
if len(args) > 1 {
if delPrefix || delFromKey {


@@ -23,7 +23,7 @@ import (
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/pkg/v3/flags"
@@ -101,7 +101,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
ka := keepAliveTimeFromCmd(cmd)
kat := keepAliveTimeoutFromCmd(cmd)
auth := authCfgFromCmd(cmd)
- cfgs := []*clientv3.Config{}
+ var cfgs []*clientv3.Config
for _, ep := range endpointsFromCluster(cmd) {
cfg, err := clientv3.NewClientConfig(&clientv3.ConfigSpec{
Endpoints: []string{ep},
@@ -172,7 +172,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
close(hch)
errs := false
- healthList := []epHealth{}
+ var healthList []epHealth
for h := range hch {
healthList = append(healthList, h)
if h.Error != "" {
@@ -193,7 +193,7 @@ type epStatus struct {
func epStatusCommandFunc(cmd *cobra.Command, args []string) {
c := mustClientFromCmd(cmd)
- statusList := []epStatus{}
+ var statusList []epStatus
var err error
for _, ep := range endpointsFromCluster(cmd) {
ctx, cancel := commandCtx(cmd)
@@ -222,7 +222,7 @@ type epHashKV struct {
func epHashKVCommandFunc(cmd *cobra.Command, args []string) {
c := mustClientFromCmd(cmd)
- hashList := []epHashKV{}
+ var hashList []epHashKV
var err error
for _, ep := range endpointsFromCluster(cmd) {
ctx, cancel := commandCtx(cmd)
@@ -288,7 +288,7 @@ func endpointsFromCluster(cmd *cobra.Command) []string {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- ret := []string{}
+ var ret []string
for _, m := range membs.Members {
ret = append(ret, m.ClientURLs...)
}


@@ -19,7 +19,7 @@ import (
"strings"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -107,7 +107,7 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
}
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
switch getConsistency {
case "s":
opts = append(opts, clientv3.WithSerializable())


@@ -26,7 +26,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/logutil"
"go.etcd.io/etcd/client/pkg/v3/srv"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/pkg/v3/flags"
@@ -363,7 +363,7 @@ func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) {
return eps, err
}
// strip insecure connections
- ret := []string{}
+ var ret []string
for _, ep := range eps {
if strings.HasPrefix(ep, "http://") {
fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)


@@ -27,7 +27,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
@@ -191,7 +191,7 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
}
var lastRev int64
- ops := []clientv3.Op{}
+ var ops []clientv3.Op
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision


@@ -21,7 +21,7 @@ import (
"strings"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -157,7 +157,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
display.MemberAdd(*resp)
if _, ok := (display).(*simplePrinter); ok {
- conf := []string{}
+ var conf []string
for _, memb := range resp.Members {
for _, u := range memb.PeerURLs {
n := memb.Name


@@ -101,7 +101,7 @@ func getPutOp(args []string) (string, string, []clientv3.OpOption) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err))
}
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
if id != 0 {
opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id)))
}


@@ -36,7 +36,7 @@ var (
interruptRegisterMu, interruptExitMu sync.Mutex
// interruptHandlers holds all registered InterruptHandlers in order
// they will be executed.
- interruptHandlers = []InterruptHandler{}
+ interruptHandlers []InterruptHandler
)
// RegisterInterruptHandler registers a new InterruptHandler. Handlers registered


@@ -124,7 +124,7 @@ func (t TimeSeries) String() string {
if err := wr.Write([]string{"UNIX-SECOND", "MIN-LATENCY-MS", "AVG-LATENCY-MS", "MAX-LATENCY-MS", "AVG-THROUGHPUT"}); err != nil {
log.Fatal(err)
}
- rows := [][]string{}
+ var rows [][]string
for i := range t {
row := []string{
fmt.Sprintf("%d", t[i].Timestamp),


@@ -124,7 +124,7 @@ func TestNodeStepUnblock(t *testing.T) {
// TestNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
func TestNodePropose(t *testing.T) {
- msgs := []raftpb.Message{}
+ var msgs []raftpb.Message
appendStep := func(r *raft, m raftpb.Message) error {
msgs = append(msgs, m)
return nil
@@ -166,7 +166,7 @@ func TestNodePropose(t *testing.T) {
// TestNodeReadIndex ensures that node.ReadIndex sends the MsgReadIndex message to the underlying raft.
// It also ensures that ReadState can be read out through ready chan.
func TestNodeReadIndex(t *testing.T) {
- msgs := []raftpb.Message{}
+ var msgs []raftpb.Message
appendStep := func(r *raft, m raftpb.Message) error {
msgs = append(msgs, m)
return nil
@@ -306,7 +306,7 @@ func TestNodeReadIndexToOldLeader(t *testing.T) {
// TestNodeProposeConfig ensures that node.ProposeConfChange sends the given configuration proposal
// to the underlying raft.
func TestNodeProposeConfig(t *testing.T) {
- msgs := []raftpb.Message{}
+ var msgs []raftpb.Message
appendStep := func(r *raft, m raftpb.Message) error {
msgs = append(msgs, m)
return nil
@@ -456,7 +456,7 @@ func TestBlockProposal(t *testing.T) {
}
func TestNodeProposeWaitDropped(t *testing.T) {
- msgs := []raftpb.Message{}
+ var msgs []raftpb.Message
droppingMsg := []byte("test_dropping")
dropStep := func(r *raft, m raftpb.Message) error {
if m.Type == raftpb.MsgProp && strings.Contains(m.String(), string(droppingMsg)) {


@@ -640,13 +640,13 @@ func TestLogReplication(t *testing.T) {
t.Errorf("#%d.%d: committed = %d, want %d", i, j, sm.raftLog.committed, tt.wcommitted)
}
- ents := []pb.Entry{}
+ var ents []pb.Entry
for _, e := range nextEnts(sm, tt.network.storage[j]) {
if e.Data != nil {
ents = append(ents, e)
}
}
- props := []pb.Message{}
+ var props []pb.Message
for _, m := range tt.msgs {
if m.Type == pb.MsgProp {
props = append(props, m)
@@ -4747,7 +4747,7 @@ func (nw *network) recover() {
}
func (nw *network) filter(msgs []pb.Message) []pb.Message {
- mm := []pb.Message{}
+ var mm []pb.Message
for _, m := range msgs {
if nw.ignorem[m.Type] {
continue


@@ -585,7 +585,7 @@ func TestRawNodeProposeAddDuplicateNode(t *testing.T) {
// TestRawNodeReadIndex ensures that Rawnode.ReadIndex sends the MsgReadIndex message
// to the underlying raft. It also ensures that ReadState can be read out.
func TestRawNodeReadIndex(t *testing.T) {
- msgs := []pb.Message{}
+ var msgs []pb.Message
appendStep := func(r *raft, m pb.Message) error {
msgs = append(msgs, m)
return nil


@@ -85,7 +85,7 @@ func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
)
ctx := string(m.Context)
- rss := []*readIndexStatus{}
+ var rss []*readIndexStatus
for _, okctx := range ro.readIndexQueue {
i++


@@ -85,7 +85,7 @@ func (t txMock) UnsafeGetRole(s string) *authpb.Role {
}
func (t txMock) UnsafeGetAllUsers() []*authpb.User {
- users := []*authpb.User{}
+ var users []*authpb.User
for _, u := range t.be.users {
users = append(users, u)
}
@@ -93,7 +93,7 @@ func (t txMock) UnsafeGetAllUsers() []*authpb.User {
}
func (t txMock) UnsafeGetAllRoles() []*authpb.Role {
- roles := []*authpb.Role{}
+ var roles []*authpb.Role
for _, r := range t.be.roles {
roles = append(roles, r)
}


@@ -271,7 +271,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error {
initMap[url.String()] = struct{}{}
}
- missing := []string{}
+ var missing []string
for url := range initMap {
if _, ok := apMap[url]; !ok {
missing = append(missing, url)


@@ -708,7 +708,7 @@ func (e *Etcd) serveClients() (err error) {
etcdhttp.HandleMetrics(mux)
etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server)
- gopts := []grpc.ServerOption{}
+ var gopts []grpc.ServerOption
if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: e.cfg.GRPCKeepAliveMinTime,


@@ -220,7 +220,7 @@ func (s *Snapshotter) snapNames() ([]string, error) {
}
func (s *Snapshotter) checkSuffix(names []string) []string {
- snaps := []string{}
+ var snaps []string
for i := range names {
if strings.HasSuffix(names[i], snapSuffix) {
snaps = append(snaps, names[i])


@@ -748,7 +748,7 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
consistIndex: ci,
beHooks: serverstorage.NewBackendHooks(lg, ci),
}
- ents := []raftpb.Entry{}
+ var ents []raftpb.Entry
for i := 1; i <= 4; i++ {
ent := raftpb.Entry{
Term: 1,


@@ -575,7 +575,7 @@ func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
rev := int64(0)
switch c.Target {
case pb.Compare_VALUE:
- v := []byte{}
+ var v []byte
if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
v = tv.Value
}


@@ -143,7 +143,7 @@ func newCluster(lg *zap.Logger, memberCount int, ver semver.Version) *clusterMoc
func (c *clusterMock) StepMonitors() {
// Execute monitor functions in random order as it is not guaranteed
- fs := []func(){}
+ var fs []func()
for _, m := range c.members {
fs = append(fs, m.monitor.UpdateStorageVersionIfNeeded)
if m.isLeader {


@@ -759,7 +759,7 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh
}
now := time.Now()
- cps := []*pb.LeaseCheckpoint{}
+ var cps []*pb.LeaseCheckpoint
for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit {
lt := le.leaseCheckpointHeap[0]
if lt.time.After(now) /* lt.time: next checkpoint time */ {


@@ -18,7 +18,7 @@ import (
"context"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy/cache"
)
@@ -162,7 +162,7 @@ func requestOpToOp(union *pb.RequestOp) clientv3.Op {
}
func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
if len(r.RangeEnd) != 0 {
opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
}
@@ -190,7 +190,7 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
}
func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))
if r.IgnoreValue {
opts = append(opts, clientv3.WithIgnoreValue())
@@ -205,7 +205,7 @@ func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
}
func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
if len(r.RangeEnd) != 0 {
opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
}


@@ -96,7 +96,7 @@ func (tp *TCPProxy) Run() error {
tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr})
}
- eps := []string{}
+ var eps []string
for _, ep := range tp.Endpoints {
eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port))
}


@@ -1009,7 +1009,7 @@ func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex {
}
func createBytesSlice(bytesN, sliceN int) [][]byte {
- rs := [][]byte{}
+ var rs [][]byte
for len(rs) != sliceN {
v := make([]byte, bytesN)
if _, err := rand.Read(v); err != nil {


@@ -120,7 +120,8 @@ func CorruptBBolt(fpath string) error {
if b == nil {
return errors.New("got nil bucket for 'key'")
}
- keys, vals := [][]byte{}, [][]byte{}
+ var vals [][]byte
+ var keys [][]byte
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
keys = append(keys, k)


@@ -79,7 +79,7 @@ func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) {
}
func (s *alarmBackend) unsafeGetAllAlarms(tx backend.ReadTx) ([]*etcdserverpb.AlarmMember, error) {
- ms := []*etcdserverpb.AlarmMember{}
+ var ms []*etcdserverpb.AlarmMember
err := tx.UnsafeForEach(Alarm, func(k, v []byte) error {
var m etcdserverpb.AlarmMember
if err := m.Unmarshal(k); err != nil {


@@ -139,7 +139,7 @@ func (d *decoder) isTornEntry(data []byte) bool {
fileOff := d.lastValidOff + frameSizeBytes
curOff := 0
- chunks := [][]byte{}
+ var chunks [][]byte
// split data on sector boundaries
for curOff < len(data) {
chunkLen := int(minSectorSize - (fileOff % minSectorSize))


@@ -52,7 +52,7 @@ func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL,
if err != nil {
t.Fatalf("Failed to read WAL: %v", err)
}
- entries := []raftpb.Entry{}
+ var entries []raftpb.Entry
for _, req := range reqs {
entries = append(entries, raftpb.Entry{
Term: 1,


@@ -106,7 +106,7 @@ func TestLeaseGrantAndList(t *testing.T) {
cc := clus.Client()
t.Logf("Created cluster and client")
testutils.ExecuteUntil(ctx, t, func() {
- createdLeases := []clientv3.LeaseID{}
+ var createdLeases []clientv3.LeaseID
for i := 0; i < nc.leaseCount; i++ {
leaseResp, err := cc.Grant(ctx, 10)
t.Logf("Grant returned: resp:%s err:%v", leaseResp.String(), err)
@@ -117,7 +117,7 @@ func TestLeaseGrantAndList(t *testing.T) {
// Because we're not guarunteed to talk to the same member, wait for
// listing to eventually return true, either by the result propagaing
// or by hitting an up to date member.
- leases := []clientv3.LeaseStatus{}
+ var leases []clientv3.LeaseStatus
require.Eventually(t, func() bool {
resp, err := cc.Leases(ctx)
if err != nil {


@@ -124,7 +124,7 @@ func TestTxnFail(t *testing.T) {
}
func getRespValues(r *clientv3.TxnResponse) []string {
- ss := []string{}
+ var ss []string
if r.Succeeded {
ss = append(ss, "SUCCESS")
} else {


@@ -120,7 +120,7 @@ func TestAuthority(t *testing.T) {
func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluster) []string {
t.Helper()
- endpoints := []string{}
+ var endpoints []string
for i := 0; i < clus.Cfg.ClusterSize; i++ {
ent := pattern
if strings.Contains(ent, "%d") {
@@ -135,7 +135,7 @@ func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluste
}
func assertAuthority(t *testing.T, expectAurhority string, clus *e2e.EtcdProcessCluster) {
- logs := []e2e.LogsExpect{}
+ var logs []e2e.LogsExpect
for _, proc := range clus.Procs {
logs = append(logs, proc.Logs())
}


@@ -30,7 +30,7 @@ func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWi
func makeMirrorTest(cx ctlCtx) {
var (
- flags = []string{}
+ flags []string
kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
prefix = "key"


@@ -135,7 +135,7 @@ func (c integrationClient) Get(ctx context.Context, key string, o config.GetOpti
ctx, cancel = context.WithTimeout(ctx, o.Timeout)
defer cancel()
}
- clientOpts := []clientv3.OpOption{}
+ var clientOpts []clientv3.OpOption
if o.Revision != 0 {
clientOpts = append(clientOpts, clientv3.WithRev(int64(o.Revision)))
}
@@ -164,7 +164,7 @@ func (c integrationClient) Get(ctx context.Context, key string, o config.GetOpti
}
func (c integrationClient) Put(ctx context.Context, key, value string, opts config.PutOptions) error {
- clientOpts := []clientv3.OpOption{}
+ var clientOpts []clientv3.OpOption
if opts.LeaseID != 0 {
clientOpts = append(clientOpts, clientv3.WithLease(opts.LeaseID))
}
@@ -173,7 +173,7 @@ func (c integrationClient) Put(ctx context.Context, key, value string, opts conf
}
func (c integrationClient) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) {
- clientOpts := []clientv3.OpOption{}
+ var clientOpts []clientv3.OpOption
if o.Prefix {
clientOpts = append(clientOpts, clientv3.WithPrefix())
}
@@ -192,7 +192,7 @@ func (c integrationClient) Compact(ctx context.Context, rev int64, o config.Comp
ctx, cancel = context.WithTimeout(ctx, o.Timeout)
defer cancel()
}
- clientOpts := []clientv3.CompactOption{}
+ var clientOpts []clientv3.CompactOption
if o.Physical {
clientOpts = append(clientOpts, clientv3.WithCompactPhysical())
}
@@ -253,7 +253,7 @@ func (c integrationClient) Defragment(ctx context.Context, o config.DefragOption
}
func (c integrationClient) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
- leaseOpts := []clientv3.LeaseOption{}
+ var leaseOpts []clientv3.LeaseOption
if o.WithAttachedKeys {
leaseOpts = append(leaseOpts, clientv3.WithAttachedKeys())
}
@@ -274,7 +274,7 @@ func (c integrationClient) UserChangePass(ctx context.Context, user, newPass str
func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) {
txn := c.Client.Txn(ctx)
- cmps := []clientv3.Cmp{}
+ var cmps []clientv3.Cmp
for _, c := range compares {
cmp, err := etcdctlcmd.ParseCompare(c)
if err != nil {
@@ -299,7 +299,7 @@ func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail [
}
func getOps(ss []string) ([]clientv3.Op, error) {
- ops := []clientv3.Op{}
+ var ops []clientv3.Op
for _, s := range ss {
s = strings.TrimSpace(s)
args := etcdctlcmd.Argify(s)
@@ -316,7 +316,7 @@ func getOps(ss []string) ([]clientv3.Op, error) {
}
func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
- opOpts := []clientv3.OpOption{}
+ var opOpts []clientv3.OpOption
if opts.Prefix {
opOpts = append(opOpts, clientv3.WithPrefix())
}


@@ -39,7 +39,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/tlsutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/grpc_testing"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/server/v3/config"
@@ -239,7 +239,7 @@ func (c *Cluster) Launch(t testutil.TB) {
// ProtoMembers returns a list of all active members as client.Members
func (c *Cluster) ProtoMembers() []*pb.Member {
- ms := []*pb.Member{}
+ var ms []*pb.Member
for _, m := range c.Members {
pScheme := SchemeFromTLSInfo(m.PeerTLSInfo)
cScheme := SchemeFromTLSInfo(m.ClientTLSInfo)


@@ -21,7 +21,7 @@ import (
"strings"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/functional/rpcpb"
"go.uber.org/zap"
@@ -36,7 +36,7 @@ func inject_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
var mresp *clientv3.MemberListResponse
mresp, err = cli1.MemberList(context.Background())
- mss := []string{}
+ var mss []string
if err == nil && mresp != nil {
mss = describeMembers(mresp)
}
@@ -161,7 +161,7 @@ func recover_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
var mresp *clientv3.MemberListResponse
mresp, err = cli2.MemberList(context.Background())
- mss := []string{}
+ var mss []string
if err == nil && mresp != nil {
mss = describeMembers(mresp)
}


@@ -20,7 +20,7 @@ import (
"strings"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/functional/rpcpb"
"go.uber.org/zap"
@@ -82,7 +82,7 @@ func (c *fetchSnapshotCaseQuorum) Inject(clus *Cluster) error {
defer leaderc.Close()
var mresp *clientv3.MemberListResponse
mresp, err = leaderc.MemberList(context.Background())
- mss := []string{}
+ var mss []string
if err == nil && mresp != nil {
mss = describeMembers(mresp)
}
@@ -148,7 +148,7 @@ func (c *fetchSnapshotCaseQuorum) Recover(clus *Cluster) error {
clus.Members[oldlead].EtcdOnSnapshotRestore = clus.Members[oldlead].Etcd
clus.Members[oldlead].EtcdOnSnapshotRestore.InitialClusterState = "existing"
name := clus.Members[oldlead].Etcd.Name
- initClus := []string{}
+ var initClus []string
for _, u := range clus.Members[oldlead].Etcd.AdvertisePeerURLs {
initClus = append(initClus, fmt.Sprintf("%s=%s", name, u))
}


@@ -296,8 +296,8 @@ func (clus *Cluster) UpdateDelayLatencyMs() {
func (clus *Cluster) setStresserChecker() {
css := &compositeStresser{}
- lss := []*leaseStresser{}
- rss := []*runnerStresser{}
+ var lss []*leaseStresser
+ var rss []*runnerStresser
for _, m := range clus.Members {
sss := newStresser(clus, m)
css.stressers = append(css.stressers, &compositeStresser{sss})
@@ -419,7 +419,7 @@ func (clus *Cluster) broadcast(op rpcpb.Operation) error {
wg.Wait()
close(errc)
- errs := []string{}
+ var errs []string
for err := range errc {
if err == nil {
continue


@@ -258,7 +258,7 @@ func (clus *Cluster) doTestCase(t *testing.T, fa Case) {
t.Fatalf("wait full health error: %v", err)
}
- checkerFailExceptions := []rpcpb.Checker{}
+ var checkerFailExceptions []rpcpb.Checker
switch fcase {
case rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH:
// TODO: restore from snapshot


@@ -28,7 +28,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -384,7 +384,7 @@ func TestKVDeleteRange(t *testing.T) {
if err != nil {
t.Fatalf("#%d: couldn't get keys (%v)", i, err)
}
- keys := []string{}
+ var keys []string
for _, kv := range resp.Kvs {
keys = append(keys, string(kv.Key))
}


@@ -24,7 +24,7 @@ import (
"time"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
@@ -249,7 +249,7 @@ func TestLeaseKeepAliveNotFound(t *testing.T) {
defer clus.Terminate(t)
cli := clus.RandClient()
- lchs := []leaseCh{}
+ var lchs []leaseCh
for i := 0; i < 3; i++ {
resp, rerr := cli.Grant(context.TODO(), 5)
if rerr != nil {
@@ -628,7 +628,7 @@ func TestLeaseLeases(t *testing.T) {
cli := clus.RandClient()
- ids := []clientv3.LeaseID{}
+ var ids []clientv3.LeaseID
for i := 0; i < 5; i++ {
resp, err := cli.Grant(context.Background(), 10)
if err != nil {


@@ -357,7 +357,7 @@ func TestLeasingGetWithOpts(t *testing.T) {
}
}
- getOpts := []clientv3.OpOption{}
+ var getOpts []clientv3.OpOption
for i := 0; i < len(opts); i++ {
getOpts = append(getOpts, opts[rand.Intn(len(opts))])
}
@@ -963,7 +963,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
clientv3.WithPrefix())
wresp := <-w
c := 0
- evs := []clientv3.Event{}
+ var evs []clientv3.Event
for _, ev := range wresp.Events {
evs = append(evs, *ev)
if ev.Kv.ModRevision == tresp.Header.Revision {
@@ -1021,7 +1021,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
// random list of comparisons, all true
cmps, useThen := randCmps("k-", dat)
// random list of puts/gets; unique keys
- ops := []clientv3.Op{}
+ var ops []clientv3.Op
usedIdx := make(map[int]struct{})
for i := 0; i < keyCount; i++ {
idx := rand.Intn(keyCount)


@@ -28,7 +28,7 @@ import (
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"google.golang.org/grpc"
)
@@ -162,7 +162,7 @@ func getHTTPBodyAsLines(t *testing.T, url string) []string {
}
reader := bufio.NewReader(resp.Body)
- lines := []string{}
+ var lines []string
for {
line, err := reader.ReadString('\n')
if err != nil {


@@ -26,7 +26,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/snapshot"
"go.etcd.io/etcd/server/v3/embed"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
@@ -55,7 +55,7 @@ func TestSaveSnapshotFilePermissions(t *testing.T) {
// TestSaveSnapshotVersion ensures that the snapshot returns proper storage version.
func TestSaveSnapshotVersion(t *testing.T) {
// Put some keys to ensure that wal snapshot is triggered
- kvs := []kv{}
+ var kvs []kv
for i := 0; i < 10; i++ {
kvs = append(kvs, kv{fmt.Sprintf("%d", i), "test"})
}


@@ -110,7 +110,7 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
t.Errorf("expected watcher channel, got nil")
}
readyc <- struct{}{}
- evs := []*clientv3.Event{}
+ var evs []*clientv3.Event
for i := 0; i < numKeyUpdates*2; i++ {
resp, ok := <-prefixc
if !ok {
@@ -120,14 +120,14 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
}
// check response
- expected := []string{}
+ var expected []string
bkeys := []string{"bar", "baz"}
for _, k := range bkeys {
for i := 0; i < numKeyUpdates; i++ {
expected = append(expected, fmt.Sprintf("%s-%d", k, i))
}
}
- got := []string{}
+ var got []string
for _, ev := range evs {
got = append(got, string(ev.Kv.Value))
}


@@ -150,7 +150,7 @@ func setupClient(t *testing.T, endpointPattern string, clus *integration.Cluster
func templateEndpoints(t *testing.T, pattern string, clus *integration.Cluster) []string {
t.Helper()
- endpoints := []string{}
+ var endpoints []string
for _, m := range clus.Members {
ent := pattern
if strings.Contains(ent, "%d") {


@@ -20,7 +20,7 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/tests/v3/framework/integration"
)
@@ -40,7 +40,7 @@ func TestElectionWait(t *testing.T) {
}()
electedc := make(chan string)
- nextc := []chan struct{}{}
+ var nextc []chan struct{}
// wait for all elections
donec := make(chan struct{})


@@ -28,7 +28,7 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/config"
"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -1111,7 +1111,7 @@ func TestV3DeleteRange(t *testing.T) {
dresp.Header.Revision, rresp.Header.Revision)
}
- keys := [][]byte{}
+ var keys [][]byte
for j := range rresp.Kvs {
keys = append(keys, rresp.Kvs[j].Key)
}


@@ -458,7 +458,7 @@ func TestV3LeaseLeases(t *testing.T) {
defer cancel0()
// create leases
- ids := []int64{}
+ var ids []int64
for i := 0; i < 5; i++ {
lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
ctx0,


@@ -174,7 +174,7 @@ func TestSTMSerialize(t *testing.T) {
defer close(updatec)
for i := 0; i < 5; i++ {
s := fmt.Sprintf("%d", i)
- ops := []v3.Op{}
+ var ops []v3.Op
for _, k := range keys {
ops = append(ops, v3.OpPut(k, s))
}
@@ -190,7 +190,7 @@ func TestSTMSerialize(t *testing.T) {
for range updatec {
curEtcdc := clus.RandClient()
applyf := func(stm concurrency.STM) error {
- vs := []string{}
+ var vs []string
for i := range keys {
vs = append(vs, stm.Get(keys[i]))
}


@@ -748,7 +748,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
t.Fatalf("kvc.Txn failed: %+v", tresp)
}
- events := []*mvccpb.Event{}
+ var events []*mvccpb.Event
for len(events) < 3 {
resp, err := wStream.Recv()
if err != nil {
@@ -845,7 +845,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
},
}
- events := []*mvccpb.Event{}
+ var events []*mvccpb.Event
for len(events) < 4 {
resp, err := wStream.Recv()
if err != nil {


@@ -70,7 +70,7 @@ func (d *dispatcherPool) flush() {
// sort by sockets; preserve the packet ordering within a socket
pktmap := make(map[io.Writer][]dispatchPacket)
- outs := []io.Writer{}
+ var outs []io.Writer
for _, pkt := range pkts {
opkts, ok := pktmap[pkt.out]
if !ok {
@@ -103,7 +103,7 @@ func (d *dispatcherPool) Copy(w io.Writer, f fetchFunc) error {
return err
}
- pkts := []dispatchPacket{}
+ var pkts []dispatchPacket
for len(b) > 0 {
pkt := b
if len(b) > dispatchPacketBytes {