etcd: fix style of declaring empty slices

Declare empty slices as `var s []int` instead of `s := []int{}`, per the Go code review guideline: https://github.com/golang/go/wiki/CodeReviewComments#declaring-empty-slices

Signed-off-by: demoManito <1430482733@qq.com>
demoManito 2022-09-16 14:30:00 +08:00
parent b7ba0542f6
commit 72cf0cc04a
55 changed files with 106 additions and 104 deletions
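
For context on the guideline: `var s []T` declares a nil slice, while `s := []T{}` allocates a non-nil, zero-length one. Both report `len == 0` and behave identically under `append` and `range`, but they differ under `== nil`, `reflect.DeepEqual`, and JSON encoding. A minimal standalone sketch (not part of this commit) illustrating the distinction:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	var a []int  // nil slice: the style this commit adopts
	b := []int{} // non-nil, zero-length slice: the style being replaced

	fmt.Println(a == nil, b == nil) // true false
	fmt.Println(len(a), len(b))     // 0 0

	// append and range treat the two identically.
	a = append(a, 1)
	b = append(b, 1)

	// But they are not interchangeable everywhere:
	fmt.Println(reflect.DeepEqual([]int(nil), []int{})) // false

	ja, _ := json.Marshal([]int(nil))
	jb, _ := json.Marshal([]int{})
	fmt.Println(string(ja), string(jb)) // null []
}

Note that hunks below touching `reflect.DeepEqual` comparisons (e.g., in `driveSetTests`) rely on the compared function also returning a nil slice.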

View File

@@ -45,7 +45,7 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]
 		tcp2ap[tcpAddr.String()] = url
 	}
-	stringParts := []string{}
+	var stringParts []string
 	updateNodeMap := func(service, scheme string) error {
 		_, addrs, err := lookupSRV(service, "tcp", dns)
 		if err != nil {

View File

@@ -52,7 +52,7 @@ func TestSRVGetCluster(t *testing.T) {
 		{Target: "2.example.com.", Port: 2480},
 		{Target: "3.example.com.", Port: 2480},
 	}
-	srvNone := []*net.SRV{}
+	var srvNone []*net.SRV
 	tests := []struct {
 		service string

View File

@@ -222,7 +222,8 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string
 func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
 	// reverse lookup
-	wildcards, names := []string{}, []string{}
+	var names []string
+	var wildcards []string
 	for _, dns := range dnsNames {
 		if strings.HasPrefix(dns, "*.") {
 			wildcards = append(wildcards, dns[1:])

View File

@@ -39,7 +39,7 @@ func equal(a, b []string) bool {
 func driveSetTests(t *testing.T, s Set) {
 	// Verify operations on an empty set
-	eValues := []string{}
+	var eValues []string
 	values := s.Values()
 	if !reflect.DeepEqual(values, eValues) {
 		t.Fatalf("Expect values=%v got %v", eValues, values)

View File

@@ -20,7 +20,7 @@ import (
 	"time"
 
 	"github.com/spf13/cobra"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
 )
@@ -67,7 +67,7 @@ func getDelOp(args []string) (string, []clientv3.OpOption) {
 		cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
 	}
-	opts := []clientv3.OpOption{}
+	var opts []clientv3.OpOption
 	key := args[0]
 	if len(args) > 1 {
 		if delPrefix || delFromKey {

View File

@@ -23,7 +23,7 @@ import (
 	"go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/pkg/v3/logutil"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
 	"go.etcd.io/etcd/pkg/v3/flags"
@@ -101,7 +101,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
 	ka := keepAliveTimeFromCmd(cmd)
 	kat := keepAliveTimeoutFromCmd(cmd)
 	auth := authCfgFromCmd(cmd)
-	cfgs := []*clientv3.Config{}
+	var cfgs []*clientv3.Config
 	for _, ep := range endpointsFromCluster(cmd) {
 		cfg, err := clientv3.NewClientConfig(&clientv3.ConfigSpec{
 			Endpoints: []string{ep},
@@ -172,7 +172,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
 	close(hch)
 
 	errs := false
-	healthList := []epHealth{}
+	var healthList []epHealth
 	for h := range hch {
 		healthList = append(healthList, h)
 		if h.Error != "" {
@@ -193,7 +193,7 @@ type epStatus struct {
 func epStatusCommandFunc(cmd *cobra.Command, args []string) {
 	c := mustClientFromCmd(cmd)
-	statusList := []epStatus{}
+	var statusList []epStatus
 	var err error
 	for _, ep := range endpointsFromCluster(cmd) {
 		ctx, cancel := commandCtx(cmd)
@@ -222,7 +222,7 @@ type epHashKV struct {
 func epHashKVCommandFunc(cmd *cobra.Command, args []string) {
 	c := mustClientFromCmd(cmd)
-	hashList := []epHashKV{}
+	var hashList []epHashKV
 	var err error
 	for _, ep := range endpointsFromCluster(cmd) {
 		ctx, cancel := commandCtx(cmd)
@@ -288,7 +288,7 @@ func endpointsFromCluster(cmd *cobra.Command) []string {
 		cobrautl.ExitWithError(cobrautl.ExitError, err)
 	}
-	ret := []string{}
+	var ret []string
 	for _, m := range membs.Members {
 		ret = append(ret, m.ClientURLs...)
 	}

View File

@@ -19,7 +19,7 @@ import (
 	"strings"
 
 	"github.com/spf13/cobra"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
 )
@@ -107,7 +107,7 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
 		cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
 	}
-	opts := []clientv3.OpOption{}
+	var opts []clientv3.OpOption
 	switch getConsistency {
 	case "s":
 		opts = append(opts, clientv3.WithSerializable())

View File

@@ -26,7 +26,7 @@ import (
 	"go.etcd.io/etcd/client/pkg/v3/logutil"
 	"go.etcd.io/etcd/client/pkg/v3/srv"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
 	"go.etcd.io/etcd/pkg/v3/flags"
@@ -363,7 +363,7 @@ func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) {
 		return eps, err
 	}
 	// strip insecure connections
-	ret := []string{}
+	var ret []string
 	for _, ep := range eps {
 		if strings.HasPrefix(ep, "http://") {
 			fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)

View File

@@ -27,7 +27,7 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/mirror"
 
 	"github.com/spf13/cobra"
@@ -191,7 +191,7 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
 		}
 		var lastRev int64
-		ops := []clientv3.Op{}
+		var ops []clientv3.Op
 		for _, ev := range wr.Events {
 			nextRev := ev.Kv.ModRevision

View File

@@ -21,7 +21,7 @@ import (
 	"strings"
 
 	"github.com/spf13/cobra"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
 )
@@ -157,7 +157,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
 	display.MemberAdd(*resp)
 
 	if _, ok := (display).(*simplePrinter); ok {
-		conf := []string{}
+		var conf []string
 		for _, memb := range resp.Members {
 			for _, u := range memb.PeerURLs {
 				n := memb.Name

View File

@@ -101,7 +101,7 @@ func getPutOp(args []string) (string, string, []clientv3.OpOption) {
 		cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err))
 	}
-	opts := []clientv3.OpOption{}
+	var opts []clientv3.OpOption
 	if id != 0 {
 		opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id)))
 	}

View File

@@ -36,7 +36,7 @@ var (
 	interruptRegisterMu, interruptExitMu sync.Mutex
 	// interruptHandlers holds all registered InterruptHandlers in order
 	// they will be executed.
-	interruptHandlers = []InterruptHandler{}
+	interruptHandlers []InterruptHandler
 )
 
 // RegisterInterruptHandler registers a new InterruptHandler. Handlers registered

View File

@@ -124,7 +124,7 @@ func (t TimeSeries) String() string {
 	if err := wr.Write([]string{"UNIX-SECOND", "MIN-LATENCY-MS", "AVG-LATENCY-MS", "MAX-LATENCY-MS", "AVG-THROUGHPUT"}); err != nil {
 		log.Fatal(err)
 	}
-	rows := [][]string{}
+	var rows [][]string
 	for i := range t {
 		row := []string{
 			fmt.Sprintf("%d", t[i].Timestamp),

View File

@@ -124,7 +124,7 @@ func TestNodeStepUnblock(t *testing.T) {
 
 // TestNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
 func TestNodePropose(t *testing.T) {
-	msgs := []raftpb.Message{}
+	var msgs []raftpb.Message
 	appendStep := func(r *raft, m raftpb.Message) error {
 		msgs = append(msgs, m)
 		return nil
@@ -166,7 +166,7 @@ func TestNodePropose(t *testing.T) {
 // TestNodeReadIndex ensures that node.ReadIndex sends the MsgReadIndex message to the underlying raft.
 // It also ensures that ReadState can be read out through ready chan.
 func TestNodeReadIndex(t *testing.T) {
-	msgs := []raftpb.Message{}
+	var msgs []raftpb.Message
 	appendStep := func(r *raft, m raftpb.Message) error {
 		msgs = append(msgs, m)
 		return nil
@@ -306,7 +306,7 @@ func TestNodeReadIndexToOldLeader(t *testing.T) {
 // TestNodeProposeConfig ensures that node.ProposeConfChange sends the given configuration proposal
 // to the underlying raft.
 func TestNodeProposeConfig(t *testing.T) {
-	msgs := []raftpb.Message{}
+	var msgs []raftpb.Message
 	appendStep := func(r *raft, m raftpb.Message) error {
 		msgs = append(msgs, m)
 		return nil
@@ -456,7 +456,7 @@ func TestBlockProposal(t *testing.T) {
 }
 
 func TestNodeProposeWaitDropped(t *testing.T) {
-	msgs := []raftpb.Message{}
+	var msgs []raftpb.Message
 	droppingMsg := []byte("test_dropping")
 	dropStep := func(r *raft, m raftpb.Message) error {
 		if m.Type == raftpb.MsgProp && strings.Contains(m.String(), string(droppingMsg)) {

View File

@@ -640,13 +640,13 @@ func TestLogReplication(t *testing.T) {
 				t.Errorf("#%d.%d: committed = %d, want %d", i, j, sm.raftLog.committed, tt.wcommitted)
 			}
 
-			ents := []pb.Entry{}
+			var ents []pb.Entry
 			for _, e := range nextEnts(sm, tt.network.storage[j]) {
 				if e.Data != nil {
 					ents = append(ents, e)
 				}
 			}
-			props := []pb.Message{}
+			var props []pb.Message
 			for _, m := range tt.msgs {
 				if m.Type == pb.MsgProp {
 					props = append(props, m)
@@ -4747,7 +4747,7 @@ func (nw *network) recover() {
 }
 
 func (nw *network) filter(msgs []pb.Message) []pb.Message {
-	mm := []pb.Message{}
+	var mm []pb.Message
 	for _, m := range msgs {
 		if nw.ignorem[m.Type] {
 			continue

View File

@@ -585,7 +585,7 @@ func TestRawNodeProposeAddDuplicateNode(t *testing.T) {
 // TestRawNodeReadIndex ensures that Rawnode.ReadIndex sends the MsgReadIndex message
 // to the underlying raft. It also ensures that ReadState can be read out.
 func TestRawNodeReadIndex(t *testing.T) {
-	msgs := []pb.Message{}
+	var msgs []pb.Message
 	appendStep := func(r *raft, m pb.Message) error {
 		msgs = append(msgs, m)
 		return nil

View File

@@ -85,7 +85,7 @@ func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
 	)
 	ctx := string(m.Context)
-	rss := []*readIndexStatus{}
+	var rss []*readIndexStatus
 	for _, okctx := range ro.readIndexQueue {
 		i++

View File

@@ -85,7 +85,7 @@ func (t txMock) UnsafeGetRole(s string) *authpb.Role {
 }
 
 func (t txMock) UnsafeGetAllUsers() []*authpb.User {
-	users := []*authpb.User{}
+	var users []*authpb.User
 	for _, u := range t.be.users {
 		users = append(users, u)
 	}
@@ -93,7 +93,7 @@ func (t txMock) UnsafeGetAllUsers() []*authpb.User {
 }
 
 func (t txMock) UnsafeGetAllRoles() []*authpb.Role {
-	roles := []*authpb.Role{}
+	var roles []*authpb.Role
 	for _, r := range t.be.roles {
 		roles = append(roles, r)
 	}

View File

@@ -271,7 +271,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error {
 		initMap[url.String()] = struct{}{}
 	}
 
-	missing := []string{}
+	var missing []string
 	for url := range initMap {
 		if _, ok := apMap[url]; !ok {
 			missing = append(missing, url)

View File

@@ -708,7 +708,7 @@ func (e *Etcd) serveClients() (err error) {
 	etcdhttp.HandleMetrics(mux)
 	etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server)
 
-	gopts := []grpc.ServerOption{}
+	var gopts []grpc.ServerOption
 	if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
 		gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
 			MinTime: e.cfg.GRPCKeepAliveMinTime,

View File

@@ -220,7 +220,7 @@ func (s *Snapshotter) snapNames() ([]string, error) {
 }
 
 func (s *Snapshotter) checkSuffix(names []string) []string {
-	snaps := []string{}
+	var snaps []string
 	for i := range names {
 		if strings.HasSuffix(names[i], snapSuffix) {
 			snaps = append(snaps, names[i])

View File

@@ -748,7 +748,7 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
 		consistIndex: ci,
 		beHooks:      serverstorage.NewBackendHooks(lg, ci),
 	}
-	ents := []raftpb.Entry{}
+	var ents []raftpb.Entry
 	for i := 1; i <= 4; i++ {
 		ent := raftpb.Entry{
 			Term: 1,

View File

@@ -575,7 +575,7 @@ func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
 	rev := int64(0)
 	switch c.Target {
 	case pb.Compare_VALUE:
-		v := []byte{}
+		var v []byte
 		if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
 			v = tv.Value
 		}

View File

@@ -143,7 +143,7 @@ func newCluster(lg *zap.Logger, memberCount int, ver semver.Version) *clusterMoc
 func (c *clusterMock) StepMonitors() {
 	// Execute monitor functions in random order as it is not guaranteed
-	fs := []func(){}
+	var fs []func()
 	for _, m := range c.members {
 		fs = append(fs, m.monitor.UpdateStorageVersionIfNeeded)
 		if m.isLeader {

View File

@@ -759,7 +759,7 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh
 	}
 
 	now := time.Now()
-	cps := []*pb.LeaseCheckpoint{}
+	var cps []*pb.LeaseCheckpoint
 	for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit {
 		lt := le.leaseCheckpointHeap[0]
 		if lt.time.After(now) /* lt.time: next checkpoint time */ {

View File

@@ -18,7 +18,7 @@ import (
 	"context"
 
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/proxy/grpcproxy/cache"
 )
@@ -162,7 +162,7 @@ func requestOpToOp(union *pb.RequestOp) clientv3.Op {
 }
 
 func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
-	opts := []clientv3.OpOption{}
+	var opts []clientv3.OpOption
 	if len(r.RangeEnd) != 0 {
 		opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
 	}
@@ -190,7 +190,7 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
 }
 
 func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
-	opts := []clientv3.OpOption{}
+	var opts []clientv3.OpOption
 	opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))
 	if r.IgnoreValue {
 		opts = append(opts, clientv3.WithIgnoreValue())
@@ -205,7 +205,7 @@ func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
 }
 
 func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {
-	opts := []clientv3.OpOption{}
+	var opts []clientv3.OpOption
 	if len(r.RangeEnd) != 0 {
 		opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
 	}

View File

@@ -96,7 +96,7 @@ func (tp *TCPProxy) Run() error {
 		tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr})
 	}
 
-	eps := []string{}
+	var eps []string
 	for _, ep := range tp.Endpoints {
 		eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port))
 	}

View File

@@ -1009,7 +1009,7 @@ func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex {
 }
 
 func createBytesSlice(bytesN, sliceN int) [][]byte {
-	rs := [][]byte{}
+	var rs [][]byte
 	for len(rs) != sliceN {
 		v := make([]byte, bytesN)
 		if _, err := rand.Read(v); err != nil {

View File

@@ -120,7 +120,8 @@ func CorruptBBolt(fpath string) error {
 		if b == nil {
 			return errors.New("got nil bucket for 'key'")
 		}
-		keys, vals := [][]byte{}, [][]byte{}
+		var vals [][]byte
+		var keys [][]byte
 		c := b.Cursor()
 		for k, v := c.First(); k != nil; k, v = c.Next() {
 			keys = append(keys, k)

View File

@@ -79,7 +79,7 @@ func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) {
 }
 
 func (s *alarmBackend) unsafeGetAllAlarms(tx backend.ReadTx) ([]*etcdserverpb.AlarmMember, error) {
-	ms := []*etcdserverpb.AlarmMember{}
+	var ms []*etcdserverpb.AlarmMember
 	err := tx.UnsafeForEach(Alarm, func(k, v []byte) error {
 		var m etcdserverpb.AlarmMember
 		if err := m.Unmarshal(k); err != nil {

View File

@@ -139,7 +139,7 @@ func (d *decoder) isTornEntry(data []byte) bool {
 	fileOff := d.lastValidOff + frameSizeBytes
 	curOff := 0
-	chunks := [][]byte{}
+	var chunks [][]byte
 	// split data on sector boundaries
 	for curOff < len(data) {
 		chunkLen := int(minSectorSize - (fileOff % minSectorSize))

View File

@@ -52,7 +52,7 @@ func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL,
 		if err != nil {
 			t.Fatalf("Failed to read WAL: %v", err)
 		}
-		entries := []raftpb.Entry{}
+		var entries []raftpb.Entry
 		for _, req := range reqs {
 			entries = append(entries, raftpb.Entry{
 				Term: 1,

View File

@@ -106,7 +106,7 @@ func TestLeaseGrantAndList(t *testing.T) {
 			cc := clus.Client()
 			t.Logf("Created cluster and client")
 			testutils.ExecuteUntil(ctx, t, func() {
-				createdLeases := []clientv3.LeaseID{}
+				var createdLeases []clientv3.LeaseID
 				for i := 0; i < nc.leaseCount; i++ {
 					leaseResp, err := cc.Grant(ctx, 10)
 					t.Logf("Grant returned: resp:%s err:%v", leaseResp.String(), err)
@@ -117,7 +117,7 @@ func TestLeaseGrantAndList(t *testing.T) {
 				// Because we're not guarunteed to talk to the same member, wait for
 				// listing to eventually return true, either by the result propagaing
 				// or by hitting an up to date member.
-				leases := []clientv3.LeaseStatus{}
+				var leases []clientv3.LeaseStatus
 				require.Eventually(t, func() bool {
 					resp, err := cc.Leases(ctx)
 					if err != nil {

View File

@@ -124,7 +124,7 @@ func TestTxnFail(t *testing.T) {
 }
 
 func getRespValues(r *clientv3.TxnResponse) []string {
-	ss := []string{}
+	var ss []string
 	if r.Succeeded {
 		ss = append(ss, "SUCCESS")
 	} else {

View File

@@ -120,7 +120,7 @@ func TestAuthority(t *testing.T) {
 func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluster) []string {
 	t.Helper()
-	endpoints := []string{}
+	var endpoints []string
 	for i := 0; i < clus.Cfg.ClusterSize; i++ {
 		ent := pattern
 		if strings.Contains(ent, "%d") {
@@ -135,7 +135,7 @@ func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluste
 }
 
 func assertAuthority(t *testing.T, expectAurhority string, clus *e2e.EtcdProcessCluster) {
-	logs := []e2e.LogsExpect{}
+	var logs []e2e.LogsExpect
 	for _, proc := range clus.Procs {
 		logs = append(logs, proc.Logs())
 	}

View File

@@ -30,7 +30,7 @@ func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWi
 func makeMirrorTest(cx ctlCtx) {
 	var (
-		flags  = []string{}
+		flags  []string
 		kvs    = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
 		kvs2   = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
 		prefix = "key"

View File

@@ -135,7 +135,7 @@ func (c integrationClient) Get(ctx context.Context, key string, o config.GetOpti
 		ctx, cancel = context.WithTimeout(ctx, o.Timeout)
 		defer cancel()
 	}
-	clientOpts := []clientv3.OpOption{}
+	var clientOpts []clientv3.OpOption
 	if o.Revision != 0 {
 		clientOpts = append(clientOpts, clientv3.WithRev(int64(o.Revision)))
 	}
@@ -164,7 +164,7 @@ func (c integrationClient) Get(ctx context.Context, key string, o config.GetOpti
 }
 
 func (c integrationClient) Put(ctx context.Context, key, value string, opts config.PutOptions) error {
-	clientOpts := []clientv3.OpOption{}
+	var clientOpts []clientv3.OpOption
 	if opts.LeaseID != 0 {
 		clientOpts = append(clientOpts, clientv3.WithLease(opts.LeaseID))
 	}
@@ -173,7 +173,7 @@ func (c integrationClient) Put(ctx context.Context, key, value string, opts conf
 }
 
 func (c integrationClient) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) {
-	clientOpts := []clientv3.OpOption{}
+	var clientOpts []clientv3.OpOption
 	if o.Prefix {
 		clientOpts = append(clientOpts, clientv3.WithPrefix())
 	}
@@ -192,7 +192,7 @@ func (c integrationClient) Compact(ctx context.Context, rev int64, o config.Comp
 		ctx, cancel = context.WithTimeout(ctx, o.Timeout)
 		defer cancel()
 	}
-	clientOpts := []clientv3.CompactOption{}
+	var clientOpts []clientv3.CompactOption
 	if o.Physical {
 		clientOpts = append(clientOpts, clientv3.WithCompactPhysical())
 	}
@@ -253,7 +253,7 @@ func (c integrationClient) Defragment(ctx context.Context, o config.DefragOption
 }
 
 func (c integrationClient) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
-	leaseOpts := []clientv3.LeaseOption{}
+	var leaseOpts []clientv3.LeaseOption
 	if o.WithAttachedKeys {
 		leaseOpts = append(leaseOpts, clientv3.WithAttachedKeys())
 	}
@@ -274,7 +274,7 @@ func (c integrationClient) UserChangePass(ctx context.Context, user, newPass str
 func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) {
 	txn := c.Client.Txn(ctx)
-	cmps := []clientv3.Cmp{}
+	var cmps []clientv3.Cmp
 	for _, c := range compares {
 		cmp, err := etcdctlcmd.ParseCompare(c)
 		if err != nil {
@@ -299,7 +299,7 @@ func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail [
 }
 
 func getOps(ss []string) ([]clientv3.Op, error) {
-	ops := []clientv3.Op{}
+	var ops []clientv3.Op
 	for _, s := range ss {
 		s = strings.TrimSpace(s)
 		args := etcdctlcmd.Argify(s)
@@ -316,7 +316,7 @@ func getOps(ss []string) ([]clientv3.Op, error) {
 }
 
 func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
-	opOpts := []clientv3.OpOption{}
+	var opOpts []clientv3.OpOption
 	if opts.Prefix {
 		opOpts = append(opOpts, clientv3.WithPrefix())
 	}

View File

@@ -39,7 +39,7 @@ import (
 	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"go.etcd.io/etcd/client/pkg/v3/types"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/pkg/v3/grpc_testing"
 	"go.etcd.io/etcd/raft/v3"
 	"go.etcd.io/etcd/server/v3/config"
@@ -239,7 +239,7 @@ func (c *Cluster) Launch(t testutil.TB) {
 
 // ProtoMembers returns a list of all active members as client.Members
 func (c *Cluster) ProtoMembers() []*pb.Member {
-	ms := []*pb.Member{}
+	var ms []*pb.Member
 	for _, m := range c.Members {
 		pScheme := SchemeFromTLSInfo(m.PeerTLSInfo)
 		cScheme := SchemeFromTLSInfo(m.ClientTLSInfo)

View File

@@ -21,7 +21,7 @@ import (
 	"strings"
 	"time"
 
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/tests/v3/functional/rpcpb"
 
 	"go.uber.org/zap"
@@ -36,7 +36,7 @@ func inject_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
 	var mresp *clientv3.MemberListResponse
 	mresp, err = cli1.MemberList(context.Background())
-	mss := []string{}
+	var mss []string
 	if err == nil && mresp != nil {
 		mss = describeMembers(mresp)
 	}
@@ -161,7 +161,7 @@ func recover_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
 	var mresp *clientv3.MemberListResponse
 	mresp, err = cli2.MemberList(context.Background())
-	mss := []string{}
+	var mss []string
 	if err == nil && mresp != nil {
 		mss = describeMembers(mresp)
 	}

View File

@@ -20,7 +20,7 @@ import (
 	"strings"
 	"time"
 
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/tests/v3/functional/rpcpb"
 
 	"go.uber.org/zap"
@@ -82,7 +82,7 @@ func (c *fetchSnapshotCaseQuorum) Inject(clus *Cluster) error {
 	defer leaderc.Close()
 	var mresp *clientv3.MemberListResponse
 	mresp, err = leaderc.MemberList(context.Background())
-	mss := []string{}
+	var mss []string
 	if err == nil && mresp != nil {
 		mss = describeMembers(mresp)
 	}
@@ -148,7 +148,7 @@ func (c *fetchSnapshotCaseQuorum) Recover(clus *Cluster) error {
 	clus.Members[oldlead].EtcdOnSnapshotRestore = clus.Members[oldlead].Etcd
 	clus.Members[oldlead].EtcdOnSnapshotRestore.InitialClusterState = "existing"
 	name := clus.Members[oldlead].Etcd.Name
-	initClus := []string{}
+	var initClus []string
 	for _, u := range clus.Members[oldlead].Etcd.AdvertisePeerURLs {
 		initClus = append(initClus, fmt.Sprintf("%s=%s", name, u))
 	}

View File

@@ -296,8 +296,8 @@ func (clus *Cluster) UpdateDelayLatencyMs() {
 func (clus *Cluster) setStresserChecker() {
 	css := &compositeStresser{}
-	lss := []*leaseStresser{}
-	rss := []*runnerStresser{}
+	var lss []*leaseStresser
+	var rss []*runnerStresser
 	for _, m := range clus.Members {
 		sss := newStresser(clus, m)
 		css.stressers = append(css.stressers, &compositeStresser{sss})
@@ -419,7 +419,7 @@ func (clus *Cluster) broadcast(op rpcpb.Operation) error {
 	wg.Wait()
 	close(errc)
 
-	errs := []string{}
+	var errs []string
 	for err := range errc {
 		if err == nil {
 			continue

View File

@@ -258,7 +258,7 @@ func (clus *Cluster) doTestCase(t *testing.T, fa Case) {
 		t.Fatalf("wait full health error: %v", err)
 	}
 
-	checkerFailExceptions := []rpcpb.Checker{}
+	var checkerFailExceptions []rpcpb.Checker
 	switch fcase {
 	case rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH:
 		// TODO: restore from snapshot

View File

@@ -28,7 +28,7 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/api/v3/version"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -384,7 +384,7 @@ func TestKVDeleteRange(t *testing.T) {
 		if err != nil {
 			t.Fatalf("#%d: couldn't get keys (%v)", i, err)
 		}
-		keys := []string{}
+		var keys []string
 		for _, kv := range resp.Kvs {
 			keys = append(keys, string(kv.Key))
 		}

View File

@@ -24,7 +24,7 @@ import (
 	"time"
 
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
 	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
@@ -249,7 +249,7 @@ func TestLeaseKeepAliveNotFound(t *testing.T) {
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
-	lchs := []leaseCh{}
+	var lchs []leaseCh
 	for i := 0; i < 3; i++ {
 		resp, rerr := cli.Grant(context.TODO(), 5)
 		if rerr != nil {
@@ -628,7 +628,7 @@ func TestLeaseLeases(t *testing.T) {
 	cli := clus.RandClient()
 
-	ids := []clientv3.LeaseID{}
+	var ids []clientv3.LeaseID
 	for i := 0; i < 5; i++ {
 		resp, err := cli.Grant(context.Background(), 10)
 		if err != nil {

View File

@@ -357,7 +357,7 @@ func TestLeasingGetWithOpts(t *testing.T) {
 		}
 	}
 
-	getOpts := []clientv3.OpOption{}
+	var getOpts []clientv3.OpOption
 	for i := 0; i < len(opts); i++ {
 		getOpts = append(getOpts, opts[rand.Intn(len(opts))])
 	}
@@ -963,7 +963,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
 		clientv3.WithPrefix())
 	wresp := <-w
 	c := 0
-	evs := []clientv3.Event{}
+	var evs []clientv3.Event
 	for _, ev := range wresp.Events {
 		evs = append(evs, *ev)
 		if ev.Kv.ModRevision == tresp.Header.Revision {
@@ -1021,7 +1021,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
 			// random list of comparisons, all true
 			cmps, useThen := randCmps("k-", dat)
 			// random list of puts/gets; unique keys
-			ops := []clientv3.Op{}
+			var ops []clientv3.Op
 			usedIdx := make(map[int]struct{})
 			for i := 0; i < keyCount; i++ {
 				idx := rand.Intn(keyCount)

View File

@@ -28,7 +28,7 @@ import (
 	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 	"google.golang.org/grpc"
 )
@@ -162,7 +162,7 @@ func getHTTPBodyAsLines(t *testing.T, url string) []string {
 	}
 
 	reader := bufio.NewReader(resp.Body)
-	lines := []string{}
+	var lines []string
 	for {
 		line, err := reader.ReadString('\n')
 		if err != nil {

View File

@@ -26,7 +26,7 @@ import (
 	"go.etcd.io/etcd/client/pkg/v3/fileutil"
 	"go.etcd.io/etcd/client/pkg/v3/testutil"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/snapshot"
 	"go.etcd.io/etcd/server/v3/embed"
 	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
@@ -55,7 +55,7 @@ func TestSaveSnapshotFilePermissions(t *testing.T) {
 // TestSaveSnapshotVersion ensures that the snapshot returns proper storage version.
 func TestSaveSnapshotVersion(t *testing.T) {
 	// Put some keys to ensure that wal snapshot is triggered
-	kvs := []kv{}
+	var kvs []kv
 	for i := 0; i < 10; i++ {
 		kvs = append(kvs, kv{fmt.Sprintf("%d", i), "test"})
 	}

View File

@@ -110,7 +110,7 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
 			t.Errorf("expected watcher channel, got nil")
 		}
 		readyc <- struct{}{}
-		evs := []*clientv3.Event{}
+		var evs []*clientv3.Event
 		for i := 0; i < numKeyUpdates*2; i++ {
 			resp, ok := <-prefixc
 			if !ok {
@@ -120,14 +120,14 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
 		}
 
 		// check response
-		expected := []string{}
+		var expected []string
 		bkeys := []string{"bar", "baz"}
 		for _, k := range bkeys {
 			for i := 0; i < numKeyUpdates; i++ {
 				expected = append(expected, fmt.Sprintf("%s-%d", k, i))
 			}
 		}
-		got := []string{}
+		var got []string
 		for _, ev := range evs {
 			got = append(got, string(ev.Kv.Value))
 		}

View File

@@ -150,7 +150,7 @@ func setupClient(t *testing.T, endpointPattern string, clus *integration.Cluster
 func templateEndpoints(t *testing.T, pattern string, clus *integration.Cluster) []string {
 	t.Helper()
-	endpoints := []string{}
+	var endpoints []string
 	for _, m := range clus.Members {
 		ent := pattern
 		if strings.Contains(ent, "%d") {

View File

@@ -20,7 +20,7 @@ import (
 	"testing"
 	"time"
 
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/concurrency"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
 )
@@ -40,7 +40,7 @@ func TestElectionWait(t *testing.T) {
 	}()
 
 	electedc := make(chan string)
-	nextc := []chan struct{}{}
+	var nextc []chan struct{}
 	// wait for all elections
 	donec := make(chan struct{})

View File

@@ -28,7 +28,7 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/client/pkg/v3/transport"
-	"go.etcd.io/etcd/client/v3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -1111,7 +1111,7 @@ func TestV3DeleteRange(t *testing.T) {
 					dresp.Header.Revision, rresp.Header.Revision)
 			}
 
-			keys := [][]byte{}
+			var keys [][]byte
 			for j := range rresp.Kvs {
 				keys = append(keys, rresp.Kvs[j].Key)
 			}

View File

@@ -458,7 +458,7 @@ func TestV3LeaseLeases(t *testing.T) {
 	defer cancel0()
 
 	// create leases
-	ids := []int64{}
+	var ids []int64
 	for i := 0; i < 5; i++ {
 		lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
 			ctx0,

View File

@@ -174,7 +174,7 @@ func TestSTMSerialize(t *testing.T) {
 		defer close(updatec)
 		for i := 0; i < 5; i++ {
 			s := fmt.Sprintf("%d", i)
-			ops := []v3.Op{}
+			var ops []v3.Op
 			for _, k := range keys {
 				ops = append(ops, v3.OpPut(k, s))
 			}
@@ -190,7 +190,7 @@ func TestSTMSerialize(t *testing.T) {
 	for range updatec {
 		curEtcdc := clus.RandClient()
 		applyf := func(stm concurrency.STM) error {
-			vs := []string{}
+			var vs []string
 			for i := range keys {
 				vs = append(vs, stm.Get(keys[i]))
 			}

View File

@@ -748,7 +748,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
 		t.Fatalf("kvc.Txn failed: %+v", tresp)
 	}
 
-	events := []*mvccpb.Event{}
+	var events []*mvccpb.Event
 	for len(events) < 3 {
 		resp, err := wStream.Recv()
 		if err != nil {
@@ -845,7 +845,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
 		},
 	}
 
-	events := []*mvccpb.Event{}
+	var events []*mvccpb.Event
 	for len(events) < 4 {
 		resp, err := wStream.Recv()
 		if err != nil {

View File

@@ -70,7 +70,7 @@ func (d *dispatcherPool) flush() {
 	// sort by sockets; preserve the packet ordering within a socket
 	pktmap := make(map[io.Writer][]dispatchPacket)
-	outs := []io.Writer{}
+	var outs []io.Writer
 	for _, pkt := range pkts {
 		opkts, ok := pktmap[pkt.out]
 		if !ok {
@@ -103,7 +103,7 @@ func (d *dispatcherPool) Copy(w io.Writer, f fetchFunc) error {
 		return err
 	}
 
-	pkts := []dispatchPacket{}
+	var pkts []dispatchPacket
 	for len(b) > 0 {
 		pkt := b
 		if len(b) > dispatchPacketBytes {