*: clean up from go vet, misspell

This commit is contained in:
Gyu-Ho Lee 2016-04-10 23:16:56 -07:00
parent 1f4f3667a4
commit 9108af9046
7 changed files with 22 additions and 20 deletions

View File

@ -21,15 +21,17 @@ import (
"golang.org/x/net/context"
)
// Txn is the interface that wraps mini-transactions.
//
// Tx.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
//
// Tx.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
type Txn interface {
// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations

View File

@ -76,7 +76,7 @@ func TestPeriodicPause(t *testing.T) {
select {
case a := <-compactable.Chan():
t.Fatal("unexpected action %v", a)
t.Fatalf("unexpected action %v", a)
case <-time.After(10 * time.Millisecond):
}

View File

@ -203,7 +203,7 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
}
if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() {
phosts := make([]string, 0)
var phosts []string
for _, u := range cfg.lpurls {
phosts = append(phosts, u.Host)
}
@ -218,7 +218,7 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
if !cfg.peerTLSInfo.Empty() {
plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
}
plns := make([]net.Listener, 0)
var plns []net.Listener
for _, u := range cfg.lpurls {
if u.Scheme == "http" {
if !cfg.peerTLSInfo.Empty() {

View File

@ -226,11 +226,11 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
continue
}
if !cresp.Created {
t.Errorf("#%d: did not create watchid, got +%v", i, cresp)
t.Errorf("#%d: did not create watchid, got %+v", i, cresp)
continue
}
if cresp.Canceled {
t.Errorf("#%d: canceled watcher on create", i, cresp)
t.Errorf("#%d: canceled watcher on create %+v", i, cresp)
continue
}
@ -317,7 +317,7 @@ func TestV3WatchFutureRevision(t *testing.T) {
t.Fatalf("wStream.Recv error: %v", err)
}
if !cresp.Created {
t.Fatal("create = %v, want %v", cresp.Created, true)
t.Fatalf("create %v, want %v", cresp.Created, true)
}
kvc := toGRPC(clus.RandClient()).KV

View File

@ -903,7 +903,7 @@ func (r *raft) removeNode(id uint64) {
if r.maybeCommit() {
r.bcastAppend()
}
// If the removed node is the leadTransferee, then abort the leadership transfering.
// If the removed node is the leadTransferee, then abort the leadership transferring.
if r.state == StateLeader && r.leadTransferee == id {
r.abortLeaderTransfer()
}

View File

@ -1911,7 +1911,7 @@ func TestCommitAfterRemoveNode(t *testing.T) {
}
}
// TestLeaderTransferToUpToDateNode verifies transfering should succeed
// TestLeaderTransferToUpToDateNode verifies transferring should succeed
// if the transferee has the most up-to-date log entries when transfer starts.
func TestLeaderTransferToUpToDateNode(t *testing.T) {
nt := newNetwork(nil, nil, nil)
@ -2077,7 +2077,7 @@ func TestLeaderTransferRemoveNode(t *testing.T) {
lead := nt.peers[1].(*raft)
// The leadTransferee is removed when leadship transfering.
// The leadTransferee is removed when leadership transferring.
nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
if lead.leadTransferee != 3 {
t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
@ -2159,10 +2159,10 @@ func TestLeaderTransferSecondTransferToSameNode(t *testing.T) {
func checkLeaderTransferState(t *testing.T, r *raft, state StateType, lead uint64) {
if r.state != state || r.lead != lead {
t.Fatalf("after transfering, node has state %v lead %v, want state %v lead %v", r.state, r.lead, state, lead)
t.Fatalf("after transferring, node has state %v lead %v, want state %v lead %v", r.state, r.lead, state, lead)
}
if r.leadTransferee != None {
t.Fatalf("after transfering, node has leadTransferee %v, want leadTransferee %v", r.leadTransferee, None)
t.Fatalf("after transferring, node has leadTransferee %v, want leadTransferee %v", r.leadTransferee, None)
}
}

View File

@ -168,7 +168,7 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
available := make(map[revision]struct{})
emptyki := make([]*keyIndex, 0)
var emptyki []*keyIndex
log.Printf("store.index: compact %d", rev)
// TODO: do not hold the lock for long time?
// This is probably OK. Compacting 10M keys takes O(10ms).