Merge pull request #9362 from gyuho/spell-check

*: fix minor lint warnings
Gyuho Lee committed 2018-02-26 10:19:17 -08:00 (committed by GitHub)
7 changed files with 22 additions and 16 deletions

View File

@@ -483,8 +483,14 @@ func (f fakeCancelContext) Done() <-chan struct{} {
func (f fakeCancelContext) Err() error { return fakeCancelContextError }
func (f fakeCancelContext) Value(key interface{}) interface{} { return 1 }
-func withTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
-	return parent, func() { parent = nil }
-}
+func withTimeout(parent context.Context, timeout time.Duration) (
+	ctx context.Context,
+	cancel context.CancelFunc) {
+	ctx = parent
+	cancel = func() {
+		ctx = nil
+	}
+	return ctx, cancel
+}
func TestHTTPClusterClientDoCanceledContext(t *testing.T) {
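
A note on the change above: the rewritten helper is otherwise plain Go. A minimal, self-contained sketch (package scaffolding added for illustration) showing that the named-return version compiles and acts as a pass-through:

package main

import (
	"context"
	"fmt"
	"time"
)

// withTimeout mirrors the rewritten test helper: it ignores the timeout,
// returns the parent context, and its cancel func merely clears the
// named return value.
func withTimeout(parent context.Context, timeout time.Duration) (
	ctx context.Context,
	cancel context.CancelFunc) {
	ctx = parent
	cancel = func() {
		ctx = nil
	}
	return ctx, cancel
}

func main() {
	ctx, cancel := withTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(ctx.Err()) // <nil>: the parent context is untouched
}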

View File

@@ -22,7 +22,7 @@ import (
"github.com/coreos/etcd/clientv3/clientv3util"
)
-func ExampleKeyExists_put() {
+func ExampleKeyMissing() {
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{"127.0.0.1:2379"},
})
@@ -45,7 +45,7 @@ func ExampleKeyExists_put() {
}
}
-func ExampleKeyExists_delete() {
+func ExampleKeyExists() {
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{"127.0.0.1:2379"},
})
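
For context on what the renamed examples exercise: clientv3util.KeyMissing and clientv3util.KeyExists build clientv3.Cmp guards for transactions. A hedged, self-contained sketch; the endpoint and key are illustrative:

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/clientv3util"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Put "/hello" only if the key does not exist yet.
	resp, err := cli.Txn(context.Background()).
		If(clientv3util.KeyMissing("/hello")).
		Then(clientv3.OpPut("/hello", "world")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("put applied:", resp.Succeeded)
}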

View File

@@ -269,7 +269,7 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
func NewCheckDatascaleCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "datascale [options]",
-Short: "Check the memory usage of holding data for diferent workloads on a given server endpoint.",
+Short: "Check the memory usage of holding data for different workloads on a given server endpoint.",
Long: "If no endpoint is provided, localhost will be used. If multiple endpoints are provided, first endpoint will be used.",
Run: newCheckDatascaleCommand,
}
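
For readers unfamiliar with the pattern above: a cobra command bundles usage strings with a Run callback that receives the parsed command and its arguments. A minimal hedged sketch; the wiring and names here are illustrative, not etcdctl's actual code:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func runDatascale(cmd *cobra.Command, args []string) {
	fmt.Println("checking datascale with args:", args)
}

func main() {
	root := &cobra.Command{Use: "check"}
	root.AddCommand(&cobra.Command{
		Use:   "datascale [options]",
		Short: "Check the memory usage of holding data for different workloads on a given server endpoint.",
		Long:  "If no endpoint is provided, localhost will be used.",
		Run:   runDatascale,
	})
	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}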

View File

@@ -722,7 +722,7 @@ func TestLearnerLogReplication(t *testing.T) {
match := n1.getProgress(2).Match
if match != n2.raftLog.committed {
-t.Errorf("progresss 2 of leader 1 wants match %d, but got %d", n2.raftLog.committed, match)
+t.Errorf("progress 2 of leader 1 wants match %d, but got %d", n2.raftLog.committed, match)
}
}

View File

@@ -56,7 +56,7 @@ func main() {
stressQPS := flag.Int("stress-qps", 10000, "maximum number of stresser requests per second.")
schedCases := flag.String("schedule-cases", "", "test case schedule")
consistencyCheck := flag.Bool("consistency-check", true, "true to check consistency (revision, hash)")
-stresserType := flag.String("stresser", "keys,lease", "comma separated list of stressers (keys, lease, v2keys, nop, election-runner, watch-runner, lock-racer-runner, lease-runner).")
+stresserType := flag.String("stresser", "keys,lease", "comma separated list of stressing clients (keys, lease, v2keys, nop, election-runner, watch-runner, lock-racer-runner, lease-runner).")
etcdRunnerPath := flag.String("etcd-runner", "", "specify a path of etcd runner binary")
failureTypes := flag.String("failures", "default,failpoints", "specify failures (concat of \"default\" and \"failpoints\").")
failpoints := flag.String("failpoints", `panic("etcd-tester")`, `comma separated list of failpoint terms to inject (e.g. 'panic("etcd-tester"),1*sleep(1000)')`)
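
As a side note, a comma-separated flag value like -stresser is typically split and dispatched after flag.Parse. A small sketch under assumed names; this mirrors the pattern but is not the tester's actual code:

package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	stresserType := flag.String("stresser", "keys,lease",
		"comma separated list of stressing clients")
	flag.Parse()

	// Each name selects one stressing-client implementation.
	for _, name := range strings.Split(*stresserType, ",") {
		fmt.Println("would start stressing client:", name)
	}
}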

View File

@@ -51,7 +51,7 @@ func (s *nopStresser) ModifiedKeys() int64 {
func (s *nopStresser) Checker() Checker { return nil }
// compositeStresser implements a Stresser that runs a slice of
-// stressers concurrently.
+// stressing clients concurrently.
type compositeStresser struct {
stressers []Stresser
}
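
To make the composite pattern concrete: a hedged, self-contained sketch of a composite that fans Stress out to every child concurrently; the interface is simplified from the tester's and the names are stand-ins:

package main

import (
	"fmt"
	"sync"
)

// Stresser is a simplified stand-in for the tester's interface.
type Stresser interface {
	Stress() error
	Close()
}

// compositeStresser runs a slice of stressing clients concurrently.
type compositeStresser struct {
	stressers []Stresser
}

func (cs *compositeStresser) Stress() error {
	var wg sync.WaitGroup
	errc := make(chan error, len(cs.stressers))
	for _, s := range cs.stressers {
		wg.Add(1)
		go func(s Stresser) {
			defer wg.Done()
			if err := s.Stress(); err != nil {
				errc <- err
			}
		}(s)
	}
	wg.Wait()
	close(errc)
	return <-errc // first error reported, or nil if none
}

func (cs *compositeStresser) Close() {
	for _, s := range cs.stressers {
		s.Close()
	}
}

func main() {
	var s Stresser = &compositeStresser{}
	fmt.Println(s.Stress()) // <nil>: no children registered
}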
@@ -141,7 +141,7 @@ func NewStresser(s string, sc *stressConfig, m *member) Stresser {
case "nop":
return &nopStresser{start: time.Now(), qps: int(sc.rateLimiter.Limit())}
case "keys":
-// TODO: Too intensive stressers can panic etcd member with
+// TODO: Too intensive stressing clients can panic etcd member with
// 'out of memory' error. Put rate limits in server side.
return &keyStresser{
Endpoint: m.grpcAddr(),

View File

@@ -245,26 +245,26 @@ func (tt *tester) cleanup() error {
}
func (tt *tester) pauseStresser() {
-plog.Infof("%s pausing the stressers...", tt.logPrefix())
+plog.Infof("%s pausing the stressing clients...", tt.logPrefix())
tt.stresser.Pause()
-plog.Infof("%s paused stressers", tt.logPrefix())
+plog.Infof("%s paused stressing clients", tt.logPrefix())
}
func (tt *tester) startStresser() (err error) {
-plog.Infof("%s starting the stressers...", tt.logPrefix())
+plog.Infof("%s starting the stressing clients...", tt.logPrefix())
err = tt.stresser.Stress()
-plog.Infof("%s started stressers", tt.logPrefix())
+plog.Infof("%s started stressing clients", tt.logPrefix())
return err
}
func (tt *tester) closeStresser() {
-plog.Infof("%s closing the stressers...", tt.logPrefix())
+plog.Infof("%s closing the stressing clients...", tt.logPrefix())
tt.stresser.Close()
-plog.Infof("%s closed stressers", tt.logPrefix())
+plog.Infof("%s closed stressing clients", tt.logPrefix())
}
func (tt *tester) resetStressCheck() error {
-plog.Infof("%s resetting stressers and checkers...", tt.logPrefix())
+plog.Infof("%s resetting stressing clients and checkers...", tt.logPrefix())
cs := &compositeStresser{}
for _, m := range tt.cluster.Members {
s := NewStresser(tt.stresserType, &tt.scfg, m)
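
Finally, the pause/start/close helpers above all share one shape: log, delegate to the composite stresser, log again. A hedged sketch with hypothetical names (log.Printf stands in for plog.Infof, and the interface is simplified):

package main

import "log"

// Stresser is a simplified stand-in for the tester's interface.
type Stresser interface {
	Stress() error
	Pause()
	Close()
}

type nopStresser struct{}

func (nopStresser) Stress() error { return nil }
func (nopStresser) Pause()        {}
func (nopStresser) Close()        {}

// startStresser wraps Stress with before/after logging, mirroring the
// helpers in the hunk above.
func startStresser(prefix string, s Stresser) error {
	log.Printf("%s starting the stressing clients...", prefix)
	err := s.Stress()
	log.Printf("%s started stressing clients", prefix)
	return err
}

func main() {
	s := nopStresser{}
	if err := startStresser("[tester]", s); err != nil {
		log.Fatal(err)
	}
	s.Pause()
	s.Close()
}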