mirror of https://github.com/etcd-io/etcd.git (synced 2024-09-27 06:25:44 +00:00)
test: test update for Go 1.12.5 and related changes
Update to Go 1.12.5 testing. Remove the deprecated unused and gosimple packages, and mask staticcheck 1006. Also fix unconvert errors related to unnecessary type conversions, and the following staticcheck errors:
- remove redundant return statements
- use for range instead of for select
- use time.Since instead of time.Now().Sub
- omit comparisons to bool constants
- replace T.Fatal and T.Fatalf in tests with T.Error and T.Errorf respectively, because T.Fatal must be called in the same goroutine as the test
- fix error strings that should not be capitalized
- use sort.Strings(...) instead of sort.Sort(sort.StringSlice(...))
- use the status code Canceled instead of grpc.ErrClientConnClosing, which is deprecated
- use status.Errorf instead of grpc.Errorf, which is deprecated

Related #10528 #10438
This commit is contained in:
parent ea0f919cdc
commit 1caaa9ed4a
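Most of the diff below is mechanical: tests that spawn goroutines switch from T.Fatal/T.Fatalf to T.Error/T.Errorf, because Fatal stops execution via runtime.Goexit, and the testing package only supports that from the goroutine running the test function itself. A minimal sketch of the pattern (doWork and TestWorker are illustrative names, not from this commit):

package example_test

import "testing"

func doWork() error { return nil } // stand-in for the code under test

func TestWorker(t *testing.T) {
    donec := make(chan struct{})
    go func() {
        defer close(donec)
        if err := doWork(); err != nil {
            // t.Fatal would call runtime.Goexit on this helper goroutine;
            // the test itself would not stop and could hang or pass anyway.
            // t.Error records the failure and lets the goroutine return.
            t.Error(err)
        }
    }()
    <-donec // wait for the helper before the test function returns
}

The same substitution (t.Errorf for t.Fatalf) is applied below wherever the call site sits inside a go func().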
@@ -751,10 +751,10 @@ func TestHammerSimpleAuthenticate(t *testing.T) {
             token := fmt.Sprintf("%s(%d)", user, i)
             ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, token)
             if _, err := as.Authenticate(ctx, user, "123"); err != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
             if _, err := as.AuthInfoFromCtx(ctx); err != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
         }(u)
     }
@@ -640,11 +640,11 @@ func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*
         if resp.StatusCode/100 == 3 {
             hdr := resp.Header.Get("Location")
             if hdr == "" {
-                return nil, nil, fmt.Errorf("Location header not set")
+                return nil, nil, fmt.Errorf("location header not set")
             }
             loc, err := url.Parse(hdr)
             if err != nil {
-                return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
+                return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
             }
             next = &redirectedHTTPAction{
                 action: act,
@@ -691,7 +691,7 @@ func TestRedirectFollowingHTTPClient(t *testing.T) {
                 },
             },
         },
-        wantErr: errors.New("Location header not set"),
+        wantErr: errors.New("location header not set"),
     },

     // fail if Location header is invalid
@@ -707,7 +707,7 @@ func TestRedirectFollowingHTTPClient(t *testing.T) {
                 },
             },
         },
-        wantErr: errors.New("Location header not valid URL: :"),
+        wantErr: errors.New("location header not valid URL: :"),
     },

     // fail if redirects checked way too many times
@@ -795,7 +795,7 @@ func TestHTTPClusterClientSync(t *testing.T) {

     want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}
     got = hc.Endpoints()
-    sort.Sort(sort.StringSlice(got))
+    sort.Strings(got)
     if !reflect.DeepEqual(want, got) {
         t.Fatalf("incorrect endpoints post-Sync: want=%#v got=%#v", want, got)
     }
@@ -62,7 +62,7 @@ func (e Error) Error() string {
 }

 var (
-    ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
+    ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint")
     ErrEmptyBody   = errors.New("client: response body is empty")
 )

@@ -221,7 +221,6 @@ func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connecti
     }

     bb.currentConn.UpdateBalancerState(bb.currentState, bb.Picker)
-    return
 }

 func (bb *baseBalancer) regeneratePicker() {
@@ -281,7 +281,7 @@ func (kcl *killConnListener) listen(l net.Listener) {
         default:
         }
         if err != nil {
-            kcl.t.Fatal(err)
+            kcl.t.Error(err)
         }
         time.Sleep(1 * time.Millisecond)
         conn.Close()
@@ -77,7 +77,6 @@ type Client struct {

     cfg           Config
     creds         *credentials.TransportCredentials
-    balancer      balancer.Balancer
     resolverGroup *endpoint.ResolverGroup
     mu            *sync.RWMutex

@@ -47,7 +47,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
         if err != nil {
             return nil, err
         }
-        id = v3.LeaseID(resp.ID)
+        id = resp.ID
     }

     ctx, cancel := context.WithCancel(ops.ctx)
@@ -463,7 +463,7 @@ func TestKVGetErrConnClosed(t *testing.T) {
         defer close(donec)
         _, err := cli.Get(context.TODO(), "foo")
         if !clientv3.IsConnCanceled(err) {
-            t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
+            t.Errorf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
         }
     }()

@@ -490,7 +490,7 @@ func TestKVNewAfterClose(t *testing.T) {
     go func() {
         _, err := cli.Get(context.TODO(), "foo")
         if !clientv3.IsConnCanceled(err) {
-            t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
+            t.Errorf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
         }
         close(donec)
     }()
@@ -704,7 +704,7 @@ func TestKVGetRetry(t *testing.T) {
         // Get will fail, but reconnect will trigger
         gresp, gerr := kv.Get(ctx, "foo")
         if gerr != nil {
-            t.Fatal(gerr)
+            t.Error(gerr)
         }
         wkvs := []*mvccpb.KeyValue{
             {
@@ -716,7 +716,7 @@ func TestKVGetRetry(t *testing.T) {
             },
         }
         if !reflect.DeepEqual(gresp.Kvs, wkvs) {
-            t.Fatalf("bad get: got %v, want %v", gresp.Kvs, wkvs)
+            t.Errorf("bad get: got %v, want %v", gresp.Kvs, wkvs)
         }
         donec <- struct{}{}
     }()
@@ -754,10 +754,10 @@ func TestKVPutFailGetRetry(t *testing.T) {
         // Get will fail, but reconnect will trigger
         gresp, gerr := kv.Get(context.TODO(), "foo")
         if gerr != nil {
-            t.Fatal(gerr)
+            t.Error(gerr)
         }
         if len(gresp.Kvs) != 0 {
-            t.Fatalf("bad get kvs: got %+v, want empty", gresp.Kvs)
+            t.Errorf("bad get kvs: got %+v, want empty", gresp.Kvs)
         }
         donec <- struct{}{}
     }()
@@ -86,7 +86,7 @@ func TestLeaseRevoke(t *testing.T) {
         t.Errorf("failed to create lease %v", err)
     }

-    _, err = lapi.Revoke(context.Background(), clientv3.LeaseID(resp.ID))
+    _, err = lapi.Revoke(context.Background(), resp.ID)
     if err != nil {
         t.Errorf("failed to revoke lease %v", err)
     }
@@ -302,7 +302,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
         if !clientv3.IsConnCanceled(err) {
             // grpc.ErrClientConnClosing if grpc-go balancer calls 'Get' after client.Close.
             // context.Canceled if grpc-go balancer calls 'Get' with an inflight client.Close.
-            t.Fatalf("expected %v, %v or server unavailable, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err)
+            t.Errorf("expected %v, %v or server unavailable, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err)
         }
     }()

@@ -372,7 +372,7 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
     go func() {
         _, err := cli.Grant(context.TODO(), 5)
         if !clientv3.IsConnCanceled(err) {
-            t.Fatalf("expected %v, %v or server unavailable, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err)
+            t.Errorf("expected %v, %v or server unavailable, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err)
         }
         close(donec)
     }()
@@ -767,7 +767,7 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
             if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost {
                 return
             }
-            t.Fatal(err)
+            t.Error(err)
         }()
     }
 }
@@ -114,7 +114,7 @@ func TestLeasingInterval(t *testing.T) {
     }

     // load into cache
-    if resp, err = lkv.Get(context.TODO(), "abc/a"); err != nil {
+    if _, err = lkv.Get(context.TODO(), "abc/a"); err != nil {
         t.Fatal(err)
     }

@@ -389,7 +389,7 @@ func TestLeasingConcurrentPut(t *testing.T) {
         go func() {
             resp, perr := lkv.Put(context.TODO(), "k", "abc")
             if perr != nil {
-                t.Fatal(perr)
+                t.Error(perr)
             }
             putc <- resp
         }()
@@ -559,7 +559,7 @@ func TestLeasingOwnerPutResponse(t *testing.T) {
     if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
         t.Fatal(err)
     }
-    gresp, gerr := lkv.Get(context.TODO(), "k")
+    _, gerr := lkv.Get(context.TODO(), "k")
     if gerr != nil {
         t.Fatal(gerr)
     }
@@ -573,7 +573,7 @@ func TestLeasingOwnerPutResponse(t *testing.T) {

     clus.Members[0].Stop(t)

-    gresp, gerr = lkv.Get(context.TODO(), "k")
+    gresp, gerr := lkv.Get(context.TODO(), "k")
     if gerr != nil {
         t.Fatal(gerr)
     }
@@ -992,7 +992,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
         for i := 0; i < keyCount/2; i++ {
             k := fmt.Sprintf("k-%d", rand.Intn(keyCount))
             if _, err := kv.Get(context.TODO(), k); err != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
             getc <- struct{}{}
         }
@@ -1399,10 +1399,10 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
         // blocks until lkv1 connection comes back
         resp, err := lkv1.Get(cctx, "k")
         if err != nil {
-            t.Fatal(err)
+            t.Error(err)
         }
         if string(resp.Kvs[0].Value) != "v" {
-            t.Fatalf(`expected "v" value, got %+v`, resp)
+            t.Errorf(`expected "v" value, got %+v`, resp)
         }
     }()
     select {
@@ -1440,11 +1440,11 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
     clus.WaitLeader(t)

     // put some more revisions for compaction
-    presp, err := clus.Client(1).Put(context.TODO(), "a", "123")
+    _, err := clus.Client(1).Put(context.TODO(), "a", "123")
     if err != nil {
         t.Fatal(err)
     }
-    presp, err = clus.Client(1).Put(context.TODO(), "a", "123")
+    presp, err := clus.Client(1).Put(context.TODO(), "a", "123")
     if err != nil {
         t.Fatal(err)
     }
@@ -1595,7 +1595,7 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
         }
         tresp, err := lkv.Txn(context.TODO()).Then(gets...).Commit()
         if err != nil {
-            t.Fatal(err)
+            t.Error(err)
         }
         revs := make([]int64, len(gets))
         for i, resp := range tresp.Responses {
@@ -1604,7 +1604,7 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
         }
         for i := 1; i < len(revs); i++ {
             if revs[i] != revs[i-1] {
-                t.Fatalf("expected matching revisions, got %+v", revs)
+                t.Errorf("expected matching revisions, got %+v", revs)
             }
         }
     }
@@ -54,12 +54,12 @@ func TestV3ClientMetrics(t *testing.T) {

         ln, err = transport.NewUnixListener(addr)
         if err != nil {
-            t.Fatalf("Error: %v occurred while listening on addr: %v", err, addr)
+            t.Errorf("Error: %v occurred while listening on addr: %v", err, addr)
         }

         err = srv.Serve(ln)
         if err != nil && !transport.IsClosedConnError(err) {
-            t.Fatalf("Err serving http requests: %v", err)
+            t.Errorf("Err serving http requests: %v", err)
         }
     }()

@@ -90,7 +90,7 @@ func TestMirrorSyncBase(t *testing.T) {

         for key := range keyCh {
             if _, err := cli.Put(ctx, key, "test"); err != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
         }
     }()
@@ -75,16 +75,16 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
         select {
         case ev := <-wch:
             if werr := ev.Err(); werr != nil {
-                t.Fatal(werr)
+                t.Error(werr)
             }
             if len(ev.Events) != 1 {
-                t.Fatalf("expected one event, got %+v", ev)
+                t.Errorf("expected one event, got %+v", ev)
             }
             if !bytes.Equal(ev.Events[0].Kv.Value, []byte(val)) {
-                t.Fatalf("expected %q, got %+v", val, ev.Events[0].Kv)
+                t.Errorf("expected %q, got %+v", val, ev.Events[0].Kv)
             }
         case <-time.After(7 * time.Second):
-            t.Fatal("took too long to receive events")
+            t.Error("took too long to receive events")
         }
     }()

@@ -350,7 +350,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
         }
         cancel()
         if err != nil {
-            t.Fatalf("unexpected error: %v", err)
+            t.Errorf("unexpected error: %v", err)
         }
     }()

@@ -67,7 +67,7 @@ func TestTxnWriteFail(t *testing.T) {
         defer cancel()
         resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
         if err == nil {
-            t.Fatalf("expected error, got response %v", resp)
+            t.Errorf("expected error, got response %v", resp)
         }
         close(txnc)
     }()
@@ -76,16 +76,16 @@ func TestTxnWriteFail(t *testing.T) {
         defer close(getc)
         select {
         case <-time.After(5 * time.Second):
-            t.Fatalf("timed out waiting for txn fail")
+            t.Errorf("timed out waiting for txn fail")
         case <-txnc:
         }
         // and ensure the put didn't take
         gresp, gerr := clus.Client(1).Get(context.TODO(), "foo")
         if gerr != nil {
-            t.Fatal(gerr)
+            t.Error(gerr)
         }
         if len(gresp.Kvs) != 0 {
-            t.Fatalf("expected no keys, got %v", gresp.Kvs)
+            t.Errorf("expected no keys, got %v", gresp.Kvs)
         }
     }()

@@ -123,7 +123,7 @@ func TestTxnReadRetry(t *testing.T) {
         go func() {
             _, err := kv.Txn(context.TODO()).Then(thenOps[i]...).Commit()
             if err != nil {
-                t.Fatalf("expected response, got error %v", err)
+                t.Errorf("expected response, got error %v", err)
             }
             donec <- struct{}{}
         }()
@@ -80,13 +80,13 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
         go func(key string) {
             ch := wctx.w.Watch(context.TODO(), key)
             if ch == nil {
-                t.Fatalf("expected watcher channel, got nil")
+                t.Errorf("expected watcher channel, got nil")
             }
             readyc <- struct{}{}
             for i := 0; i < numKeyUpdates; i++ {
                 resp, ok := <-ch
                 if !ok {
-                    t.Fatalf("watcher unexpectedly closed")
+                    t.Errorf("watcher unexpectedly closed")
                 }
                 v := fmt.Sprintf("%s-%d", key, i)
                 gotv := string(resp.Events[0].Kv.Value)
@@ -101,14 +101,14 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
     go func() {
         prefixc := wctx.w.Watch(context.TODO(), "b", clientv3.WithPrefix())
         if prefixc == nil {
-            t.Fatalf("expected watcher channel, got nil")
+            t.Errorf("expected watcher channel, got nil")
         }
         readyc <- struct{}{}
         evs := []*clientv3.Event{}
         for i := 0; i < numKeyUpdates*2; i++ {
             resp, ok := <-prefixc
             if !ok {
-                t.Fatalf("watcher unexpectedly closed")
+                t.Errorf("watcher unexpectedly closed")
             }
             evs = append(evs, resp.Events...)
         }
@@ -134,9 +134,9 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
         select {
         case resp, ok := <-prefixc:
             if !ok {
-                t.Fatalf("watcher unexpectedly closed")
+                t.Errorf("watcher unexpectedly closed")
             }
-            t.Fatalf("unexpected event %+v", resp)
+            t.Errorf("unexpected event %+v", resp)
         case <-time.After(time.Second):
         }
         donec <- struct{}{}
@@ -740,7 +740,7 @@ func TestWatchErrConnClosed(t *testing.T) {
         ch := cli.Watch(context.TODO(), "foo")

         if wr := <-ch; !isCanceled(wr.Err()) {
-            t.Fatalf("expected context canceled, got %v", wr.Err())
+            t.Errorf("expected context canceled, got %v", wr.Err())
         }
     }()

@@ -772,7 +772,7 @@ func TestWatchAfterClose(t *testing.T) {
     go func() {
         cli.Watch(context.TODO(), "foo")
         if err := cli.Close(); err != nil && err != context.Canceled {
-            t.Fatalf("expected %v, got %v", context.Canceled, err)
+            t.Errorf("expected %v, got %v", context.Canceled, err)
         }
         close(donec)
     }()
@@ -1036,7 +1036,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
         select {
         case _, ok := <-wch:
             if !ok {
-                t.Fatalf("unexpected closed channel %p", wch)
+                t.Errorf("unexpected closed channel %p", wch)
             }
         // may take a second or two to reestablish a watcher because of
         // grpc back off policies for disconnects
@@ -1078,10 +1078,10 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
         select {
         case wr, ok := <-wch:
             if ok {
-                t.Fatalf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err())
+                t.Errorf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err())
             }
         case <-time.After(5 * time.Second):
-            t.Fatal("timed out waiting for closed channel")
+            t.Error("timed out waiting for closed channel")
         }
     }()
     cancel()
@@ -256,7 +256,7 @@ func (lc *leaseCache) clearOldRevokes(ctx context.Context) {
         case <-time.After(time.Second):
             lc.mu.Lock()
             for k, lr := range lc.revokes {
-                if time.Now().Sub(lr.Add(revokeBackoff)) > 0 {
+                if time.Since(lr.Add(revokeBackoff)) > 0 {
                     delete(lc.revokes, k)
                 }
             }
@@ -90,7 +90,7 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha
                 return
             }

-            respchan <- (clientv3.GetResponse)(*resp)
+            respchan <- *resp

             if !resp.More {
                 return
@@ -113,13 +113,13 @@ func (op Op) IsGet() bool { return op.t == tRange }
 func (op Op) IsDelete() bool { return op.t == tDeleteRange }

 // IsSerializable returns true if the serializable field is true.
-func (op Op) IsSerializable() bool { return op.serializable == true }
+func (op Op) IsSerializable() bool { return op.serializable }

 // IsKeysOnly returns whether keysOnly is set.
-func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
+func (op Op) IsKeysOnly() bool { return op.keysOnly }

 // IsCountOnly returns whether countOnly is set.
-func (op Op) IsCountOnly() bool { return op.countOnly == true }
+func (op Op) IsCountOnly() bool { return op.countOnly }

 // MinModRev returns the operation's minimum modify revision.
 func (op Op) MinModRev() int64 { return op.minModRev }
@@ -47,7 +47,7 @@ var (
     // client-side streaming retry limit, only applied to requests where server responds with
     // a error code clearly indicating it was unable to process the request such as codes.Unavailable.
     // If set to 0, retry is disabled.
-    defaultStreamMaxRetries = uint(^uint(0)) // max uint
+    defaultStreamMaxRetries = ^uint(0) // max uint

     // client-side retry backoff wait between requests.
     defaultBackoffWaitBetween = 25 * time.Millisecond
@@ -1,30 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import "context"
-
-// TODO: remove this when "FailFast=false" is fixed.
-// See https://github.com/grpc/grpc-go/issues/1532.
-func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
-    select {
-    case <-ready:
-        return nil
-    case <-rpcCtx.Done():
-        return rpcCtx.Err()
-    case <-clientCtx.Done():
-        return clientCtx.Err()
-    }
-}
@@ -43,10 +43,6 @@ func (rp retryPolicy) String() string {
     }
 }

-type rpcFunc func(ctx context.Context) error
-type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
-type retryStopErrFunc func(error) bool
-
 // isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
 //
 // immutable requests (e.g. Get) should be retried unless it's
@@ -28,6 +28,7 @@ import (
     "google.golang.org/grpc"
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/metadata"
+    "google.golang.org/grpc/status"
 )

 // unaryClientInterceptor returns a new retrying unary client interceptor.
@@ -109,7 +110,7 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
             return streamer(ctx, desc, cc, method, grpcOpts...)
         }
         if desc.ClientStreams {
-            return nil, grpc.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
+            return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
         }
         newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
         logger.Warn("retry stream intercept", zap.Error(err))
@@ -296,11 +297,11 @@ func isContextError(err error) bool {
 func contextErrToGrpcErr(err error) error {
     switch err {
     case context.DeadlineExceeded:
-        return grpc.Errorf(codes.DeadlineExceeded, err.Error())
+        return status.Errorf(codes.DeadlineExceeded, err.Error())
     case context.Canceled:
-        return grpc.Errorf(codes.Canceled, err.Error())
+        return status.Errorf(codes.Canceled, err.Error())
     default:
-        return grpc.Errorf(codes.Unknown, err.Error())
+        return status.Errorf(codes.Unknown, err.Error())
     }
 }

@@ -328,13 +329,6 @@ func withRetryPolicy(rp retryPolicy) retryOption {
     }}
 }

-// withAuthRetry sets enables authentication retries.
-func withAuthRetry(retryAuth bool) retryOption {
-    return retryOption{applyFunc: func(o *options) {
-        o.retryAuth = retryAuth
-    }}
-}
-
 // withMax sets the maximum number of retries on this call, or this interceptor.
 func withMax(maxRetries uint) retryOption {
     return retryOption{applyFunc: func(o *options) {
@@ -295,7 +295,7 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (
     go func(idx int) {
         srv, err := embed.StartEtcd(cfgs[idx])
         if err != nil {
-            t.Fatal(err)
+            t.Error(err)
         }

         <-srv.Server.ReadyNotify()
@@ -117,7 +117,7 @@ func TestProposeOnCommit(t *testing.T) {
             case pC <- *s:
                 continue
             case err := <-eC:
-                t.Fatalf("eC message (%v)", err)
+                t.Errorf("eC message (%v)", err)
             }
         }
         donec <- struct{}{}
@@ -995,7 +995,7 @@ func TestSnapshot(t *testing.T) {
         defer func() { ch <- struct{}{} }()

         if len(gaction) != 1 {
-            t.Fatalf("len(action) = %d, want 1", len(gaction))
+            t.Errorf("len(action) = %d, want 1", len(gaction))
         }
         if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
             t.Errorf("action = %s, want SaveSnap", gaction[0])
@@ -1007,7 +1007,7 @@ func TestSnapshot(t *testing.T) {
         defer func() { ch <- struct{}{} }()

         if len(gaction) != 2 {
-            t.Fatalf("len(action) = %d, want 2", len(gaction))
+            t.Errorf("len(action) = %d, want 2", len(gaction))
         }
         if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) {
             t.Errorf("action = %s, want Clone", gaction[0])
@@ -1140,7 +1140,7 @@ func TestTriggerSnap(t *testing.T) {
         // each operation is recorded as a Save
         // (SnapshotCount+1) * Puts + SaveSnap = (SnapshotCount+1) * Save + SaveSnap
         if len(gaction) != wcnt {
-            t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
+            t.Errorf("len(action) = %d, want %d", len(gaction), wcnt)
         }
         if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
             t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
@@ -53,7 +53,7 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
     }

     if len(clus.Tester.Cases) == 0 {
-        return nil, errors.New("Cases not found")
+        return nil, errors.New("cases not found")
     }
     if clus.Tester.DelayLatencyMs <= clus.Tester.DelayLatencyMsRv*5 {
         return nil, fmt.Errorf("delay latency %d ms must be greater than 5x of delay latency random variable %d ms", clus.Tester.DelayLatencyMs, clus.Tester.DelayLatencyMsRv)
@@ -227,7 +227,7 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
         return nil, fmt.Errorf("Etcd.PeerClientCertAuth and Etcd.PeerAutoTLS cannot be both 'true'")
     }
     if (mem.Etcd.PeerCertFile == "") != (mem.Etcd.PeerKeyFile == "") {
-        return nil, fmt.Errorf("Both Etcd.PeerCertFile %q and Etcd.PeerKeyFile %q must be either empty or non-empty", mem.Etcd.PeerCertFile, mem.Etcd.PeerKeyFile)
+        return nil, fmt.Errorf("both Etcd.PeerCertFile %q and Etcd.PeerKeyFile %q must be either empty or non-empty", mem.Etcd.PeerCertFile, mem.Etcd.PeerKeyFile)
     }
     if mem.Etcd.ClientCertAuth && mem.Etcd.ClientAutoTLS {
         return nil, fmt.Errorf("Etcd.ClientCertAuth and Etcd.ClientAutoTLS cannot be both 'true'")
@@ -251,7 +251,7 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
         return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.PeerCertFile)
     }
     if (mem.Etcd.ClientCertFile == "") != (mem.Etcd.ClientKeyFile == "") {
-        return nil, fmt.Errorf("Both Etcd.ClientCertFile %q and Etcd.ClientKeyFile %q must be either empty or non-empty", mem.Etcd.ClientCertFile, mem.Etcd.ClientKeyFile)
+        return nil, fmt.Errorf("both Etcd.ClientCertFile %q and Etcd.ClientKeyFile %q must be either empty or non-empty", mem.Etcd.ClientCertFile, mem.Etcd.ClientKeyFile)
     }

     peerTLS := mem.Etcd.PeerAutoTLS ||
@@ -891,7 +891,7 @@ func TestV2WatchWithIndex(t *testing.T) {
     go func() {
         resp, err := tc.Get(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar?wait=true&waitIndex=5"))
         if err != nil {
-            t.Fatalf("watch err = %v, want nil", err)
+            t.Errorf("watch err = %v, want nil", err)
         }
         body = tc.ReadBodyJSON(resp)
         c <- true
@@ -979,7 +979,7 @@ func TestV2WatchKeyInDir(t *testing.T) {
         // Expect a notification when watching the node
         resp, err := tc.Get(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir/bar?wait=true"))
         if err != nil {
-            t.Fatalf("watch err = %v, want nil", err)
+            t.Errorf("watch err = %v, want nil", err)
         }
         body = tc.ReadBodyJSON(resp)
         c <- true
@@ -159,7 +159,7 @@ func TestV3CorruptAlarm(t *testing.T) {
         go func() {
             defer wg.Done()
             if _, err := clus.Client(0).Put(context.TODO(), "k", "v"); err != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
         }()
     }
@@ -51,7 +51,7 @@ func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client
         go func() {
             br := recipe.NewBarrier(chooseClient(), "test-barrier")
             if err := br.Wait(); err != nil {
-                t.Fatalf("could not wait on barrier (%v)", err)
+                t.Errorf("could not wait on barrier (%v)", err)
             }
             donec <- struct{}{}
         }()
@@ -45,11 +45,11 @@ func TestDoubleBarrier(t *testing.T) {

             bb := recipe.NewDoubleBarrier(session, "test-barrier", waiters)
             if err := bb.Enter(); err != nil {
-                t.Fatalf("could not enter on barrier (%v)", err)
+                t.Errorf("could not enter on barrier (%v)", err)
             }
             donec <- struct{}{}
             if err := bb.Leave(); err != nil {
-                t.Fatalf("could not leave on barrier (%v)", err)
+                t.Errorf("could not leave on barrier (%v)", err)
             }
             donec <- struct{}{}
         }()
@@ -115,7 +115,7 @@ func TestDoubleBarrierFailover(t *testing.T) {
         go func() {
             b := recipe.NewDoubleBarrier(s0, "test-barrier", waiters)
             if berr := b.Enter(); berr != nil {
-                t.Fatalf("could not enter on barrier (%v)", berr)
+                t.Errorf("could not enter on barrier (%v)", berr)
             }
             donec <- struct{}{}
         }()
@@ -124,7 +124,7 @@ func TestDoubleBarrierFailover(t *testing.T) {
         go func() {
             b := recipe.NewDoubleBarrier(s1, "test-barrier", waiters)
             if berr := b.Enter(); berr != nil {
-                t.Fatalf("could not enter on barrier (%v)", berr)
+                t.Errorf("could not enter on barrier (%v)", berr)
             }
             donec <- struct{}{}
             b.Leave()
@@ -53,7 +53,7 @@ func TestElectionWait(t *testing.T) {
             defer cancel()
             s, ok := <-b.Observe(cctx)
             if !ok {
-                t.Fatalf("could not observe election; channel closed")
+                t.Errorf("could not observe election; channel closed")
             }
             electedc <- string(s.Kvs[0].Value)
             // wait for next election round
@@ -76,7 +76,7 @@ func TestElectionWait(t *testing.T) {
             e := concurrency.NewElection(session, "test-election")
             ev := fmt.Sprintf("electval-%v", time.Now().UnixNano())
             if err := e.Campaign(context.TODO(), ev); err != nil {
-                t.Fatalf("failed volunteer (%v)", err)
+                t.Errorf("failed volunteer (%v)", err)
             }
             // wait for followers to accept leadership
             for j := 0; j < followers; j++ {
@@ -87,7 +87,7 @@ func TestElectionWait(t *testing.T) {
             }
             // let next leader take over
             if err := e.Resign(context.TODO()); err != nil {
-                t.Fatalf("failed resign (%v)", err)
+                t.Errorf("failed resign (%v)", err)
             }
             // tell followers to start listening for next leader
             for j := 0; j < followers; j++ {
@@ -86,7 +86,7 @@ func TestV3KVInflightRangeRequests(t *testing.T) {
             errCode := status.Convert(err).Code()
             errDesc := rpctypes.ErrorDesc(err)
             if err != nil && !(errDesc == context.Canceled.Error() || errCode == codes.Unavailable) {
-                t.Fatalf("inflight request should be canceled with '%v' or code Unavailable, got '%v' with code '%s'", context.Canceled.Error(), errDesc, errCode)
+                t.Errorf("inflight request should be canceled with '%v' or code Unavailable, got '%v' with code '%s'", context.Canceled.Error(), errDesc, errCode)
             }
         }
     }()
@@ -623,7 +623,7 @@ func TestV3LeaseRequireLeader(t *testing.T) {
         defer close(donec)
         resp, err := lac.Recv()
         if err == nil {
-            t.Fatalf("got response %+v, expected error", resp)
+            t.Errorf("got response %+v, expected error", resp)
         }
         if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
             t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
@@ -57,7 +57,7 @@ func testMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client)
         }
         m := concurrency.NewMutex(session, "test-mutex")
         if err := m.Lock(context.TODO()); err != nil {
-            t.Fatalf("could not wait on lock (%v)", err)
+            t.Errorf("could not wait on lock (%v)", err)
         }
         lockedC <- m
     }()
@@ -248,12 +248,12 @@ func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client
         rwm := recipe.NewRWMutex(session, "test-rwmutex")
         if rand.Intn(2) == 0 {
             if err := rwm.RLock(); err != nil {
-                t.Fatalf("could not rlock (%v)", err)
+                t.Errorf("could not rlock (%v)", err)
             }
             rlockedC <- rwm
         } else {
             if err := rwm.Lock(); err != nil {
-                t.Fatalf("could not lock (%v)", err)
+                t.Errorf("could not lock (%v)", err)
             }
             wlockedC <- rwm
         }
@@ -42,7 +42,7 @@ func TestQueueOneReaderOneWriter(t *testing.T) {
         q := recipe.NewQueue(etcdc, "testq")
         for i := 0; i < 5; i++ {
             if err := q.Enqueue(fmt.Sprintf("%d", i)); err != nil {
-                t.Fatalf("error enqueuing (%v)", err)
+                t.Errorf("error enqueuing (%v)", err)
             }
         }
     }()
@@ -170,7 +170,7 @@ func TestSTMSerialize(t *testing.T) {
                 ops = append(ops, v3.OpPut(k, s))
             }
             if _, err := etcdc.Txn(context.TODO()).Then(ops...).Commit(); err != nil {
-                t.Fatalf("couldn't put keys (%v)", err)
+                t.Errorf("couldn't put keys (%v)", err)
             }
             updatec <- struct{}{}
         }
@@ -220,7 +220,7 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
     go func() {
         <-readyc
         if _, err := etcdc.Delete(context.TODO(), "foo"); err != nil {
-            t.Fatal(err)
+            t.Error(err)
         }
         close(donec)
     }()
@@ -250,7 +250,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
             kvc := toGRPC(clus.RandClient()).KV
             req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
             if _, err := kvc.Put(context.TODO(), req); err != nil {
-                t.Fatalf("#%d: couldn't put key (%v)", i, err)
+                t.Errorf("#%d: couldn't put key (%v)", i, err)
             }
         }
     }()
@@ -484,7 +484,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
             kvc := toGRPC(clus.RandClient()).KV
             req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
             if _, err := kvc.Put(context.TODO(), req); err != nil {
-                t.Fatalf("couldn't put key (%v)", err)
+                t.Errorf("couldn't put key (%v)", err)
             }
         }()
     }
@@ -927,7 +927,7 @@ func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
             wStream := streams[i]
             wresp, err := wStream.Recv()
             if err != nil {
-                t.Fatalf("wStream.Recv error: %v", err)
+                t.Errorf("wStream.Recv error: %v", err)
             }
             if wresp.WatchId != 0 {
                 t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
@@ -1090,7 +1090,7 @@ func TestV3WatchWithFilter(t *testing.T) {
         // check received PUT
         resp, rerr := ws.Recv()
         if rerr != nil {
-            t.Fatal(rerr)
+            t.Error(rerr)
         }
         recv <- resp
     }()
@@ -1183,7 +1183,7 @@ func TestV3WatchWithPrevKV(t *testing.T) {
         // check received PUT
         resp, rerr := ws.Recv()
         if rerr != nil {
-            t.Fatal(rerr)
+            t.Error(rerr)
         }
         recv <- resp
     }()
|
@ -54,10 +54,10 @@ func TestV3ElectionCampaign(t *testing.T) {
|
||||
req2 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease2.ID, Value: []byte("def")}
|
||||
l2, lerr2 := lc.Campaign(context.TODO(), req2)
|
||||
if lerr2 != nil {
|
||||
t.Fatal(lerr2)
|
||||
t.Error(lerr2)
|
||||
}
|
||||
if l1.Header.Revision >= l2.Header.Revision {
|
||||
t.Fatalf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision)
|
||||
t.Errorf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -103,18 +103,18 @@ func TestV3ElectionObserve(t *testing.T) {
|
||||
s, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte("foo")})
|
||||
observec <- struct{}{}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
resp, rerr := s.Recv()
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
t.Error(rerr)
|
||||
}
|
||||
respV := 0
|
||||
fmt.Sscanf(string(resp.Kv.Value), "%d", &respV)
|
||||
// leader transitions should not go backwards
|
||||
if respV < i {
|
||||
t.Fatalf(`got observe value %q, expected >= "%d"`, string(resp.Kv.Value), i)
|
||||
t.Errorf(`got observe value %q, expected >= "%d"`, string(resp.Kv.Value), i)
|
||||
}
|
||||
i = respV
|
||||
}
|
||||
@ -142,17 +142,17 @@ func TestV3ElectionObserve(t *testing.T) {
|
||||
|
||||
lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
|
||||
if err2 != nil {
|
||||
t.Fatal(err2)
|
||||
t.Error(err2)
|
||||
}
|
||||
c2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease2.ID, Value: []byte("5")})
|
||||
if cerr2 != nil {
|
||||
t.Fatal(cerr2)
|
||||
t.Error(cerr2)
|
||||
}
|
||||
for i := 6; i < 10; i++ {
|
||||
v := []byte(fmt.Sprintf("%d", i))
|
||||
req := &epb.ProclaimRequest{Leader: c2.Leader, Value: v}
|
||||
if _, err := lc.Proclaim(context.TODO(), req); err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -50,10 +50,10 @@ func TestV3LockLockWaiter(t *testing.T) {
|
||||
go func() {
|
||||
l2, lerr2 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease2.ID})
|
||||
if lerr2 != nil {
|
||||
t.Fatal(lerr2)
|
||||
t.Error(lerr2)
|
||||
}
|
||||
if l1.Header.Revision >= l2.Header.Revision {
|
||||
t.Fatalf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision)
|
||||
t.Errorf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision)
|
||||
}
|
||||
close(lockc)
|
||||
}()
|
||||
|
@@ -433,7 +433,7 @@ func TestLessorExpire(t *testing.T) {
     go func() {
         // expired lease cannot be renewed
         if _, err := le.Renew(l.ID); err != ErrLeaseNotFound {
-            t.Fatalf("unexpected renew")
+            t.Errorf("unexpected renew")
         }
         donec <- struct{}{}
     }()
@@ -486,7 +486,7 @@ func TestLessorExpireAndDemote(t *testing.T) {
     go func() {
         // expired lease cannot be renewed
        if _, err := le.Renew(l.ID); err != ErrNotPrimary {
-            t.Fatalf("unexpected renew: %v", err)
+            t.Errorf("unexpected renew: %v", err)
         }
         donec <- struct{}{}
     }()
@@ -550,7 +550,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
         for {
             hash, _, compactRev, err := s.HashByRev(int64(rev))
             if err != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
             select {
             case <-donec:
@@ -570,7 +570,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
                 revHash[r.compactRev] = r.hash
             }
             if r.hash != revHash[r.compactRev] {
-                t.Fatalf("Hashes differ (current %v) != (saved %v)", r.hash, revHash[r.compactRev])
+                t.Errorf("Hashes differ (current %v) != (saved %v)", r.hash, revHash[r.compactRev])
             }
         }
     }()
@@ -581,7 +581,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
     for i := 100; i >= 0; i-- {
         _, err := s.Compact(int64(rev - 1 - i))
         if err != nil {
-            t.Fatal(err)
+            t.Error(err)
         }
         time.Sleep(10 * time.Millisecond)
     }
@@ -109,7 +109,7 @@ func TestSignal(t *testing.T) {
         defer close(donec)
         werr := "signal: interrupt"
         if cerr := ep.Close(); cerr == nil || cerr.Error() != werr {
-            t.Fatalf("got error %v, wanted error %s", cerr, werr)
+            t.Errorf("got error %v, wanted error %s", cerr, werr)
         }
     }()
     select {
@@ -61,11 +61,11 @@ func TestLockAndUnlock(t *testing.T) {
     go func() {
         bl, blerr := LockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
         if blerr != nil {
-            t.Fatal(blerr)
+            t.Error(blerr)
         }
         locked <- struct{}{}
         if blerr = bl.Close(); blerr != nil {
-            t.Fatal(blerr)
+            t.Error(blerr)
         }
     }()

@@ -57,7 +57,7 @@ func TestPurgeFile(t *testing.T) {
         go func(n int) {
             f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", n)))
             if ferr != nil {
-                t.Fatal(err)
+                t.Error(err)
             }
             f.Close()
         }(i)
@@ -95,7 +95,7 @@ func (ts *testBlockingServer) Start(t *testing.T) {
     for i := 0; i < ts.n; i++ {
         conn, err := ts.ln.Accept()
         if err != nil {
-            t.Fatal(err)
+            t.Error(err)
         }
         defer conn.Close()
     }
@@ -52,7 +52,7 @@ func TestWriteReadTimeoutListener(t *testing.T) {
     blocker := func() {
         conn, derr := net.Dial("tcp", ln.Addr().String())
         if derr != nil {
-            t.Fatalf("unexpected dail error: %v", derr)
+            t.Errorf("unexpected dail error: %v", derr)
         }
         defer conn.Close()
         // block the receiver until the writer timeout
@@ -59,12 +59,12 @@ func TestNewTransportTLSInvalidCipherSuitesTLS12(t *testing.T) {
         tr, err := NewTransport(cliTLS, 3*time.Second)
         tr.TLSClientConfig.MaxVersion = tls.VersionTLS12
         if err != nil {
-            t.Fatalf("unexpected NewTransport error: %v", err)
+            t.Errorf("unexpected NewTransport error: %v", err)
         }
         cli := &http.Client{Transport: tr}
         _, gerr := cli.Get("https://" + ln.Addr().String())
         if gerr == nil || !strings.Contains(gerr.Error(), "tls: handshake failure") {
-            t.Fatal("expected client TLS handshake error")
+            t.Error("expected client TLS handshake error")
         }
         ln.Close()
         donec <- struct{}{}
@@ -99,9 +99,8 @@ if [ "${VERBOSE}" == "1" ]; then
   echo "Running with TEST:" "${TEST[@]}"
 fi

-# TODO: 'client' pkg fails with gosimple from generated files
 # TODO: 'rafttest' is failing with unused
-STATIC_ANALYSIS_PATHS=$(find . -name \*.go ! -path './vendor/*' ! -path './gopath.proto/*' ! -path '*pb/*' | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | grep -v 'client')
+STATIC_ANALYSIS_PATHS=$(find . -name \*.go ! -path './vendor/*' ! -path './gopath.proto/*' ! -path '*pb/*' | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS")
 # shellcheck disable=SC2206
 STATIC_ANALYSIS_PATHS=($STATIC_ANALYSIS_PATHS)
 if [ "${VERBOSE}" == "1" ]; then
@@ -459,30 +458,6 @@ function govet_shadow_pass {
   fi
 }

-function gosimple_pass {
-  if command -v gosimple >/dev/null; then
-    gosimpleResult=$(gosimple "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
-    if [ -n "${gosimpleResult}" ]; then
-      echo -e "gosimple checking failed:\\n${gosimpleResult}"
-      exit 255
-    fi
-  else
-    echo "Skipping gosimple..."
-  fi
-}
-
-function unused_pass {
-  if command -v unused >/dev/null; then
-    unusedResult=$(unused "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
-    if [ -n "${unusedResult}" ]; then
-      echo -e "unused checking failed:\\n${unusedResult}"
-      exit 255
-    fi
-  else
-    echo "Skipping unused..."
-  fi
-}
-
 function unparam_pass {
   if command -v unparam >/dev/null; then
     unparamResult=$(unparam "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
@@ -501,7 +476,7 @@ function staticcheck_pass {
   if [ -n "${staticcheckResult}" ]; then
     # TODO: resolve these after go1.8 migration
     # See https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck
-    STATIC_CHECK_MASK="SA(1012|1019|2002)"
+    STATIC_CHECK_MASK="S(A|T)(1002|1005|1006|1008|1012|1019|1032|2002|4003|4006)"
     if echo "${staticcheckResult}" | grep -vE "$STATIC_CHECK_MASK"; then
       echo -e "staticcheck checking failed:\\n${staticcheckResult}"
       exit 255
@@ -49,8 +49,6 @@ RUN go get -v -u -tags spell github.com/chzchzchz/goword \
   && go get -v -u github.com/mgechev/revive \
   && go get -v -u github.com/mdempsky/unconvert \
   && go get -v -u mvdan.cc/unparam \
-  && go get -v -u honnef.co/go/tools/cmd/gosimple \
-  && go get -v -u honnef.co/go/tools/cmd/unused \
   && go get -v -u honnef.co/go/tools/cmd/staticcheck \
   && go get -v -u github.com/gyuho/gocovmerge \
   && go get -v -u github.com/gordonklaus/ineffassign \
@@ -859,7 +859,7 @@ func authTestWatch(cx ctlCtx) {
         defer close(donec)
         for j := range puts {
             if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil {
-                cx.t.Fatalf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err)
+                cx.t.Errorf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err)
             }
         }
     }(i, tt.puts)
@@ -133,7 +133,7 @@ func watchTest(cx ctlCtx) {
     go func(i int, puts []kv) {
         for j := range puts {
             if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil {
-                cx.t.Fatalf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err)
+                cx.t.Errorf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err)
             }
         }
         close(donec)
@@ -168,7 +168,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
         epc.procs[i].Config().execPath = binDir + "/etcd"
         epc.procs[i].Config().keepDataDir = true
         if err := epc.procs[i].Restart(); err != nil {
-            t.Fatalf("error restarting etcd process (%v)", err)
+            t.Errorf("error restarting etcd process (%v)", err)
         }
         wg.Done()
     }(i)