Mirror of https://github.com/etcd-io/etcd.git (synced 2024-09-27 06:25:44 +00:00)

Merge pull request #16622 from fuweid/fix-unparam-lint

*: enable and fix unparam lint

Commit 11edde9204
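For background, unparam (mvdan.cc/unparam) reports function parameters that are never used and results that always carry the same value, most often an always-nil error. The hunks below remove exactly those. A minimal, hypothetical Go sketch of the two patterns and the usual fix; the names here are illustrative and not taken from the etcd codebase:

package main

import "fmt"

// Before: unparam would flag this function twice. The prefix parameter is
// never used, and the error result is always nil.
func buildGreeting(prefix, name string) (string, error) {
	return "hello " + name, nil
}

// After: drop the unused parameter and the impossible error.
func buildGreetingFixed(name string) string {
	return "hello " + name
}

func main() {
	g, _ := buildGreeting("unused", "etcd") // callers must discard an error that cannot happen
	fmt.Println(g)
	fmt.Println(buildGreetingFixed("etcd")) // callers become one line shorter
}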
@@ -8,7 +8,7 @@ package client
 import "net/http"
-func requestCanceler(tr CancelableTransport, req *http.Request) func() {
+func requestCanceler(req *http.Request) func() {
 ch := make(chan struct{})
 req.Cancel = ch
@@ -556,7 +556,7 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
 }
 defer hcancel()
-reqcancel := requestCanceler(c.transport, req)
+reqcancel := requestCanceler(req)
 rtchan := make(chan roundTripResponse, 1)
 go func() {
@@ -499,7 +499,7 @@ func (f fakeCancelContext) Done() <-chan struct{} {
 func (f fakeCancelContext) Err() error { return errFakeCancelContext }
 func (f fakeCancelContext) Value(key any) any { return 1 }
-func withTimeout(parent context.Context, timeout time.Duration) (
+func withTimeout(parent context.Context, _timeout time.Duration) (
 ctx context.Context,
 cancel context.CancelFunc) {
 ctx = parent
@@ -57,7 +57,7 @@ func TestNewKeepAliveListener(t *testing.T) {
 t.Fatalf("unable to create tmpfile: %v", err)
 }
 tlsInfo := TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile}
-tlsInfo.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+tlsInfo.parseFunc = fakeCertificateParserFunc(nil)
 tlscfg, err := tlsInfo.ServerConfig()
 if err != nil {
 t.Fatalf("unexpected serverConfig error: %v", err)
@@ -60,11 +60,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err
 switch {
 case lnOpts.IsSocketOpts():
 // new ListenConfig with socket options.
-config, err := newListenConfig(lnOpts.socketOpts)
-if err != nil {
-return nil, err
-}
-lnOpts.ListenConfig = config
+lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts)
 // check for timeout
 fallthrough
 case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():
@@ -129,7 +125,7 @@ func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, err
 return newTLSListener(l, tlsinfo, checkSAN)
 }
-func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
+func newListenConfig(sopts *SocketOpts) net.ListenConfig {
 lc := net.ListenConfig{}
 if sopts != nil {
 ctls := getControls(sopts)
@@ -137,7 +133,7 @@ func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
 lc.Control = ctls.Control
 }
 }
-return lc, nil
+return lc
 }
 type TLSInfo struct {
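The caller-visible effect of dropping the always-nil error from newListenConfig is that the err-check boilerplate in newListener disappears, as the hunk above shows. A rough, self-contained sketch of the same shape; the SocketOpts stand-in below is illustrative, not the real transport.SocketOpts:

package main

import (
	"fmt"
	"net"
)

// SocketOpts is a simplified stand-in for the options struct in client/pkg/transport.
type SocketOpts struct{ ReusePort bool }

// newListenConfig returns net.ListenConfig directly; the old error result was
// always nil, which is what unparam reported.
func newListenConfig(sopts *SocketOpts) net.ListenConfig {
	lc := net.ListenConfig{}
	if sopts != nil {
		// in etcd this is where socket-option Control hooks would be attached
		_ = sopts.ReusePort
	}
	return lc
}

func main() {
	// Caller side: no error to propagate any more.
	lc := newListenConfig(&SocketOpts{ReusePort: true})
	fmt.Printf("%T\n", lc)
}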
@@ -28,7 +28,7 @@ import (
 "go.uber.org/zap/zaptest"
 )
-func createSelfCert(t *testing.T, hosts ...string) (*TLSInfo, error) {
+func createSelfCert(t *testing.T) (*TLSInfo, error) {
 return createSelfCertEx(t, "127.0.0.1")
 }
@@ -41,9 +41,9 @@ func createSelfCertEx(t *testing.T, host string, additionalUsages ...x509.ExtKey
 return &info, nil
 }
-func fakeCertificateParserFunc(cert tls.Certificate, err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
+func fakeCertificateParserFunc(err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
 return func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
-return cert, err
+return tls.Certificate{}, err
 }
 }
@@ -367,7 +367,7 @@ func TestNewTransportTLSInfo(t *testing.T) {
 }
 for i, tt := range tests {
-tt.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+tt.parseFunc = fakeCertificateParserFunc(nil)
 trans, err := NewTransport(tt, time.Second)
 if err != nil {
 t.Fatalf("Received unexpected error from NewTransport: %v", err)
@@ -458,7 +458,7 @@ func TestTLSInfoParseFuncError(t *testing.T) {
 }
 for i, tt := range tests {
-tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, errors.New("fake"))
+tt.info.parseFunc = fakeCertificateParserFunc(errors.New("fake"))
 if _, err = tt.info.ServerConfig(); err == nil {
 t.Errorf("#%d: expected non-nil error from ServerConfig()", i)
@@ -496,7 +496,7 @@ func TestTLSInfoConfigFuncs(t *testing.T) {
 }
 for i, tt := range tests {
-tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+tt.info.parseFunc = fakeCertificateParserFunc(nil)
 sCfg, err := tt.info.ServerConfig()
 if err != nil {
@@ -220,7 +220,9 @@ func (c *Client) autoSync() {
 }
 // dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption {
+var opts []grpc.DialOption
 if c.cfg.DialKeepAliveTime > 0 {
 params := keepalive.ClientParameters{
 Time: c.cfg.DialKeepAliveTime,
@@ -248,7 +250,7 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts
 grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)),
 )
-return opts, nil
+return opts
 }
 // Dial connects to a single endpoint using the client's config.
@@ -289,10 +291,8 @@ func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, e
 // dial configures and dials any grpc balancer target.
 func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
-opts, err := c.dialSetupOpts(creds, dopts...)
-if err != nil {
-return nil, fmt.Errorf("failed to configure dialer: %v", err)
-}
+opts := c.dialSetupOpts(creds, dopts...)
 if c.authTokenBundle != nil {
 opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
 }
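Because dialSetupOpts can no longer fail, dial() above drops the "failed to configure dialer" branch entirely. A stripped-down sketch of that caller pattern, using plain strings instead of grpc.DialOption so it stays self-contained; all names here are illustrative:

package main

import "fmt"

// dialSetupOpts mimics the shape after the change: it only builds a slice,
// so it returns the slice and nothing else.
func dialSetupOpts(extra ...string) []string {
	opts := []string{"keepalive", "unary-retry-interceptor", "stream-retry-interceptor"}
	return append(opts, extra...)
}

func main() {
	// The caller appends its own options without an error branch.
	opts := dialSetupOpts("per-rpc-credentials")
	fmt.Println(opts)
}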
@@ -90,7 +90,7 @@ func (e *Election) Campaign(ctx context.Context, val string) error {
 }
 }
-_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
 if err != nil {
 // clean up in case of context cancel
 select {
@@ -18,7 +18,6 @@ import (
 "context"
 "errors"
-pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 "go.etcd.io/etcd/api/v3/mvccpb"
 v3 "go.etcd.io/etcd/client/v3"
 )
@@ -47,19 +46,19 @@ func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) e
 // waitDeletes efficiently waits until all keys matching the prefix and no greater
 // than the create revision are deleted.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error {
 getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
 for {
 resp, err := client.Get(ctx, pfx, getOpts...)
 if err != nil {
-return nil, err
+return err
 }
 if len(resp.Kvs) == 0 {
-return resp.Header, nil
+return nil
 }
 lastKey := string(resp.Kvs[0].Key)
 if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
-return nil, err
+return err
 }
 }
 }
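Neither caller of waitDeletes (Election.Campaign above, Mutex.Lock further below) ever read the returned *pb.ResponseHeader, so the result and the now-unused etcdserverpb import go away. A tiny self-contained sketch of the caller shape; the helper below is a stand-in, not the real concurrency package API:

package main

import (
	"context"
	"fmt"
)

// waitDeletes mimics the new signature: only an error comes back, so callers
// drop the blank assignment they previously used for the discarded header.
func waitDeletes(ctx context.Context, pfx string, maxCreateRev int64) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	if maxCreateRev < 0 {
		return fmt.Errorf("invalid max create revision %d for prefix %q", maxCreateRev, pfx)
	}
	return nil // pretend every key under pfx is already deleted
}

func main() {
	// before: _, err := waitDeletes(...)
	err := waitDeletes(context.Background(), "election-prefix/", 41)
	fmt.Println(err)
}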
@@ -22,7 +22,7 @@ import (
 func exampleEndpoints() []string { return nil }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(mocking func(), _example func()) {
 mocking()
 // TODO: Call 'example' when mocking() provides realistic mocking of transport.
@@ -84,7 +84,7 @@ func (m *Mutex) Lock(ctx context.Context) error {
 client := m.s.Client()
 // wait for deletion revisions prior to myKey
 // TODO: early termination if the session key is deleted before other session keys with smaller revisions.
-_, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
 // release lock key if wait failed
 if werr != nil {
 m.Unlock(client.Ctx())
@@ -28,7 +28,7 @@ const (
 func exampleEndpoints() []string { return nil }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(mocking func(), _example func()) {
 mocking()
 // TODO: Call 'example' when mocking() provides realistic mocking of transport.
@@ -102,7 +102,7 @@ func RetryKVClient(c *Client) pb.KVClient {
 }
 }
 func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
-return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
@@ -133,23 +133,23 @@ func RetryLeaseClient(c *Client) pb.LeaseClient {
 }
 func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
-return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
-return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
-return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
-return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
-return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
+return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...)
 }
 type retryClusterClient struct {
@@ -164,7 +164,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
 }
 func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
-return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
@@ -195,27 +195,27 @@ func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClie
 }
 func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
-return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
-return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
-return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
-return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
-return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
-return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
@@ -238,19 +238,19 @@ func RetryAuthClient(c *Client) pb.AuthClient {
 }
 func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
-return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
-return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
-return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
-return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...)
 }
 func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
@@ -377,10 +377,10 @@ var (
 // with the next iteration.
 type backoffFunc func(attempt uint) time.Duration
-// withRetryPolicy sets the retry policy of this call.
-func withRetryPolicy(rp retryPolicy) retryOption {
+// withRepeatablePolicy sets the repeatable policy of this call.
+func withRepeatablePolicy() retryOption {
 return retryOption{applyFunc: func(o *options) {
-o.retryPolicy = rp
+o.retryPolicy = repeatable
 }}
 }
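Every call site in retry.go passed the same policy, so the functional option no longer takes a parameter. A compact, self-contained sketch of that option pattern, with the types trimmed down from the clientv3 originals shown above:

package main

import "fmt"

type retryPolicy uint8

const repeatable retryPolicy = iota

type options struct{ retryPolicy retryPolicy }

type retryOption struct{ applyFunc func(*options) }

// withRepeatablePolicy pins the only policy callers ever requested; the old
// withRetryPolicy(rp) parameter was always repeatable, which unparam flagged.
func withRepeatablePolicy() retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.retryPolicy = repeatable
	}}
}

func main() {
	var o options
	withRepeatablePolicy().applyFunc(&o)
	fmt.Println(o.retryPolicy == repeatable) // true
}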
@@ -410,11 +410,6 @@ function govet_shadow_pass {
 run_for_modules generic_checker govet_shadow_per_package "${shadow}"
 }
-function unparam_pass {
-# TODO: transport/listener.go:129:60: newListenConfig - result 1 (error) is always nil
-run_for_modules generic_checker run_go_tool "mvdan.cc/unparam"
-}
 function lint_pass {
 run_for_modules generic_checker run golangci-lint run --config "${ETCD_ROOT_DIR}/tools/.golangci.yaml"
 }
@@ -268,12 +268,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
 }
 e.Server.Start()
-if err = e.servePeers(); err != nil {
-return e, err
-}
-if err = e.serveClients(); err != nil {
-return e, err
-}
+e.servePeers()
+e.serveClients()
 if err = e.serveMetrics(); err != nil {
 return e, err
 }
@@ -561,7 +559,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
 }
 // configure peer handlers after rafthttp.Transport started
-func (e *Etcd) servePeers() (err error) {
+func (e *Etcd) servePeers() {
 ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
 for _, p := range e.Peers {
@@ -609,7 +607,6 @@ func (e *Etcd) servePeers() (err error) {
 e.errHandler(l.serve())
 }(pl)
 }
-return nil
 }
 func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
@@ -727,7 +724,7 @@ func resolveUrl(u url.URL) (addr string, secure bool, network string) {
 return addr, secure, network
 }
-func (e *Etcd) serveClients() (err error) {
+func (e *Etcd) serveClients() {
 if !e.cfg.ClientTLSInfo.Empty() {
 e.cfg.logger.Info(
 "starting with client TLS",
@@ -771,7 +768,6 @@ func (e *Etcd) serveClients() (err error) {
 e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, e.grpcGatewayDial(splitHttp), splitHttp, gopts...))
 }(sctx)
 }
-return nil
 }
 func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
@@ -231,7 +231,7 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
 if grpcProxyAdvertiseClientURL != "" {
 proxyClient = mustNewProxyClient(lg, tlsInfo)
 }
-httpClient := mustNewHTTPClient(lg)
+httpClient := mustNewHTTPClient()
 srvhttp, httpl := mustHTTPListener(lg, m, tlsInfo, client, proxyClient)
@@ -505,7 +505,7 @@ func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
 }
 func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client, proxy *clientv3.Client) (*http.Server, net.Listener) {
-httpClient := mustNewHTTPClient(lg)
+httpClient := mustNewHTTPClient()
 httpmux := http.NewServeMux()
 httpmux.HandleFunc("/", http.NotFound)
 grpcproxy.HandleMetrics(httpmux, httpClient, c.Endpoints())
@@ -535,7 +535,7 @@ func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c
 return srvhttp, m.Match(cmux.Any())
 }
-func mustNewHTTPClient(lg *zap.Logger) *http.Client {
+func mustNewHTTPClient() *http.Client {
 transport, err := newHTTPTransport(grpcProxyCA, grpcProxyCert, grpcProxyKey)
 if err != nil {
 fmt.Fprintln(os.Stderr, err)
@@ -544,7 +544,7 @@ func TestClusterAddMemberAsLearner(t *testing.T) {
 st := mockstore.NewRecorder()
 c := newTestCluster(t, nil)
 c.SetStore(st)
-c.AddMember(newTestMemberAsLearner(1, nil, "node1", nil), true)
+c.AddMember(newTestMemberAsLearner(1, []string{}, "node1", []string{"http://node1"}), true)
 wactions := []testutil.Action{
 {
@@ -552,7 +552,7 @@ func TestClusterAddMemberAsLearner(t *testing.T) {
 Params: []any{
 path.Join(StoreMembersPrefix, "1", "raftAttributes"),
 false,
-`{"peerURLs":null,"isLearner":true}`,
+`{"peerURLs":[],"isLearner":true}`,
 false,
 v2store.TTLOptionSet{ExpireTime: v2store.Permanent},
 },
@@ -96,11 +96,7 @@ func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
 return nil, err
 }
-s, err := bootstrapStorage(cfg, st, backend, bwal, cluster)
-if err != nil {
-backend.Close()
-return nil, err
-}
+s := bootstrapStorage(cfg, st, backend, bwal, cluster)
 if err = cluster.Finalize(cfg, s); err != nil {
 backend.Close()
@@ -165,7 +161,7 @@ type bootstrappedRaft struct {
 storage *raft.MemoryStorage
 }
-func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) (b *bootstrappedStorage, err error) {
+func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) *bootstrappedStorage {
 if wal == nil {
 wal = bootstrapNewWAL(cfg, cl)
 }
@@ -174,7 +170,7 @@ func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappe
 backend: be,
 st: st,
 wal: wal,
-}, nil
+}
 }
 func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
@@ -512,7 +512,7 @@ func TestHashKVHandler(t *testing.T) {
 var revision = 1
 etcdSrv := &EtcdServer{}
-etcdSrv.cluster = newTestCluster(t, nil)
+etcdSrv.cluster = newTestCluster(t)
 etcdSrv.cluster.SetID(types.ID(localClusterID), types.ID(localClusterID))
 be, _ := betesting.NewDefaultTmpBackend(t)
 defer betesting.Close(t, be)
@@ -187,7 +187,7 @@ func TestApplyRepeat(t *testing.T) {
 n.readyc <- raft.Ready{
 SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 }
-cl := newTestCluster(t, nil)
+cl := newTestCluster(t)
 st := v2store.New()
 cl.SetStore(v2store.New())
 be, _ := betesting.NewDefaultTmpBackend(t)
@@ -1366,7 +1366,7 @@ func TestAddMember(t *testing.T) {
 n.readyc <- raft.Ready{
 SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 }
-cl := newTestCluster(t, nil)
+cl := newTestCluster(t)
 st := v2store.New()
 cl.SetStore(st)
 be, _ := betesting.NewDefaultTmpBackend(t)
@@ -1416,7 +1416,7 @@ func TestRemoveMember(t *testing.T) {
 n.readyc <- raft.Ready{
 SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 }
-cl := newTestCluster(t, nil)
+cl := newTestCluster(t)
 st := v2store.New()
 cl.SetStore(v2store.New())
 be, _ := betesting.NewDefaultTmpBackend(t)
@@ -1467,7 +1467,7 @@ func TestUpdateMember(t *testing.T) {
 n.readyc <- raft.Ready{
 SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 }
-cl := newTestCluster(t, nil)
+cl := newTestCluster(t)
 st := v2store.New()
 cl.SetStore(st)
 cl.SetBackend(schema.NewMembershipBackend(lg, be))
@@ -1929,12 +1929,8 @@ func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error {
 return nil
 }
-func newTestCluster(t testing.TB, membs []*membership.Member) *membership.RaftCluster {
-c := membership.NewCluster(zaptest.NewLogger(t))
-for _, m := range membs {
-c.AddMember(m, true)
-}
-return c
+func newTestCluster(t testing.TB) *membership.RaftCluster {
+return membership.NewCluster(zaptest.NewLogger(t))
 }
 func newTestClusterWithBackend(t testing.TB, membs []*membership.Member, be backend.Backend) *membership.RaftCluster {
@@ -71,7 +71,7 @@ func TestDowngradeWithUserAuth(t *testing.T) {
 testDowngradeWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
 }
-func testDowngradeWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+func testDowngradeWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
 // TODO(ahrtr): finish this after we added interface methods `Downgrade` into `Client`
 t.Skip()
 }
@@ -121,7 +121,7 @@ func TestMoveLeaderWithUserAuth(t *testing.T) {
 testMoveLeaderWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
 }
-func testMoveLeaderWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+func testMoveLeaderWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
 // TODO(ahrtr): finish this after we added interface methods `MoveLeader` into `Client`
 t.Skip()
 }
@@ -145,7 +145,7 @@ func TestSnapshotWithUserAuth(t *testing.T) {
 testSnapshotWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
 }
-func testSnapshotWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+func testSnapshotWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
 // TODO(ahrtr): finish this after we added interface methods `Snapshot` into `Client`
 t.Skip()
 }
@@ -28,21 +28,21 @@ import (
 "go.etcd.io/etcd/tests/v3/framework/e2e"
 )
-func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDialTimeout(0)) }
+func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDefaultDialTimeout()) }
 func TestCtlV3PutClientTLSFlagByEnv(t *testing.T) {
 testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS()), withFlagByEnv())
 }
 func TestCtlV3PutIgnoreValue(t *testing.T) { testCtl(t, putTestIgnoreValue) }
 func TestCtlV3PutIgnoreLease(t *testing.T) { testCtl(t, putTestIgnoreLease) }
-func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) }
+func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDefaultDialTimeout()) }
 func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) }
 func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) }
 func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) }
 func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) }
-func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) }
+func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDefaultDialTimeout()) }
 func TestCtlV3GetRevokedCRL(t *testing.T) {
 cfg := e2e.NewConfig(
@@ -24,7 +24,7 @@ import (
 // TestCtlV3RoleAddTimeout tests add role with 0 grpc dial timeout while it tolerates dial timeout error.
 // This is unique in e2e test
-func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) }
+func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDefaultDialTimeout()) }
 func roleAddTest(cx ctlCtx) {
 cmdSet := []struct {
@@ -447,5 +447,6 @@ func hasKVs(t *testing.T, ctl *e2e.EtcdctlV3, kvs []testutils.KV, currentRev int
 require.Equal(t, int64(baseRev+i), v.Kvs[0].CreateRevision)
 require.Equal(t, int64(baseRev+i), v.Kvs[0].ModRevision)
 require.Equal(t, int64(1), v.Kvs[0].Version)
+require.True(t, int64(currentRev) >= v.Kvs[0].ModRevision)
 }
 }
@@ -158,6 +158,10 @@ func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption {
 return func(cx *ctlCtx) { cx.cfg = cfg }
 }
+func withDefaultDialTimeout() ctlOption {
+return withDialTimeout(0)
+}
 func withDialTimeout(timeout time.Duration) ctlOption {
 return func(cx *ctlCtx) { cx.dialTimeout = timeout }
 }
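The converted e2e tests only ever passed withDialTimeout(0), so the new zero-argument wrapper keeps that intent readable while removing the always-constant argument from call sites. A self-contained sketch of the option pair; ctlCtx is reduced here to the single field the sketch needs:

package main

import (
	"fmt"
	"time"
)

type ctlCtx struct{ dialTimeout time.Duration }

type ctlOption func(*ctlCtx)

// withDialTimeout stays available for any future non-zero use.
func withDialTimeout(timeout time.Duration) ctlOption {
	return func(cx *ctlCtx) { cx.dialTimeout = timeout }
}

// withDefaultDialTimeout is what the timeout tests now call.
func withDefaultDialTimeout() ctlOption {
	return withDialTimeout(0)
}

func main() {
	cx := ctlCtx{dialTimeout: 7 * time.Second}
	withDefaultDialTimeout()(&cx)
	fmt.Println(cx.dialTimeout) // 0s
}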
@@ -25,7 +25,7 @@ func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) }
 func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) }
 func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) }
 func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) }
-func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) }
+func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDefaultDialTimeout()) }
 func TestCtlV3WatchInteractive(t *testing.T) {
 testCtl(t, watchTest, withInteractive())
@@ -39,7 +39,7 @@ type bridge struct {
 mu sync.Mutex
 }
-func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) {
+func newBridge(dialer Dialer, listener net.Listener) *bridge {
 b := &bridge{
 // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
 dialer: dialer,
@@ -52,7 +52,7 @@ func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) {
 close(b.pausec)
 b.wg.Add(1)
 go b.serveListen()
-return b, nil
+return b
 }
 func (b *bridge) Close() {
@@ -829,11 +829,8 @@ func (m *Member) addBridge() (*bridge, error) {
 if err != nil {
 return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err)
 }
-m.GrpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
-if err != nil {
-bridgeListener.Close()
-return nil, err
-}
+m.GrpcBridge = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
 addr := bridgeListener.Addr().String()
 m.Logger.Info("LISTEN BRIDGE SUCCESS", zap.String("grpc-address", addr), zap.String("member", m.Name))
 m.GrpcURL = m.clientScheme() + "://" + addr
@@ -357,14 +357,11 @@ func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail [
 }
 cmps = append(cmps, *cmp)
 }
-succOps, err := getOps(ifSucess)
-if err != nil {
-return nil, err
-}
-failOps, err := getOps(ifFail)
-if err != nil {
-return nil, err
-}
+succOps := getOps(ifSucess)
+failOps := getOps(ifFail)
 txnrsp, err := txn.
 If(cmps...).
 Then(succOps...).
@@ -373,7 +370,7 @@ func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail [
 return txnrsp, err
 }
-func getOps(ss []string) ([]clientv3.Op, error) {
+func getOps(ss []string) []clientv3.Op {
 var ops []clientv3.Op
 for _, s := range ss {
 s = strings.TrimSpace(s)
@@ -387,7 +384,7 @@ func getOps(ss []string) ([]clientv3.Op, error) {
 ops = append(ops, clientv3.OpDelete(args[1]))
 }
 }
-return ops, nil
+return ops
 }
 func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
@@ -50,11 +50,11 @@ func ExampleSTM_apply() {
 }
 }
-exchange := func(stm concurrency.STM) error {
+exchange := func(stm concurrency.STM) {
 from, to := rand.Intn(totalAccounts), rand.Intn(totalAccounts)
 if from == to {
 // nothing to do
-return nil
+return
 }
 // read values
 fromK, toK := fmt.Sprintf("accts/%d", from), fmt.Sprintf("accts/%d", to)
@@ -70,7 +70,7 @@ func ExampleSTM_apply() {
 // write back
 stm.Put(fromK, fmt.Sprintf("%d", fromInt))
 stm.Put(toK, fmt.Sprintf("%d", toInt))
-return nil
+return
 }
 // concurrently exchange values between accounts
@@ -79,7 +79,10 @@ func ExampleSTM_apply() {
 for i := 0; i < 10; i++ {
 go func() {
 defer wg.Done()
-if _, serr := concurrency.NewSTM(cli, exchange); serr != nil {
+if _, serr := concurrency.NewSTM(cli, func(stm concurrency.STM) error {
+exchange(stm)
+return nil
+}); serr != nil {
 log.Fatal(serr)
 }
 }()
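concurrency.NewSTM requires an apply function that returns error, but the example's closure never produced a non-nil one, which is what unparam reports. The example now keeps a plain func(concurrency.STM) and adapts it at the call site. A minimal stand-alone sketch of that adapter shape; the STM interface and newSTM below are simplified stand-ins, not the real clientv3 concurrency API:

package main

import "fmt"

// STM is a pared-down stand-in for concurrency.STM.
type STM interface{ Get(key string) string }

type fakeSTM struct{}

func (fakeSTM) Get(key string) string { return "42" }

// newSTM mirrors an API that insists on an error-returning apply function.
func newSTM(apply func(STM) error) error { return apply(fakeSTM{}) }

func main() {
	// The real work never fails, so it is written without an error result...
	exchange := func(stm STM) { fmt.Println("balance:", stm.Get("accts/0")) }

	// ...and wrapped only where the error-returning signature is required.
	if err := newSTM(func(stm STM) error {
		exchange(stm)
		return nil
	}); err != nil {
		fmt.Println("unexpected:", err)
	}
}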
@@ -26,7 +26,7 @@ var lazyCluster = integration.NewLazyCluster()
 func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(_mocking func(), example func()) {
 // For integration tests runs in the provided environment
 example()
 }
@@ -29,7 +29,7 @@ import (
 )
 func TestRevisionMonotonicWithLeaderPartitions(t *testing.T) {
-testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) {
+testRevisionMonotonicWithFailures(t, 12*time.Second, func(clus *integration.Cluster) {
 for i := 0; i < 5; i++ {
 leader := clus.WaitLeader(t)
 time.Sleep(time.Second)
@@ -528,7 +528,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient
 t.Fatal(err)
 }
 for i := 0; i < 300; i++ {
-go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }(i)
+go func() { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }()
 }
 } else {
 for i := 0; i < 100; i++ {
@@ -47,7 +47,7 @@ func TestSTMConflict(t *testing.T) {
 for i := range keys {
 curEtcdc := clus.RandClient()
 srcKey := keys[i]
-applyf := func(stm concurrency.STM) error {
+applyf := func(stm concurrency.STM) {
 src := stm.Get(srcKey)
 // must be different key to avoid double-adding
 dstKey := srcKey
@@ -59,16 +59,21 @@ func TestSTMConflict(t *testing.T) {
 dstV, _ := strconv.ParseInt(dst, 10, 64)
 if srcV == 0 {
 // can't rand.Intn on 0, so skip this transaction
-return nil
+return
 }
 xfer := int64(rand.Intn(int(srcV)) / 2)
 stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer))
 stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer))
-return nil
 }
 go func() {
 iso := concurrency.WithIsolation(concurrency.RepeatableReads)
-_, err := concurrency.NewSTM(curEtcdc, applyf, iso)
+_, err := concurrency.NewSTM(curEtcdc,
+func(stm concurrency.STM) error {
+applyf(stm)
+return nil
+},
+iso,
+)
 errc <- err
 }()
 }
@@ -45,7 +45,7 @@ var (
 CompactAfterCommitBatchPanic, RaftBeforeLeaderSendPanic, BlackholePeerNetwork, DelayPeerNetwork,
 RaftBeforeFollowerSendPanic, RaftBeforeApplySnapPanic, RaftAfterApplySnapPanic, RaftAfterWALReleasePanic,
 RaftBeforeSaveSnapPanic, RaftAfterSaveSnapPanic, BlackholeUntilSnapshot,
-beforeApplyOneConfChangeSleep,
+BeforeApplyOneConfChangeSleep,
 MemberReplace,
 }
 )
@@ -119,7 +119,7 @@ func Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdPro
 return
 }
-func verifyClusterHealth(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster) error {
+func verifyClusterHealth(ctx context.Context, _ *testing.T, clus *e2e.EtcdProcessCluster) error {
 for i := 0; i < len(clus.Procs); i++ {
 clusterClient, err := clientv3.New(clientv3.Config{
 Endpoints: clus.Procs[i].EndpointsGRPC(),
@@ -53,7 +53,7 @@ var (
 RaftAfterWALReleasePanic Failpoint = goPanicFailpoint{"raftAfterWALRelease", triggerBlackhole{waitTillSnapshot: true}, Follower}
 RaftBeforeSaveSnapPanic Failpoint = goPanicFailpoint{"raftBeforeSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
 RaftAfterSaveSnapPanic Failpoint = goPanicFailpoint{"raftAfterSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
-beforeApplyOneConfChangeSleep Failpoint = killAndGofailSleep{"beforeApplyOneConfChange", time.Second}
+BeforeApplyOneConfChangeSleep Failpoint = killAndGofailSleep{"beforeApplyOneConfChange", time.Second}
 )
 type goPanicFailpoint struct {
@@ -162,11 +162,11 @@ var commonTestScenarios = []modelTestCase{
 {
 name: "Stale Get need to match put if asking about matching revision",
 operations: []testOperation{
-{req: putRequest("key", "1"), resp: putResponse(2)},
-{req: staleGetRequest("key", 2), resp: getResponse("key", "1", 3, 2), expectFailure: true},
-{req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 3), expectFailure: true},
-{req: staleGetRequest("key", 2), resp: getResponse("key", "2", 2, 2), expectFailure: true},
-{req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 2)},
+{req: putRequest("key1", "1"), resp: putResponse(2)},
+{req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 3, 2), expectFailure: true},
+{req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 2, 3), expectFailure: true},
+{req: staleGetRequest("key1", 2), resp: getResponse("key1", "2", 2, 2), expectFailure: true},
+{req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 2, 2)},
 },
 },
 {
@@ -23,7 +23,7 @@ import (
 "go.etcd.io/etcd/tests/v3/robustness/report"
 )
-func validateWatch(t *testing.T, lg *zap.Logger, cfg Config, reports []report.ClientReport, eventHistory []model.WatchEvent) []model.WatchEvent {
+func validateWatch(t *testing.T, lg *zap.Logger, cfg Config, reports []report.ClientReport, eventHistory []model.WatchEvent) {
 lg.Info("Validating watch")
 // Validate etcd watch properties defined in https://etcd.io/docs/v3.6/learning/api_guarantees/#watch-apis
 for _, r := range reports {
@@ -34,7 +34,6 @@ func validateWatch(t *testing.T, lg *zap.Logger, cfg Config, reports []report.Cl
 validateReliable(t, eventHistory, r)
 validateResumable(t, eventHistory, r)
 }
-return eventHistory
 }
 func validateBookmarkable(t *testing.T, report report.ClientReport) {
@@ -23,8 +23,9 @@ linters:
 - revive
 - staticcheck
 - stylecheck
-- unused
 - unconvert # Remove unnecessary type conversions
+- unparam
+- unused
 linters-settings: # please keep this alphabetized
 goimports:
 local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages.
@@ -18,7 +18,6 @@ require (
 gotest.tools/gotestsum v1.11.0
 gotest.tools/v3 v3.5.1
 honnef.co/go/tools v0.4.6
-mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b
 )
 require (
@@ -294,5 +294,3 @@ honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8=
 honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
 k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b h1:C8Pi6noat8BcrL9WnSRYeQ63fpkJk3hKVHtF5731kIw=
-mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b/go.mod h1:WqFWCt8MGPoFSYGsQSiIORRlYVhkJsIk+n2MY6rhNbA=
@@ -37,5 +37,4 @@ import (
 _ "gotest.tools/gotestsum"
 _ "gotest.tools/v3"
 _ "honnef.co/go/tools/cmd/staticcheck"
-_ "mvdan.cc/unparam"
 )