tests: Rename grpcAddr to grpcURL to imply that it includes the scheme

Author: Marek Siarkowicz
Date:   2021-09-22 15:48:13 +02:00
Commit: 994bd08723 (parent c1f48d8077)

15 changed files with 72 additions and 72 deletions
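The rename is mechanical, but the intent matters: the accessor returns a full client URL, scheme included, so callers can hand it straight to clientv3 as an endpoint. A minimal usage sketch against the same integration harness the diff touches (the test function name is illustrative, not part of this commit):

	// Hypothetical test demonstrating the renamed accessor; assumes the
	// integration and clientv3 imports used throughout the files below.
	func TestGRPCURLAsEndpoint(t *testing.T) {
		clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
		defer clus.Terminate(t)

		// GRPCURL() yields e.g. "unix://localhost:<name>0" (or "unixs://..."
		// with client TLS), so the caller never prepends a scheme.
		cli, err := integration.NewClient(t, clientv3.Config{
			Endpoints: []string{clus.Members[0].GRPCURL()},
		})
		if err != nil {
			t.Fatal(err)
		}
		defer cli.Close()
	}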

File 1 of 15 (path not shown)

@@ -41,7 +41,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}

 	ccfg := clientv3.Config{
 		Endpoints: []string{eps[0]},
@@ -173,7 +173,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}

 	ccfg := clientv3.Config{
 		Endpoints: []string{eps[0]},

File 2 of 15 (path not shown)

@@ -57,7 +57,7 @@ func TestDialTLSExpired(t *testing.T) {
 	}
 	// expect remote errors "tls: bad certificate"
 	_, err = integration.NewClient(t, clientv3.Config{
-		Endpoints: []string{clus.Members[0].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: 3 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		TLS: tls,
@@ -75,7 +75,7 @@ func TestDialTLSNoConfig(t *testing.T) {
 	defer clus.Terminate(t)
 	// expect "signed by unknown authority"
 	c, err := integration.NewClient(t, clientv3.Config{
-		Endpoints: []string{clus.Members[0].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 	})
@@ -108,7 +108,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 	// get endpoint list
 	eps := make([]string, 3)
 	for i := range eps {
-		eps[i] = clus.Members[i].GRPCAddr()
+		eps[i] = clus.Members[i].GRPCURL()
 	}

 	toKill := rand.Intn(len(eps))
@@ -149,7 +149,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
 	defer clus.Terminate(t)

 	// get non partitioned members endpoints
-	eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
 	cli := clus.Client(0)

 	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
@@ -170,7 +170,7 @@ func TestRejectOldCluster(t *testing.T) {
 	defer clus.Terminate(t)

 	cfg := clientv3.Config{
-		Endpoints: []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()},
 		DialTimeout: 5 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		RejectOldCluster: true,
@@ -212,7 +212,7 @@ func TestSetEndpointAndPut(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
 	defer clus.Terminate(t)

-	clus.Client(1).SetEndpoints(clus.Members[0].GRPCAddr())
+	clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL())
 	_, err := clus.Client(1).Put(context.TODO(), "foo", "bar")
 	if err != nil && !strings.Contains(err.Error(), "closing") {
 		t.Fatal(err)

File 3 of 15 (path not shown)

@@ -112,7 +112,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	// expect pin eps[0]
 	ccfg := clientv3.Config{
@@ -168,7 +168,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T
 		SkipCreatingClient: true,
 	})
 	defer clus.Terminate(t)
-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	lead := clus.WaitLeader(t)

@@ -224,7 +224,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	target := clus.WaitLeader(t)
 	if !isolateLeader {
@@ -285,7 +285,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
 	defer clus.Terminate(t)
 	leaderIndex := clus.WaitLeader(t)
 	// get a follower endpoint
-	eps := []string{clus.Members[(leaderIndex+1)%3].GRPCAddr()}
+	eps := []string{clus.Members[(leaderIndex+1)%3].GRPCURL()}
 	ccfg := clientv3.Config{
 		Endpoints: eps,
 		DialTimeout: 10 * time.Second,
@@ -303,7 +303,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
 	// add other endpoints for later endpoint switch
 	cli.SetEndpoints(eps...)
 	time.Sleep(time.Second * 2)
-	conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCAddr())
+	conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL())
 	if err != nil {
 		t.Fatal(err)
 	}

File 4 of 15 (path not shown)

@@ -38,7 +38,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	lead := clus.WaitLeader(t)

@@ -150,7 +150,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	// pin eps[0]
 	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
@@ -208,7 +208,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl
 	})
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}

 	// pin eps[0]
 	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
@@ -285,9 +285,9 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	clus := integration.NewClusterV3(t, cfg)
 	defer clus.Terminate(t)

-	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
+	eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
 	if linearizable {
-		eps = append(eps, clus.Members[2].GRPCAddr())
+		eps = append(eps, clus.Members[2].GRPCURL())
 	}

 	lead := clus.WaitLeader(t)

File 5 of 15 (path not shown)

@@ -1027,7 +1027,7 @@ func TestKVForLearner(t *testing.T) {
 	// 1. clus.Members[3] is the newly added learner member, which was appended to clus.Members
 	// 2. we are using member's grpcAddr instead of clientURLs as the endpoint for clientv3.Config,
 	// because the implementation of integration test has diverged from embed/etcd.go.
-	learnerEp := clus.Members[3].GRPCAddr()
+	learnerEp := clus.Members[3].GRPCURL()
 	cfg := clientv3.Config{
 		Endpoints: []string{learnerEp},
 		DialTimeout: 5 * time.Second,
@@ -1100,7 +1100,7 @@ func TestBalancerSupportLearner(t *testing.T) {
 	}

 	// clus.Members[3] is the newly added learner member, which was appended to clus.Members
-	learnerEp := clus.Members[3].GRPCAddr()
+	learnerEp := clus.Members[3].GRPCURL()
 	cfg := clientv3.Config{
 		Endpoints: []string{learnerEp},
 		DialTimeout: 5 * time.Second,
@@ -1119,7 +1119,7 @@ func TestBalancerSupportLearner(t *testing.T) {
 		t.Fatalf("expect Get request to learner to fail, got no error")
 	}

-	eps := []string{learnerEp, clus.Members[0].GRPCAddr()}
+	eps := []string{learnerEp, clus.Members[0].GRPCURL()}
 	cli.SetEndpoints(eps...)
 	if _, err := cli.Get(context.Background(), "foo"); err != nil {
 		t.Errorf("expect no error (balancer should retry when request to learner fails), got error: %v", err)

File 6 of 15 (path not shown)

@@ -55,7 +55,7 @@ func TestMaintenanceHashKV(t *testing.T) {
 		if _, err := cli.Get(context.TODO(), "foo"); err != nil {
 			t.Fatal(err)
 		}
-		hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCAddr(), 0)
+		hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL(), 0)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -206,7 +206,7 @@ func TestMaintenanceStatus(t *testing.T) {

 	eps := make([]string, 3)
 	for i := 0; i < 3; i++ {
-		eps[i] = clus.Members[i].GRPCAddr()
+		eps[i] = clus.Members[i].GRPCURL()
 	}

 	cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})

File 7 of 15 (path not shown)

@@ -75,7 +75,7 @@ func TestV3ClientMetrics(t *testing.T) {
 	defer clus.Terminate(t)

 	cfg := clientv3.Config{
-		Endpoints: []string{clus.Members[0].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialOptions: []grpc.DialOption{
 			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
 			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),

File 8 of 15 (path not shown)

@@ -35,9 +35,9 @@ func TestDetectKvOrderViolation(t *testing.T) {

 	cfg := clientv3.Config{
 		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-			clus.Members[2].GRPCAddr(),
+			clus.Members[0].GRPCURL(),
+			clus.Members[1].GRPCURL(),
+			clus.Members[2].GRPCURL(),
 		},
 	}
 	cli, err := integration.NewClient(t, cfg)
@@ -82,7 +82,7 @@ func TestDetectKvOrderViolation(t *testing.T) {
 	clus.Members[1].Stop(t)
 	assert.NoError(t, clus.Members[2].Restart(t))
 	// force OrderingKv to query the third member
-	cli.SetEndpoints(clus.Members[2].GRPCAddr())
+	cli.SetEndpoints(clus.Members[2].GRPCURL())
 	time.Sleep(2 * time.Second) // FIXME: Figure out how pause SetEndpoints sufficiently that this is not needed

 	t.Logf("Quering m2 after restart")
@@ -102,9 +102,9 @@ func TestDetectTxnOrderViolation(t *testing.T) {

 	cfg := clientv3.Config{
 		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-			clus.Members[2].GRPCAddr(),
+			clus.Members[0].GRPCURL(),
+			clus.Members[1].GRPCURL(),
+			clus.Members[2].GRPCURL(),
 		},
 	}
 	cli, err := integration.NewClient(t, cfg)
@@ -151,7 +151,7 @@ func TestDetectTxnOrderViolation(t *testing.T) {
 	clus.Members[1].Stop(t)
 	assert.NoError(t, clus.Members[2].Restart(t))
 	// force OrderingKv to query the third member
-	cli.SetEndpoints(clus.Members[2].GRPCAddr())
+	cli.SetEndpoints(clus.Members[2].GRPCURL())
 	time.Sleep(2 * time.Second) // FIXME: Figure out how pause SetEndpoints sufficiently that this is not needed
 	_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
 	if err != errOrderViolation {

File 9 of 15 (path not shown)

@@ -29,11 +29,11 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 	eps := []string{
-		clus.Members[0].GRPCAddr(),
-		clus.Members[1].GRPCAddr(),
-		clus.Members[2].GRPCAddr(),
+		clus.Members[0].GRPCURL(),
+		clus.Members[1].GRPCURL(),
+		clus.Members[2].GRPCURL(),
 	}
-	cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCAddr()}}
+	cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}}
 	cli, err := integration.NewClient(t, cfg)
 	if err != nil {
 		t.Fatal(err)
@@ -71,7 +71,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	}

 	t.Logf("Reconfigure client to speak only to the 'partitioned' member")
-	cli.SetEndpoints(clus.Members[2].GRPCAddr())
+	cli.SetEndpoints(clus.Members[2].GRPCURL())
 	_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
 	if err != ordering.ErrNoGreaterRev {
 		t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error")
@@ -84,11 +84,11 @@ func TestUnresolvableOrderViolation(t *testing.T) {
 	defer clus.Terminate(t)
 	cfg := clientv3.Config{
 		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-			clus.Members[2].GRPCAddr(),
-			clus.Members[3].GRPCAddr(),
-			clus.Members[4].GRPCAddr(),
+			clus.Members[0].GRPCURL(),
+			clus.Members[1].GRPCURL(),
+			clus.Members[2].GRPCURL(),
+			clus.Members[3].GRPCURL(),
+			clus.Members[4].GRPCURL(),
 		},
 	}
 	cli, err := integration.NewClient(t, cfg)
@@ -99,7 +99,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
 	eps := cli.Endpoints()
 	ctx := context.TODO()

-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
+	cli.SetEndpoints(clus.Members[0].GRPCURL())
 	time.Sleep(1 * time.Second)
 	_, err = cli.Put(ctx, "foo", "bar")
 	if err != nil {
@@ -139,7 +139,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
 		t.Fatal(err)
 	}
 	clus.Members[3].WaitStarted(t)
-	cli.SetEndpoints(clus.Members[3].GRPCAddr())
+	cli.SetEndpoints(clus.Members[3].GRPCURL())

 	_, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
 	if err != ordering.ErrNoGreaterRev {

File 10 of 15 (path not shown)

@@ -249,7 +249,7 @@ func (c *cluster) Launch(t testutil.TB) {
 	c.waitMembersMatch(t, c.HTTPMembers())
 	c.waitVersion()
 	for _, m := range c.Members {
-		t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCAddr())
+		t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL())
 	}
 }

@@ -572,7 +572,7 @@ type member struct {
 	grpcServerOpts []grpc.ServerOption
 	grpcServer *grpc.Server
 	grpcServerPeer *grpc.Server
-	grpcAddr string
+	grpcURL string
 	grpcBridge *bridge

 	// serverClient is a clientv3 that directly calls the etcdserver.
@@ -587,7 +587,7 @@ type member struct {
 	closed bool
 }

-func (m *member) GRPCAddr() string { return m.grpcAddr }
+func (m *member) GRPCURL() string { return m.grpcURL }

 type memberConfig struct {
 	name string
@@ -730,28 +730,28 @@ func memberLogger(t testutil.TB, name string) *zap.Logger {
 // listenGRPC starts a grpc server over a unix domain socket on the member
 func (m *member) listenGRPC() error {
 	// prefix with localhost so cert has right domain
-	m.grpcAddr = "localhost:" + m.Name
-	m.Logger.Info("LISTEN GRPC", zap.String("m.grpcAddr", m.grpcAddr), zap.String("m.Name", m.Name))
+	m.grpcURL = "localhost:" + m.Name
+	m.Logger.Info("LISTEN GRPC", zap.String("m.grpcURL", m.grpcURL), zap.String("m.Name", m.Name))
 	if m.useIP { // for IP-only TLS certs
-		m.grpcAddr = "127.0.0.1:" + m.Name
+		m.grpcURL = "127.0.0.1:" + m.Name
 	}
-	grpcListener, err := transport.NewUnixListener(m.grpcAddr)
+	grpcListener, err := transport.NewUnixListener(m.grpcURL)
 	if err != nil {
-		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
+		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcURL, err)
 	}
-	bridgeAddr := m.grpcAddr + "0"
+	bridgeAddr := m.grpcURL + "0"
 	bridgeListener, err := transport.NewUnixListener(bridgeAddr)
 	if err != nil {
 		grpcListener.Close()
-		return fmt.Errorf("listen failed on bridge socket %s (%v)", m.grpcAddr, err)
+		return fmt.Errorf("listen failed on bridge socket %s (%v)", m.grpcURL, err)
 	}
-	m.grpcBridge, err = newBridge(dialer{network: "unix", addr: m.grpcAddr}, bridgeListener)
+	m.grpcBridge, err = newBridge(dialer{network: "unix", addr: m.grpcURL}, bridgeListener)
 	if err != nil {
 		bridgeListener.Close()
 		grpcListener.Close()
 		return err
 	}
-	m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + bridgeAddr
+	m.grpcURL = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + bridgeAddr
 	m.grpcListener = grpcListener
 	return nil
 }
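The last changed line above is the reason for the rename: by the time listenGRPC returns, the field holds a full URL with a scheme prepended, not a bare address. The scheme comes from schemeFromTLSInfo, which is not part of this diff; a minimal sketch of what it plausibly returns for these unix-socket listeners (an assumption, not code from this commit):

	// Assumed helper (not shown in this commit): picks the URL scheme for
	// the unix-domain-socket endpoints used by the integration tests.
	func schemeFromTLSInfo(tls *transport.TLSInfo) string {
		if tls == nil {
			return "unix" // plaintext unix socket
		}
		return "unixs" // TLS-secured unix socket
	}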
@@ -779,12 +779,12 @@ func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() }

 // NewClientV3 creates a new grpc client connection to the member
 func NewClientV3(m *member) (*clientv3.Client, error) {
-	if m.grpcAddr == "" {
+	if m.grpcURL == "" {
 		return nil, fmt.Errorf("member not configured for grpc")
 	}

 	cfg := clientv3.Config{
-		Endpoints: []string{m.grpcAddr},
+		Endpoints: []string{m.grpcURL},
 		DialTimeout: 5 * time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
@@ -845,7 +845,7 @@ func (m *member) Launch() error {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 	var err error
 	if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil {
@@ -1002,7 +1002,7 @@ func (m *member) Launch() error {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 	return nil
 }
@@ -1115,7 +1115,7 @@ func (m *member) Stop(_ testutil.TB) {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 	m.Close()
 	m.serverClosers = nil
@@ -1124,7 +1124,7 @@ func (m *member) Stop(_ testutil.TB) {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 }

@@ -1149,7 +1149,7 @@ func (m *member) Restart(t testutil.TB) error {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 	newPeerListeners := make([]net.Listener, 0)
 	for _, ln := range m.PeerListeners {
@@ -1174,7 +1174,7 @@ func (m *member) Restart(t testutil.TB) error {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 		zap.Error(err),
 	)
 	return err
@@ -1187,7 +1187,7 @@ func (m *member) Terminate(t testutil.TB) {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 	m.Close()
 	if !m.keepDataDirTerminate {
@@ -1200,7 +1200,7 @@ func (m *member) Terminate(t testutil.TB) {
 		zap.String("name", m.Name),
 		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
 		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-		zap.String("grpc-address", m.grpcAddr),
+		zap.String("grpc-url", m.grpcURL),
 	)
 }

File 11 of 15 (path not shown)

@@ -36,7 +36,7 @@ func TestClusterProxyMemberList(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

-	cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCAddr()}, t)
+	cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t)
 	defer cts.close(t)

 	cfg := clientv3.Config{

File 12 of 15 (path not shown)

@@ -34,7 +34,7 @@ func TestKVProxyRange(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

-	kvts := newKVProxyServer([]string{clus.Members[0].GRPCAddr()}, t)
+	kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t)
 	defer kvts.close()

 	// create a client and try to get key from proxy.

File 13 of 15 (path not shown)

@@ -31,7 +31,7 @@ func TestRegister(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
-	paddr := clus.Members[0].GRPCAddr()
+	paddr := clus.Members[0].GRPCURL()

 	testPrefix := "test-name"
 	wa := mustCreateWatcher(t, cli, testPrefix)

File 14 of 15 (path not shown)

@@ -1777,7 +1777,7 @@ func testTLSReload(
 	}
 	cli, cerr := NewClient(t, clientv3.Config{
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
-		Endpoints: []string{clus.Members[0].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: time.Second,
 		TLS: cc,
 	})
@@ -1811,7 +1811,7 @@ func testTLSReload(
 		t.Fatal(terr)
 	}
 	cl, cerr := NewClient(t, clientv3.Config{
-		Endpoints: []string{clus.Members[0].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: 5 * time.Second,
 		TLS: tls,
 	})

File 15 of 15 (path not shown)

@@ -55,7 +55,7 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
 		t.Fatal(err)
 	}
 	cli, cerr := NewClient(t, clientv3.Config{
-		Endpoints: []string{clus.Members[0].GRPCAddr()},
+		Endpoints: []string{clus.Members[0].GRPCURL()},
 		DialTimeout: time.Second,
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		TLS: cc,