clientv3/integration: test large KV requests

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
Gyuho Lee 2017-12-19 15:21:01 -08:00
parent 348b25f3dc
commit 452ccd693d
2 changed files with 127 additions and 19 deletions
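
The test below exercises the client-side gRPC message-size limits that clientv3.Config exposes (MaxCallSendMsgSize / MaxCallRecvMsgSize). As a rough usage sketch, not part of this commit — the endpoint and sizes are placeholder values, and the import path assumes the coreos/etcd layout of this era — configuring those limits directly on a client might look like:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Placeholder endpoint and limits: MaxCallSendMsgSize caps outgoing
	// requests, MaxCallRecvMsgSize caps responses. A value of 0 is assumed
	// to leave the client's built-in default in place.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:          []string{"localhost:2379"},
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 10 * 1024 * 1024,
		MaxCallRecvMsgSize: 0,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}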


@@ -30,6 +30,7 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func TestKVPutError(t *testing.T) {
@@ -861,3 +862,95 @@ func TestKVPutAtMostOnce(t *testing.T) {
t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
}
}
// TestKVLargeRequests tests various client/server side request limits.
func TestKVLargeRequests(t *testing.T) {
defer testutil.AfterTest(t)
tests := []struct {
// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
maxRequestBytesServer uint
maxCallSendBytesClient int
maxCallRecvBytesClient int
valueSize int
expectError error
}{
{
maxRequestBytesServer: 1,
maxCallSendBytesClient: 0,
maxCallRecvBytesClient: 0,
valueSize: 1024,
expectError: rpctypes.ErrRequestTooLarge,
},
// without proper client-side receive size limit
// "code = ResourceExhausted desc = grpc: received message larger than max (5242929 vs. 4194304)"
{
maxRequestBytesServer: 7*1024*1024 + 512*1024,
maxCallSendBytesClient: 7 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 5 * 1024 * 1024,
expectError: nil,
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 100 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10 * 1024 * 1024,
expectError: rpctypes.ErrRequestTooLarge,
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 10 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10 * 1024 * 1024,
expectError: grpc.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", 10485770, 10485760),
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 100 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10*1024*1024 + 5,
expectError: rpctypes.ErrRequestTooLarge,
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 10 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10*1024*1024 + 5,
expectError: grpc.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", 10485775, 10485760),
},
}
for i, test := range tests {
clus := integration.NewClusterV3(t,
&integration.ClusterConfig{
Size: 1,
MaxRequestBytes: test.maxRequestBytesServer,
ClientMaxCallSendMsgSize: test.maxCallSendBytesClient,
ClientMaxCallRecvMsgSize: test.maxCallRecvBytesClient,
},
)
cli := clus.Client(0)
_, err := cli.Put(context.TODO(), "foo", strings.Repeat("a", test.valueSize))
if _, ok := err.(rpctypes.EtcdError); ok {
if err != test.expectError {
t.Errorf("#%d: expected %v, got %v", i, test.expectError, err)
}
} else if err != nil && err.Error() != test.expectError.Error() {
t.Errorf("#%d: expected %v, got %v", i, test.expectError, err)
}
// put request went through, now expect a large response back
if err == nil {
_, err = cli.Get(context.TODO(), "foo")
if err != nil {
t.Errorf("#%d: get expected no error, got %v", i, err)
}
}
clus.Terminate(t)
}
}
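
For completeness, the test's server-side knob (maxRequestBytesServer, wired through ClusterConfig.MaxRequestBytes below) corresponds to the MaxRequestBytes setting on an etcd server, exposed as the --max-request-bytes flag. A minimal sketch of raising it on an embedded server, assuming the embed package field name and using placeholder values:

package main

import (
	"log"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"                // placeholder data directory
	cfg.MaxRequestBytes = 10 * 1024 * 1024  // accept client requests up to ~10 MiB

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify() // block until the embedded server is ready to serve
}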


@@ -102,6 +102,9 @@ type ClusterConfig struct {
GRPCKeepAliveTimeout time.Duration
// SkipCreatingClient to skip creating clients for each member.
SkipCreatingClient bool
ClientMaxCallSendMsgSize int
ClientMaxCallRecvMsgSize int
}
type cluster struct {
@@ -229,14 +232,16 @@ func (c *cluster) HTTPMembers() []client.Member {
func (c *cluster) mustNewMember(t *testing.T) *member {
m := mustNewMember(t,
memberConfig{
name: c.name(rand.Int()),
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
quotaBackendBytes: c.cfg.QuotaBackendBytes,
maxRequestBytes: c.cfg.MaxRequestBytes,
grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
name: c.name(rand.Int()),
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
quotaBackendBytes: c.cfg.QuotaBackendBytes,
maxRequestBytes: c.cfg.MaxRequestBytes,
grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
})
m.DiscoveryURL = c.cfg.DiscoveryURL
if c.cfg.UseGRPC {
@@ -494,20 +499,24 @@ type member struct {
// serverClient is a clientv3 that directly calls the etcdserver.
serverClient *clientv3.Client
keepDataDirTerminate bool
keepDataDirTerminate bool
clientMaxCallSendMsgSize int
clientMaxCallRecvMsgSize int
}
func (m *member) GRPCAddr() string { return m.grpcAddr }
type memberConfig struct {
name string
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
quotaBackendBytes int64
maxRequestBytes uint
grpcKeepAliveMinTime time.Duration
grpcKeepAliveInterval time.Duration
grpcKeepAliveTimeout time.Duration
name string
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
quotaBackendBytes int64
maxRequestBytes uint
grpcKeepAliveMinTime time.Duration
grpcKeepAliveInterval time.Duration
grpcKeepAliveTimeout time.Duration
clientMaxCallSendMsgSize int
clientMaxCallRecvMsgSize int
}
// mustNewMember return an inited member with the given name. If peerTLS is
@@ -575,6 +584,10 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
Timeout: mcfg.grpcKeepAliveTimeout,
}))
}
m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
return m
}
@@ -613,8 +626,10 @@ func NewClientV3(m *member) (*clientv3.Client, error) {
}
cfg := clientv3.Config{
Endpoints: []string{m.grpcAddr},
DialTimeout: 5 * time.Second,
Endpoints: []string{m.grpcAddr},
DialTimeout: 5 * time.Second,
MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize,
}
if m.ClientTLSInfo != nil {