Merge pull request #7896 from gyuho/metadata-grpc

*: gRPC v1.4.1, gateway v1.2.2, metadata Incoming/OutgoingContext
Gyu-Ho Lee 2017-06-15 16:42:55 -07:00 committed by GitHub
commit 5fedaf2dd7
48 changed files with 4351 additions and 1013 deletions


@ -60,7 +60,7 @@ For avoiding such a situation, the API layer performs *version number validation
After authenticating with `Authenticate()`, a client can create a gRPC connection as it would without auth. In addition to the existing initialization process, the client must associate the token with the newly created connection. `grpc.WithPerRPCCredentials()` provides the functionality for this purpose.
Every authenticated request from the client carries a token. On the server side, the token can be obtained with `grpc.metadata.FromContext()`. From it, the server can determine who issued the request and when the user was authorized. The API layer fills this information into the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
Every authenticated request from the client carries a token. On the server side, the token can be obtained with `grpc.metadata.FromIncomingContext()`. From it, the server can determine who issued the request and when the user was authorized. The API layer fills this information into the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
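As a hedged illustration of the server side of this flow, the sketch below pulls the token out of the incoming metadata. The `"token"` key matches the one used by the auth store tests in this commit; the helper name is made up for the example.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// tokenFromContext is a hypothetical helper mirroring what the API layer
// does: read the "token" entry from the request's incoming metadata.
func tokenFromContext(ctx context.Context) (string, bool) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", false
	}
	ts := md["token"]
	if len(ts) == 0 {
		return "", false
	}
	return ts[0], true
}

func main() {
	// Simulate what a gRPC handler would see for an authenticated request.
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.New(map[string]string{"token": "abc123"}))
	if tok, ok := tokenFromContext(ctx); ok {
		fmt.Println("token:", tok)
	}
}
```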
### Checking permission in the state machine


@ -995,7 +995,7 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
}
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, nil
}


@ -453,7 +453,8 @@ func TestAuthInfoFromCtx(t *testing.T) {
t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
// as if it came from RPC
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
ai, err = as.AuthInfoFromCtx(ctx)
if err != nil && ai != nil {
t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
@ -465,19 +466,19 @@ func TestAuthInfoFromCtx(t *testing.T) {
t.Error(err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
_, err = as.AuthInfoFromCtx(ctx)
if err != ErrInvalidAuthToken {
t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
_, err = as.AuthInfoFromCtx(ctx)
if err != ErrInvalidAuthToken {
t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
ai, err = as.AuthInfoFromCtx(ctx)
if err != nil {
t.Error(err)
@ -521,7 +522,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) {
donec := make(chan struct{})
go func() {
defer close(donec)
ctx := metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
as.AuthInfoFromCtx(ctx)
}()
as.UserAdd(&pb.AuthUserAddRequest{Name: "test"})


@ -345,6 +345,15 @@
}
]
},
{
"project": "google.golang.org/genproto/googleapis/rpc/status",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "google.golang.org/grpc",
"licenses": [


@ -333,7 +333,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
return metadata.NewOutgoingContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) {
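For context, here is a minimal, hedged sketch of how a caller ends up attaching that metadata: `WithRequireLeader` wraps the context, and the pair now travels as outgoing metadata on every RPC made with it. The endpoint below is a placeholder and error handling is trimmed.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The require-leader pair is attached via metadata.NewOutgoingContext,
	// so the server sees it in its incoming metadata.
	ctx := clientv3.WithRequireLeader(context.Background())
	if _, err := cli.Get(ctx, "foo"); err != nil {
		log.Fatal(err)
	}
}
```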


@ -323,7 +323,7 @@ func (l *lessor) closeRequireLeader() {
reqIdxs := 0
// find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
continue
}


@ -0,0 +1,168 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
}
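The doc comment above shows pack/unpack examples in C++, Java, and Python; a Go equivalent would go through the `ptypes` helpers, which are not part of this commit but ship with the same protobuf library. A sketch, assuming `github.com/golang/protobuf/ptypes` is available:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a Duration into an Any; the type URL becomes
	// "type.googleapis.com/google.protobuf.Duration".
	a, err := ptypes.MarshalAny(ptypes.DurationProto(1212 * time.Millisecond))
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl)

	// Unpack only if the Any really holds a Duration.
	var d durpb.Duration
	if ptypes.Is(a, &d) {
		if err := ptypes.UnmarshalAny(a, &d); err != nil {
			panic(err)
		}
		fmt.Println(d.Seconds, d.Nanos)
	}
}
```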


@ -94,7 +94,7 @@ func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, e
if len(pairs) == 0 {
return ctx, nil
}
return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil
return metadata.NewOutgoingContext(ctx, metadata.Pairs(pairs...)), nil
}
// ServerMetadata consists of metadata sent from gRPC server.


@ -48,8 +48,11 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
}
var f reflect.Value
f, props = fieldByProtoName(m, fieldName)
if !f.IsValid() {
var err error
f, props, err = fieldByProtoName(m, fieldName)
if err != nil {
return err
} else if !f.IsValid() {
grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
return nil
}
@ -92,14 +95,26 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties) {
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
props := proto.GetProperties(m.Type())
// look up field name in oneof map
if op, ok := props.OneofTypes[name]; ok {
v := reflect.New(op.Type.Elem())
field := m.Field(op.Field)
if !field.IsNil() {
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
}
field.Set(v)
return v.Elem().Field(0), op.Prop, nil
}
for _, p := range props.Prop {
if p.OrigName == name {
return m.FieldByName(p.Name), p
return m.FieldByName(p.Name), p, nil
}
}
return reflect.Value{}, nil
return reflect.Value{}, nil, nil
}
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {

cmd/vendor/google.golang.org/genproto/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,143 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
/*
Package status is a generated protocol buffer package.
It is generated from these files:
google/rpc/status.proto
It has these top-level messages:
Status
*/
package status
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs. It is used by
// [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error message,
// and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
// error message should be a developer-facing English message that helps
// developers *understand* and *resolve* the error. If a localized user-facing
// error message is needed, put the localized message in the error details or
// localize it in the client. The optional error details may contain arbitrary
// information about the error. There is a predefined set of error detail types
// in the package `google.rpc` which can be used for common error conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting purpose.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
// A list of messages that carry the error details. There will be a
// common set of message types for APIs to use.
Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Status) GetDetails() []*google_protobuf.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
0x00,
}
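To connect this generated message to the rest of the commit: the `status` package that call.go now uses wraps and unwraps exactly this `google.rpc.Status`. A hedged sketch of the round trip, assuming the vendored `google.golang.org/grpc/status` matches upstream v1.4:

```go
package main

import (
	"fmt"

	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// From a *status.Status down to the generated wire message.
	st := status.New(codes.NotFound, "key not found")
	pb := st.Proto()
	fmt.Println(pb.Code, pb.Message)

	// And back: wrap a received google.rpc.Status into the error callers see,
	// the way `return stream.Status().Err()` is used elsewhere in this commit.
	err := status.FromProto(&spb.Status{Code: int32(codes.Internal), Message: "boom"}).Err()
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message())
	}
}
```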


@ -35,6 +35,7 @@ package grpc
import (
"fmt"
"net"
"sync"
"golang.org/x/net/context"
@ -60,6 +61,10 @@ type BalancerConfig struct {
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
// Dialer is the custom dialer the Balancer implementation can use to dial
// to a remote load balancer server. The Balancer implementations
// can ignore this if it doesn't need to talk to remote balancer.
Dialer func(context.Context, string) (net.Conn, error)
}
// BalancerGetOptions configures a Get call.
@ -385,6 +390,9 @@ func (rr *roundRobin) Notify() <-chan []Address {
func (rr *roundRobin) Close() error {
rr.mu.Lock()
defer rr.mu.Unlock()
if rr.done {
return errBalancerClosed
}
rr.done = true
if rr.w != nil {
rr.w.Close()


@ -43,6 +43,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@ -72,14 +73,17 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
}
for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil {
if c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF {
break
}
return
}
}
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary.
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
@ -92,11 +96,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
return nil, err
}
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
defer func() {
if err != nil {
// If err is connection error, t will be closed, no need to close stream here.
@ -119,7 +119,13 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
}
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err)
return err
}
if c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(outBuf) > *c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize)
}
err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil {
@ -130,10 +136,10 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status.
if err != nil && err != io.EOF {
return nil, err
return err
}
// Sent successfully.
return stream, nil
return nil
}
// Invoke sends the RPC request on the wire and returns after response is received.
@ -148,14 +154,18 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
defer cancel()
}
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil && *mc.Timeout >= 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
defer cancel()
}
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(&c); err != nil {
return toRPCErr(err)
@ -166,6 +176,10 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
o.after(&c)
}
}()
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish()
@ -182,26 +196,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
}()
}
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
Client: true,
EndTime: time.Now(),
Error: e,
}
sh.HandleRPC(ctx, end)
}
}()
}()
}
topts := &transport.Options{
Last: true,
Delay: false,
@ -223,6 +236,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
if c.creds != nil {
callHdr.Creds = c.creds
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
@ -230,7 +246,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
if _, ok := status.FromError(err); ok {
return err
}
if err == errConnClosing || err == errConnUnavailable {
@ -245,19 +261,35 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
stream, err = t.NewStream(ctx, callHdr)
if err != nil {
if put != nil {
if _, ok := err.(transport.ConnectionError); ok {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
put()
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
}
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
// Retry a non-failfast RPC when
// i) there is a connection error; or
// ii) the server started to drain before this RPC was initiated.
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
@ -265,13 +297,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
@ -281,9 +313,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
t.CloseStream(stream, nil)
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
return stream.Status().Err()
}
}


@ -36,7 +36,6 @@ package grpc
import (
"errors"
"fmt"
"math"
"net"
"strings"
"sync"
@ -57,8 +56,7 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
// removed in Q1 2017.
// DEPRECATED: Please use context.DeadlineExceeded instead.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security
@ -80,7 +78,8 @@ var (
errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable")
errNoAddr = errors.New("grpc: there is no address available to dial")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
)
@ -88,30 +87,54 @@ var (
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
copts transport.ConnectOptions
maxMsgSize int
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
copts transport.ConnectOptions
callOptions []CallOption
}
const defaultClientMaxMsgSize = math.MaxInt32
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = 1024 * 1024 * 4
)
// DialOption configures how we set up the connection.
type DialOption func(*dialOptions)
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive.
func WithMaxMsgSize(s int) DialOption {
// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.maxMsgSize = s
o.copts.InitialWindowSize = s
}
}
// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialConnWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.copts.InitialConnWindowSize = s
}
}
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
func WithMaxMsgSize(s int) DialOption {
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
}
// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
func WithDefaultCallOptions(cos ...CallOption) DialOption {
return func(o *dialOptions) {
o.callOptions = append(o.callOptions, cos...)
}
}
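Taken together, the new dial options look like this from the caller's side. A hedged sketch with a placeholder target, using `WithInsecure` only to keep it short:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:2379", // placeholder target
		grpc.WithInsecure(),
		grpc.WithInitialWindowSize(1<<20),     // per-stream window; values below 64K are ignored
		grpc.WithInitialConnWindowSize(1<<20), // per-connection window; values below 64K are ignored
		// Preferred replacement for the deprecated WithMaxMsgSize:
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(4*1024*1024)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```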
@ -206,7 +229,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption
}
// WithPerRPCCredentials returns a DialOption which sets
// credentials which will place auth state on each outbound RPC.
// credentials and places auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
return func(o *dialOptions) {
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
@ -243,7 +266,7 @@ func WithStatsHandler(h stats.Handler) DialOption {
}
}
// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors.
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
// address and won't try to reconnect.
// The default value of FailOnNonTempDialError is false.
@ -297,22 +320,29 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
}
// DialContext creates a client connection to the given target. ctx can be used to
// cancel or expire the pending connecting. Once this function returns, the
// cancel or expire the pending connection. Once this function returns, the
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
// to terminate all the pending operations after this function returns.
// This is the EXPERIMENTAL API.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
conns: make(map[Address]*addrConn),
}
cc.ctx, cc.cancel = context.WithCancel(context.Background())
cc.dopts.maxMsgSize = defaultClientMaxMsgSize
for _, opt := range opts {
opt(&cc.dopts)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
if cc.dopts.copts.Dialer == nil {
cc.dopts.copts.Dialer = newProxyDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
return dialContext(ctx, "tcp", addr)
},
)
}
grpcUA := "grpc-go/" + Version
if cc.dopts.copts.UserAgent != "" {
cc.dopts.copts.UserAgent += " " + grpcUA
} else {
@ -337,15 +367,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
scSet := false
if cc.dopts.scChan != nil {
// Wait for the initial service config.
// Try to get an initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
scSet = true
}
case <-ctx.Done():
return nil, ctx.Err()
default:
}
}
// Set defaults.
@ -361,53 +392,44 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
cc.authority = cc.dopts.copts.Authority
} else {
colonPos := strings.LastIndex(target, ":")
if colonPos == -1 {
colonPos = len(target)
}
cc.authority = target[:colonPos]
cc.authority = target
}
var ok bool
waitC := make(chan error, 1)
go func() {
var addrs []Address
defer close(waitC)
if cc.dopts.balancer == nil && cc.sc.LB != nil {
cc.dopts.balancer = cc.sc.LB
}
if cc.dopts.balancer == nil {
// Connect to target directly if balancer is nil.
addrs = append(addrs, Address{Addr: target})
} else {
if cc.dopts.balancer != nil {
var credsClone credentials.TransportCredentials
if creds != nil {
credsClone = creds.Clone()
}
config := BalancerConfig{
DialCreds: credsClone,
Dialer: cc.dopts.copts.Dialer,
}
if err := cc.dopts.balancer.Start(target, config); err != nil {
waitC <- err
return
}
ch := cc.dopts.balancer.Notify()
if ch == nil {
// There is no name resolver installed.
addrs = append(addrs, Address{Addr: target})
} else {
addrs, ok = <-ch
if !ok || len(addrs) == 0 {
waitC <- errNoAddr
return
if ch != nil {
if cc.dopts.block {
doneChan := make(chan struct{})
go cc.lbWatcher(doneChan)
<-doneChan
} else {
go cc.lbWatcher(nil)
}
}
}
for _, a := range addrs {
if err := cc.resetAddrConn(a, false, nil); err != nil {
waitC <- err
return
}
}
close(waitC)
// No balancer, or no resolver within the balancer. Connect directly.
if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil {
waitC <- err
return
}
}()
select {
case <-ctx.Done():
@ -417,16 +439,21 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return nil, err
}
}
// If balancer is nil or balancer.Notify() is nil, ok will be false here.
// The lbWatcher goroutine will not be created.
if ok {
go cc.lbWatcher()
if cc.dopts.scChan != nil && !scSet {
// Blocking wait for the initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
}
case <-ctx.Done():
return nil, ctx.Err()
}
}
if cc.dopts.scChan != nil {
go cc.scWatcher()
}
return cc, nil
}
@ -475,9 +502,14 @@ type ClientConn struct {
mu sync.RWMutex
sc ServiceConfig
conns map[Address]*addrConn
// Keepalive parameter can be updated if a GoAway is received.
mkp keepalive.ClientParameters
}
func (cc *ClientConn) lbWatcher() {
// lbWatcher watches the Notify channel of the balancer in cc and manages
// connections accordingly. If doneChan is not nil, it is closed after the
// first successful connection is made.
func (cc *ClientConn) lbWatcher(doneChan chan struct{}) {
for addrs := range cc.dopts.balancer.Notify() {
var (
add []Address // Addresses need to setup connections.
@ -504,7 +536,15 @@ func (cc *ClientConn) lbWatcher() {
}
cc.mu.Unlock()
for _, a := range add {
cc.resetAddrConn(a, true, nil)
if doneChan != nil {
err := cc.resetAddrConn(a, true, nil)
if err == nil {
close(doneChan)
doneChan = nil
}
} else {
cc.resetAddrConn(a, false, nil)
}
}
for _, c := range del {
c.tearDown(errConnDrain)
@ -533,12 +573,15 @@ func (cc *ClientConn) scWatcher() {
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error {
ac := &addrConn{
cc: cc,
addr: addr,
dopts: cc.dopts,
}
cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = cc.mkp
cc.mu.RUnlock()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
ac.stateCV = sync.NewCond(&ac.mu)
if EnableTracing {
@ -583,8 +626,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
stale.tearDown(tearDownErr)
}
}
// skipWait may overwrite the decision in ac.dopts.block.
if ac.dopts.block && !skipWait {
if block {
if err := ac.resetTransport(false); err != nil {
if err != errConnClosing {
// Tear down ac and delete it from cc.conns.
@ -617,12 +659,23 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
return nil
}
// TODO: Avoid the locking here.
func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the default config
// under the service (i.e /service/). If there is a default MethodConfig for
// the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock()
defer cc.mu.RUnlock()
m, ok = cc.sc.Methods[method]
return
m, ok := cc.sc.Methods[method]
if !ok {
i := strings.LastIndex(method, "/")
m, _ = cc.sc.Methods[method[:i+1]]
}
return m
}
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
@ -663,6 +716,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
}
if !ok {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put()
}
return nil, nil, errConnClosing
@ -670,6 +724,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put()
}
return nil, nil, err
@ -721,6 +776,20 @@ type addrConn struct {
tearDownErr error
}
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
switch r {
case transport.TooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.mkp.Time {
ac.cc.mkp.Time = v
}
ac.cc.mu.Unlock()
}
}
// printf records an event in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held.
func (ac *addrConn) printf(format string, a ...interface{}) {
@ -829,11 +898,14 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
}
ac.mu.Unlock()
closeTransport = false
timer := time.NewTimer(sleepTime - time.Since(connectTime))
select {
case <-time.After(sleepTime - time.Since(connectTime)):
case <-timer.C:
case <-ac.ctx.Done():
timer.Stop()
return ac.ctx.Err()
}
timer.Stop()
continue
}
ac.mu.Lock()
@ -877,6 +949,7 @@ func (ac *addrConn) transportMonitor() {
}
return
case <-t.GoAway():
ac.adjustParams(t.GetGoAwayReason())
// If GoAway happens without any network I/O error, ac is closed without shutting down the
// underlying transport (the transport will be closed when all the pending RPCs finished or
// failed.).
@ -885,9 +958,9 @@ func (ac *addrConn) transportMonitor() {
// In both cases, a new ac is created.
select {
case <-t.Error():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
default:
ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
ac.cc.resetAddrConn(ac.addr, false, errConnDrain)
}
return
case <-t.Error():
@ -896,7 +969,8 @@ func (ac *addrConn) transportMonitor() {
t.Close()
return
case <-t.GoAway():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
ac.adjustParams(t.GetGoAwayReason())
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
return
default:
}

cmd/vendor/google.golang.org/grpc/codec.go generated vendored Normal file

@ -0,0 +1,119 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"math"
"sync"
"github.com/golang/protobuf/proto"
)
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct {
}
type cachedProtoBuffer struct {
lastMarshaledSize uint32
proto.Buffer
}
func capToMaxInt32(val int) uint32 {
if val > math.MaxInt32 {
return uint32(math.MaxInt32)
}
return uint32(val)
}
func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
protoMsg := v.(proto.Message)
newSlice := make([]byte, 0, cb.lastMarshaledSize)
cb.SetBuf(newSlice)
cb.Reset()
if err := cb.Marshal(protoMsg); err != nil {
return nil, err
}
out := cb.Bytes()
cb.lastMarshaledSize = capToMaxInt32(len(out))
return out, nil
}
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
out, err := p.marshal(v, cb)
// put back buffer and lose the ref to the slice
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return out, err
}
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
cb.SetBuf(data)
v.(proto.Message).Reset()
err := cb.Unmarshal(v.(proto.Message))
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return err
}
func (protoCodec) String() string {
return "proto"
}
var (
protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
},
}
)
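Since the `Codec` interface above is the extension point, a custom codec can be dropped in through the existing `grpc.WithCodec` dial option. The `rawCodec` below is purely illustrative (it only handles `[]byte` payloads) and is not part of this commit:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// rawCodec is a hypothetical Codec that passes byte slices through untouched.
type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) String() string { return "raw" }

func main() {
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure(), grpc.WithCodec(rawCodec{}))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```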


@ -44,7 +44,7 @@ const (
// OK is returned on success.
OK Code = 0
// Canceled indicates the operation was cancelled (typically by the caller).
// Canceled indicates the operation was canceled (typically by the caller).
Canceled Code = 1
// Unknown error. An example of where this error may be returned is


@ -102,6 +102,10 @@ type TransportCredentials interface {
// authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection.
// Implementations must use the provided context to implement timely cancellation.
// gRPC will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
@ -192,14 +196,14 @@ func NewTLS(c *tls.Config) TransportCredentials {
return tc
}
// NewClientTLSFromCert constructs a TLS from the input certificate for client.
// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}
// NewClientTLSFromFile constructs a TLS from the input certificate file for client.
// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
@@ -214,12 +218,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
}
// NewServerTLSFromCert constructs a TLS from the input certificate for server.
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}
// NewServerTLSFromFile constructs a TLS from the input certificate file and key
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
// file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
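The new ClientHandshake comment above says gRPC only redials on temporary errors and asks wrapper errors to expose Temporary(). A minimal sketch of such a wrapper, assuming a custom TransportCredentials implementation (temporaryError and wrapHandshakeErr are illustrative names, not part of this diff):

package main

import "fmt"

// temporaryError wraps a handshake failure while exposing Temporary(), so the
// reconnect logic described above treats the failure as retryable.
type temporaryError struct{ err error }

func (t temporaryError) Error() string   { return fmt.Sprintf("handshake: %v", t.err) }
func (t temporaryError) Temporary() bool { return true }

// wrapHandshakeErr would be used by a custom TransportCredentials before
// returning an error from ClientHandshake.
func wrapHandshakeErr(err error) error {
	if err == nil {
		return nil
	}
	return temporaryError{err: err}
}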

113
cmd/vendor/google.golang.org/grpc/go16.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
// +build go1.6,!go1.7
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"fmt"
"io"
"net"
"net/http"
"os"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
"golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req.Cancel = ctx.Done()
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Internal, e.Desc)
default:
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}

113
cmd/vendor/google.golang.org/grpc/go17.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
// +build go1.7
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"context"
"io"
"net"
"net/http"
"os"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
return err
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Internal, e.Desc)
default:
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled, netctx.Canceled:
return codes.Canceled
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}

765
cmd/vendor/google.golang.org/grpc/grpclb.go generated vendored Normal file
View File

@@ -0,0 +1,765 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"errors"
"fmt"
"math/rand"
"net"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/naming"
)
// Client API for LoadBalancer service.
// Mostly copied from the generated pb.go file to avoid a circular dependency.
type loadBalancerClient struct {
cc *ClientConn
}
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
desc := &StreamDesc{
StreamName: "BalanceLoad",
ServerStreams: true,
ClientStreams: true,
}
stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
if err != nil {
return nil, err
}
x := &balanceLoadClientStream{stream}
return x, nil
}
type balanceLoadClientStream struct {
ClientStream
}
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
m := new(lbpb.LoadBalanceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// AddressType indicates the address type returned by name resolution.
type AddressType uint8
const (
// Backend indicates the server is a backend server.
Backend AddressType = iota
// GRPCLB indicates the server is a grpclb load balancer.
GRPCLB
)
// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
// name resolver used by the grpclb balancer is required to provide this type of metadata in
// its address updates.
type AddrMetadataGRPCLB struct {
// AddrType is the type of server (grpc load balancer or backend).
AddrType AddressType
// ServerName is the name of the grpc load balancer. Used for authentication.
ServerName string
}
// NewGRPCLBBalancer creates a grpclb load balancer.
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
return &balancer{
r: r,
}
}
type remoteBalancerInfo struct {
addr string
// the server name used for authentication with the remote LB server.
name string
}
// grpclbAddrInfo consists of the information of a backend server.
type grpclbAddrInfo struct {
addr Address
connected bool
// dropForRateLimiting indicates whether this particular request should be
// dropped by the client for rate limiting.
dropForRateLimiting bool
// dropForLoadBalancing indicates whether this particular request should be
// dropped by the client for load balancing.
dropForLoadBalancing bool
}
type balancer struct {
r naming.Resolver
target string
mu sync.Mutex
seq int // a sequence number to make sure addrCh does not get stale addresses.
w naming.Watcher
addrCh chan []Address
rbs []remoteBalancerInfo
addrs []*grpclbAddrInfo
next int
waitCh chan struct{}
done bool
expTimer *time.Timer
rand *rand.Rand
clientStats lbpb.ClientStats
}
func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
updates, err := w.Next()
if err != nil {
grpclog.Printf("grpclb: failed to get next addr update from watcher: %v", err)
return err
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return ErrClientConnClosing
}
for _, update := range updates {
switch update.Op {
case naming.Add:
var exist bool
for _, v := range b.rbs {
// TODO: Is the same addr with different server name a different balancer?
if update.Addr == v.addr {
exist = true
break
}
}
if exist {
continue
}
md, ok := update.Metadata.(*AddrMetadataGRPCLB)
if !ok {
// TODO: Revisit the handling here and may introduce some fallback mechanism.
grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata)
continue
}
switch md.AddrType {
case Backend:
// TODO: Revisit the handling here and may introduce some fallback mechanism.
grpclog.Printf("The name resolution does not give grpclb addresses")
continue
case GRPCLB:
b.rbs = append(b.rbs, remoteBalancerInfo{
addr: update.Addr,
name: md.ServerName,
})
default:
grpclog.Printf("Received unknow address type %d", md.AddrType)
continue
}
case naming.Delete:
for i, v := range b.rbs {
if update.Addr == v.addr {
copy(b.rbs[i:], b.rbs[i+1:])
b.rbs = b.rbs[:len(b.rbs)-1]
break
}
}
default:
grpclog.Println("Unknown update.Op ", update.Op)
}
}
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
// not a load balancer.
select {
case <-ch:
default:
}
ch <- b.rbs
return nil
}
func (b *balancer) serverListExpire(seq int) {
b.mu.Lock()
defer b.mu.Unlock()
// TODO: gRPC internals do not clear the connections when the server list is stale.
// This means RPCs will keep using the existing server list until b receives a new
// server list even though the list is expired. Revisit this behavior later.
if b.done || seq < b.seq {
return
}
b.next = 0
b.addrs = nil
// Ask grpc internals to close all the corresponding connections.
b.addrCh <- nil
}
func convertDuration(d *lbpb.Duration) time.Duration {
if d == nil {
return 0
}
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}
func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
if l == nil {
return
}
servers := l.GetServers()
expiration := convertDuration(l.GetExpirationInterval())
var (
sl []*grpclbAddrInfo
addrs []Address
)
for _, s := range servers {
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
addr := Address{
Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port),
Metadata: &md,
}
sl = append(sl, &grpclbAddrInfo{
addr: addr,
dropForRateLimiting: s.DropForRateLimiting,
dropForLoadBalancing: s.DropForLoadBalancing,
})
addrs = append(addrs, addr)
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
if len(sl) > 0 {
// reset b.next to 0 when replacing the server list.
b.next = 0
b.addrs = sl
b.addrCh <- addrs
if b.expTimer != nil {
b.expTimer.Stop()
b.expTimer = nil
}
if expiration > 0 {
b.expTimer = time.AfterFunc(expiration, func() {
b.serverListExpire(seq)
})
}
}
return
}
func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-done:
return
}
b.mu.Lock()
stats := b.clientStats
b.clientStats = lbpb.ClientStats{} // Clear the stats.
b.mu.Unlock()
t := time.Now()
stats.Timestamp = &lbpb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := s.Send(&lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
ClientStats: &stats,
},
}); err != nil {
grpclog.Printf("grpclb: failed to send load report: %v", err)
return
}
}
}
func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := lbc.BalanceLoad(ctx)
if err != nil {
grpclog.Printf("grpclb: failed to perform RPC to the remote balancer %v", err)
return
}
b.mu.Lock()
if b.done {
b.mu.Unlock()
return
}
b.mu.Unlock()
initReq := &lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
InitialRequest: &lbpb.InitialLoadBalanceRequest{
Name: b.target,
},
},
}
if err := stream.Send(initReq); err != nil {
grpclog.Printf("grpclb: failed to send init request: %v", err)
// TODO: backoff on retry?
return true
}
reply, err := stream.Recv()
if err != nil {
grpclog.Printf("grpclb: failed to recv init response: %v", err)
// TODO: backoff on retry?
return true
}
initResp := reply.GetInitialResponse()
if initResp == nil {
grpclog.Println("grpclb: reply from remote balancer did not include initial response.")
return
}
// TODO: Support delegation.
if initResp.LoadBalancerDelegate != "" {
// delegation
grpclog.Println("TODO: Delegation is not supported yet.")
return
}
streamDone := make(chan struct{})
defer close(streamDone)
b.mu.Lock()
b.clientStats = lbpb.ClientStats{} // Clear client stats.
b.mu.Unlock()
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
go b.sendLoadReport(stream, d, streamDone)
}
// Retrieve the server list.
for {
reply, err := stream.Recv()
if err != nil {
grpclog.Printf("grpclb: failed to recv server list: %v", err)
break
}
b.mu.Lock()
if b.done || seq < b.seq {
b.mu.Unlock()
return
}
b.seq++ // tick when receiving a new list of servers.
seq = b.seq
b.mu.Unlock()
if serverList := reply.GetServerList(); serverList != nil {
b.processServerList(serverList, seq)
}
}
return true
}
func (b *balancer) Start(target string, config BalancerConfig) error {
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
// TODO: Fall back to the basic direct connection if there is no name resolver.
if b.r == nil {
return errors.New("there is no name resolver installed")
}
b.target = target
b.mu.Lock()
if b.done {
b.mu.Unlock()
return ErrClientConnClosing
}
b.addrCh = make(chan []Address)
w, err := b.r.Resolve(target)
if err != nil {
b.mu.Unlock()
grpclog.Printf("grpclb: failed to resolve address: %v, err: %v", target, err)
return err
}
b.w = w
b.mu.Unlock()
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
// Spawn a goroutine to monitor the name resolution of remote load balancer.
go func() {
for {
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
grpclog.Printf("grpclb: the naming watcher stops working due to %v.\n", err)
close(balancerAddrsCh)
return
}
}
}()
// Spawn a goroutine to talk to the remote load balancer.
go func() {
var (
cc *ClientConn
// ccError is closed when there is an error in the current cc.
// A new rb should be picked from rbs and connected.
ccError chan struct{}
rb *remoteBalancerInfo
rbs []remoteBalancerInfo
rbIdx int
)
defer func() {
if ccError != nil {
select {
case <-ccError:
default:
close(ccError)
}
}
if cc != nil {
cc.Close()
}
}()
for {
var ok bool
select {
case rbs, ok = <-balancerAddrsCh:
if !ok {
return
}
foundIdx := -1
if rb != nil {
for i, trb := range rbs {
if trb == *rb {
foundIdx = i
break
}
}
}
if foundIdx >= 0 {
if foundIdx >= 1 {
// Move the address in use to the beginning of the list.
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
rbIdx = 0
}
continue // If found, don't dial new cc.
} else if len(rbs) > 0 {
// Pick a random one from the list, instead of always using the first one.
if l := len(rbs); l > 1 && rb != nil {
tmpIdx := b.rand.Intn(l - 1)
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
}
rbIdx = 0
rb = &rbs[0]
} else {
// foundIdx < 0 && len(rbs) <= 0.
rb = nil
}
case <-ccError:
ccError = nil
if rbIdx < len(rbs)-1 {
rbIdx++
rb = &rbs[rbIdx]
} else {
rb = nil
}
}
if rb == nil {
continue
}
if cc != nil {
cc.Close()
}
// Talk to the remote load balancer to get the server list.
var (
err error
dopts []DialOption
)
if creds := config.DialCreds; creds != nil {
if rb.name != "" {
if err := creds.OverrideServerName(rb.name); err != nil {
grpclog.Printf("grpclb: failed to override the server name in the credentials: %v", err)
continue
}
}
dopts = append(dopts, WithTransportCredentials(creds))
} else {
dopts = append(dopts, WithInsecure())
}
if dialer := config.Dialer; dialer != nil {
// WithDialer takes a different type of function, so we instead use a special DialOption here.
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
}
ccError = make(chan struct{})
cc, err = Dial(rb.addr, dopts...)
if err != nil {
grpclog.Printf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
close(ccError)
continue
}
b.mu.Lock()
b.seq++ // tick when getting a new balancer address
seq := b.seq
b.next = 0
b.mu.Unlock()
go func(cc *ClientConn, ccError chan struct{}) {
lbc := &loadBalancerClient{cc}
b.callRemoteBalancer(lbc, seq)
cc.Close()
select {
case <-ccError:
default:
close(ccError)
}
}(cc, ccError)
}
}()
return nil
}
func (b *balancer) down(addr Address, err error) {
b.mu.Lock()
defer b.mu.Unlock()
for _, a := range b.addrs {
if addr == a.addr {
a.connected = false
break
}
}
}
func (b *balancer) Up(addr Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return nil
}
var cnt int
for _, a := range b.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
cnt++
}
}
// addr is the only one which is connected. Notify the Get() callers who are blocking.
if cnt == 1 && b.waitCh != nil {
close(b.waitCh)
b.waitCh = nil
}
return func(err error) {
b.down(addr, err)
}
}
func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
b.mu.Lock()
if b.done {
b.mu.Unlock()
err = ErrClientConnClosing
return
}
seq := b.seq
defer func() {
if err != nil {
return
}
put = func() {
s, ok := rpcInfoFromContext(ctx)
if !ok {
return
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
b.clientStats.NumCallsFinished++
if !s.bytesSent {
b.clientStats.NumCallsFinishedWithClientFailedToSend++
} else if s.bytesReceived {
b.clientStats.NumCallsFinishedKnownReceived++
}
}
}()
b.clientStats.NumCallsStarted++
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
if !opts.BlockingWait {
if len(b.addrs) == 0 {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on b.addrs for a failfast RPC.
addr = b.addrs[b.next].addr
b.next++
b.mu.Unlock()
return
}
// Wait on b.waitCh for non-failfast RPCs.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
for {
select {
case <-ctx.Done():
b.mu.Lock()
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ctx.Err()
return
case <-ch:
b.mu.Lock()
if b.done {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
}
}
}
func (b *balancer) Notify() <-chan []Address {
return b.addrCh
}
func (b *balancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return errBalancerClosed
}
b.done = true
if b.expTimer != nil {
b.expTimer.Stop()
}
if b.waitCh != nil {
close(b.waitCh)
}
if b.addrCh != nil {
close(b.addrCh)
}
if b.w != nil {
b.w.Close()
}
return nil
}
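Editor's sketch of how the exported pieces of this file are meant to fit together; the resolver r is a placeholder (any naming.Resolver whose updates carry *AddrMetadataGRPCLB, as watchAddrUpdates expects), and dialThroughGRPCLB is an illustrative helper, not part of this change:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

// dialThroughGRPCLB plugs the balancer defined above into a client connection.
func dialThroughGRPCLB(target string, r naming.Resolver) (*grpc.ClientConn, error) {
	b := grpc.NewGRPCLBBalancer(r)
	return grpc.Dial(target,
		grpc.WithBalancer(b), // route RPCs through the grpclb balancer
		grpc.WithInsecure(),  // for the sketch only; use credentials in practice
	)
}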

View File

@@ -0,0 +1,629 @@
// Code generated by protoc-gen-go.
// source: grpclb.proto
// DO NOT EDIT!
/*
Package grpc_lb_v1 is a generated protocol buffer package.
It is generated from these files:
grpclb.proto
It has these top-level messages:
Duration
Timestamp
LoadBalanceRequest
InitialLoadBalanceRequest
ClientStats
LoadBalanceResponse
InitialLoadBalanceResponse
ServerList
Server
*/
package grpc_lb_v1
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
type LoadBalanceRequest struct {
// Types that are valid to be assigned to LoadBalanceRequestType:
// *LoadBalanceRequest_InitialRequest
// *LoadBalanceRequest_ClientStats
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
}
func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
func (*LoadBalanceRequest) ProtoMessage() {}
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isLoadBalanceRequest_LoadBalanceRequestType interface {
isLoadBalanceRequest_LoadBalanceRequestType()
}
type LoadBalanceRequest_InitialRequest struct {
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
}
type LoadBalanceRequest_ClientStats struct {
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
}
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
if m != nil {
return m.LoadBalanceRequestType
}
return nil
}
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
return x.InitialRequest
}
return nil
}
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
return x.ClientStats
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
(*LoadBalanceRequest_InitialRequest)(nil),
(*LoadBalanceRequest_ClientStats)(nil),
}
}
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadBalanceRequest)
// load_balance_request_type
switch x := m.LoadBalanceRequestType.(type) {
case *LoadBalanceRequest_InitialRequest:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.InitialRequest); err != nil {
return err
}
case *LoadBalanceRequest_ClientStats:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClientStats); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
}
return nil
}
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadBalanceRequest)
switch tag {
case 1: // load_balance_request_type.initial_request
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(InitialLoadBalanceRequest)
err := b.DecodeMessage(msg)
m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
return true, err
case 2: // load_balance_request_type.client_stats
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientStats)
err := b.DecodeMessage(msg)
m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
return true, err
default:
return false, nil
}
}
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadBalanceRequest)
// load_balance_request_type
switch x := m.LoadBalanceRequestType.(type) {
case *LoadBalanceRequest_InitialRequest:
s := proto.Size(x.InitialRequest)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadBalanceRequest_ClientStats:
s := proto.Size(x.ClientStats)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type InitialLoadBalanceRequest struct {
// Name of load balanced service (e.g., balancer.service.com);
// length should be less than 256 bytes.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
func (*InitialLoadBalanceRequest) ProtoMessage() {}
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *InitialLoadBalanceRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
type ClientStats struct {
// The timestamp of generating the report.
Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
// The total number of RPCs that started.
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
// The total number of RPCs that finished.
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
// The total number of RPCs that were dropped by the client because of rate
// limiting.
NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
// The total number of RPCs that were dropped by the client because of load
// balancing.
NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
// The total number of RPCs that failed to reach a server except dropped RPCs.
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
// The total number of RPCs that finished and are known to have been received
// by a server.
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
}
func (m *ClientStats) Reset() { *m = ClientStats{} }
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage() {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ClientStats) GetTimestamp() *Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
func (m *ClientStats) GetNumCallsStarted() int64 {
if m != nil {
return m.NumCallsStarted
}
return 0
}
func (m *ClientStats) GetNumCallsFinished() int64 {
if m != nil {
return m.NumCallsFinished
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
if m != nil {
return m.NumCallsFinishedWithDropForRateLimiting
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
if m != nil {
return m.NumCallsFinishedWithDropForLoadBalancing
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
if m != nil {
return m.NumCallsFinishedWithClientFailedToSend
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
if m != nil {
return m.NumCallsFinishedKnownReceived
}
return 0
}
type LoadBalanceResponse struct {
// Types that are valid to be assigned to LoadBalanceResponseType:
// *LoadBalanceResponse_InitialResponse
// *LoadBalanceResponse_ServerList
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
}
func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
func (*LoadBalanceResponse) ProtoMessage() {}
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
type isLoadBalanceResponse_LoadBalanceResponseType interface {
isLoadBalanceResponse_LoadBalanceResponseType()
}
type LoadBalanceResponse_InitialResponse struct {
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
}
type LoadBalanceResponse_ServerList struct {
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
}
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
if m != nil {
return m.LoadBalanceResponseType
}
return nil
}
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
return x.InitialResponse
}
return nil
}
func (m *LoadBalanceResponse) GetServerList() *ServerList {
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
return x.ServerList
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
(*LoadBalanceResponse_InitialResponse)(nil),
(*LoadBalanceResponse_ServerList)(nil),
}
}
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadBalanceResponse)
// load_balance_response_type
switch x := m.LoadBalanceResponseType.(type) {
case *LoadBalanceResponse_InitialResponse:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.InitialResponse); err != nil {
return err
}
case *LoadBalanceResponse_ServerList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ServerList); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
}
return nil
}
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadBalanceResponse)
switch tag {
case 1: // load_balance_response_type.initial_response
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(InitialLoadBalanceResponse)
err := b.DecodeMessage(msg)
m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
return true, err
case 2: // load_balance_response_type.server_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerList)
err := b.DecodeMessage(msg)
m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
return true, err
default:
return false, nil
}
}
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadBalanceResponse)
// load_balance_response_type
switch x := m.LoadBalanceResponseType.(type) {
case *LoadBalanceResponse_InitialResponse:
s := proto.Size(x.InitialResponse)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadBalanceResponse_ServerList:
s := proto.Size(x.ServerList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type InitialLoadBalanceResponse struct {
// This is an application layer redirect that indicates the client should use
// the specified server for load balancing. When this field is non-empty in
// the response, the client should open a separate connection to the
// load_balancer_delegate and call the BalanceLoad method. Its length should
// be less than 64 bytes.
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
// This interval defines how often the client should send the client stats
// to the load balancer. Stats should only be reported when the duration is
// positive.
ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
}
func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
func (*InitialLoadBalanceResponse) ProtoMessage() {}
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
if m != nil {
return m.LoadBalancerDelegate
}
return ""
}
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
if m != nil {
return m.ClientStatsReportInterval
}
return nil
}
type ServerList struct {
// Contains a list of servers selected by the load balancer. The list will
// be updated when server resolutions change or as needed to balance load
// across more servers. The client should consume the server list in order
// unless instructed otherwise via the client_config.
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
// Indicates the amount of time that the client should consider this server
// list as valid. It may be considered stale after waiting this interval of
// time after receiving the list. If the interval is not positive, the
// client can assume the list is valid until the next list is received.
ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"`
}
func (m *ServerList) Reset() { *m = ServerList{} }
func (m *ServerList) String() string { return proto.CompactTextString(m) }
func (*ServerList) ProtoMessage() {}
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ServerList) GetServers() []*Server {
if m != nil {
return m.Servers
}
return nil
}
func (m *ServerList) GetExpirationInterval() *Duration {
if m != nil {
return m.ExpirationInterval
}
return nil
}
// Contains server information. When none of the [drop_for_*] fields are true,
// use the other fields. When drop_for_rate_limiting is true, ignore all other
// fields. Use drop_for_load_balancing only when it is true and
// drop_for_rate_limiting is false.
type Server struct {
// A resolved address for the server, serialized in network-byte-order. It may
// either be an IPv4 or IPv6 address.
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
// A resolved port number for the server.
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
// An opaque but printable token given to the frontend for each pick. All
// frontend requests for that pick must include the token in its initial
// metadata. The token is used by the backend to verify the request and to
// allow the backend to report load to the gRPC LB system.
//
// Its length is variable but less than 50 bytes.
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
// Indicates whether this particular request should be dropped by the client
// for rate limiting.
DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
// Indicates whether this particular request should be dropped by the client
// for load balancing.
DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
}
func (m *Server) Reset() { *m = Server{} }
func (m *Server) String() string { return proto.CompactTextString(m) }
func (*Server) ProtoMessage() {}
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *Server) GetIpAddress() []byte {
if m != nil {
return m.IpAddress
}
return nil
}
func (m *Server) GetPort() int32 {
if m != nil {
return m.Port
}
return 0
}
func (m *Server) GetLoadBalanceToken() string {
if m != nil {
return m.LoadBalanceToken
}
return ""
}
func (m *Server) GetDropForRateLimiting() bool {
if m != nil {
return m.DropForRateLimiting
}
return false
}
func (m *Server) GetDropForLoadBalancing() bool {
if m != nil {
return m.DropForLoadBalancing
}
return false
}
func init() {
proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
}
func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 733 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34,
0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a,
0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9,
0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1,
0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92,
0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51,
0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0,
0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51,
0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7,
0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4,
0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13,
0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67,
0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed,
0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93,
0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f,
0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2,
0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4,
0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd,
0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2,
0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd,
0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71,
0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a,
0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c,
0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c,
0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0,
0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84,
0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37,
0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5,
0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f,
0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07,
0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71,
0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f,
0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87,
0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94,
0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56,
0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9,
0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a,
0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e,
0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87,
0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28,
0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70,
0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94,
0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5,
0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff,
0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00,
}

View File

@@ -40,17 +40,17 @@ import (
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API.
// This is an EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API.
// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
// This is an EXPERIMENTAL API.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on
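For orientation, a minimal client interceptor matching the UnaryClientInterceptor signature above; the timing/logging behavior and the timingInterceptor name are illustrative. It would be installed at dial time, for example with grpc.WithUnaryInterceptor(timingInterceptor).

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// timingInterceptor logs the latency of each unary call. As the comment above
// notes, the interceptor is responsible for calling invoker itself.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc=%s duration=%v err=%v", method, time.Since(start), err)
	return err
}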

View File

@@ -39,8 +39,10 @@ import (
)
// ClientParameters is used to set keepalive parameters on the client-side.
// These configure how the client will actively probe to notice when a connection broken
// and to cause activity so intermediaries are aware the connection is still in use.
// These configure how the client will actively probe to notice when a connection is broken
// and send pings so intermediaries will be aware of the liveness of the connection.
// Make sure these parameters are set in coordination with the keepalive policy on the server,
// as incompatible settings can result in the connection being closed.
type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive.
Time time.Duration // The current default value is infinity.
@@ -48,5 +50,31 @@ type ClientParameters struct {
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
// If true, client runs keepalive checks even with no active RPCs.
PermitWithoutStream bool
PermitWithoutStream bool // false by default.
}
// ServerParameters is used to set keepalive and max-age parameters on the server-side.
type ServerParameters struct {
// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
// Idleness is measured from the most recent time the number of outstanding RPCs became zero or from connection establishment.
MaxConnectionIdle time.Duration // The current default value is infinity.
// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
MaxConnectionAge time.Duration // The current default value is infinity.
// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
MaxConnectionAgeGrace time.Duration // The current default value is infinity.
// After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive.
Time time.Duration // The current default value is 2 hours.
// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
}
// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
// Server will close connection with a client that violates this policy.
type EnforcementPolicy struct {
// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
MinTime time.Duration // The current default value is 5 minutes.
// If true, server expects keepalive pings even when there are no active streams (RPCs).
PermitWithoutStream bool // false by default.
}
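A hedged configuration sketch for these structs; the option helpers grpc.WithKeepaliveParams, grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy are assumed to be the standard hooks they are fed into, and the durations are arbitrary example values:

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// Client side: ping after 30s of inactivity, wait 10s for an ack, and keep
// pinging even when no RPC is in flight.
var dialOpts = []grpc.DialOption{
	grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:                30 * time.Second,
		Timeout:             10 * time.Second,
		PermitWithoutStream: true,
	}),
}

// Server side: tolerate those pings, and close connections idle for 5 minutes.
var serverOpts = []grpc.ServerOption{
	grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle: 5 * time.Minute,
	}),
	grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
		MinTime:             20 * time.Second,
		PermitWithoutStream: true,
	}),
}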

View File

@@ -36,58 +36,27 @@
package metadata // import "google.golang.org/grpc/metadata"
import (
"encoding/base64"
"fmt"
"strings"
"golang.org/x/net/context"
)
const (
binHdrSuffix = "-bin"
)
// encodeKeyValue encodes key and value qualified for transmission via gRPC.
// Transmitting binary headers violates HTTP/2 spec.
// TODO(zhaoq): Maybe check if k is ASCII also.
func encodeKeyValue(k, v string) (string, string) {
k = strings.ToLower(k)
if strings.HasSuffix(k, binHdrSuffix) {
val := base64.StdEncoding.EncodeToString([]byte(v))
v = string(val)
}
return k, v
}
// DecodeKeyValue returns the original key and value corresponding to the
// encoded data in k, v.
// If k is a binary header and v contains comma, v is split on comma before decoded,
// and the decoded v will be joined with comma before returned.
// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
func DecodeKeyValue(k, v string) (string, string, error) {
if !strings.HasSuffix(k, binHdrSuffix) {
return k, v, nil
}
vvs := strings.Split(v, ",")
for i, vv := range vvs {
val, err := base64.StdEncoding.DecodeString(vv)
if err != nil {
return "", "", err
}
vvs[i] = string(val)
}
return k, strings.Join(vvs, ","), nil
return k, v, nil
}
// MD is a mapping from metadata keys to values. Users should use the following
// two convenience functions New and Pairs to generate MD.
type MD map[string][]string
// New creates a MD from given key-value map.
// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be applied Base64 encoding.
// New creates an MD from a given key-value map.
// Keys are automatically converted to lowercase.
func New(m map[string]string) MD {
md := MD{}
for k, v := range m {
key, val := encodeKeyValue(k, v)
for k, val := range m {
key := strings.ToLower(k)
md[key] = append(md[key], val)
}
return md
@ -95,20 +64,19 @@ func New(m map[string]string) MD {
// Pairs returns an MD formed by the mapping of key, value ...
// Pairs panics if len(kv) is odd.
// Keys are automatically converted to lowercase, and for keys having "-bin" as a suffix, their values will have Base64 encoding applied.
// Keys are automatically converted to lowercase.
func Pairs(kv ...string) MD {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
md := MD{}
var k string
var key string
for i, s := range kv {
if i%2 == 0 {
k = s
key = strings.ToLower(s)
continue
}
key, val := encodeKeyValue(k, s)
md[key] = append(md[key], val)
md[key] = append(md[key], s)
}
return md
}
@ -123,9 +91,9 @@ func (md MD) Copy() MD {
return Join(md)
}
// Join joins any number of MDs into a single MD.
// Join joins any number of mds into a single MD.
// The order of values for each key is determined by the order in which
// the MDs containing those values are presented to Join.
// the mds containing those values are presented to Join.
func Join(mds ...MD) MD {
out := MD{}
for _, md := range mds {
@ -136,17 +104,41 @@ func Join(mds ...MD) MD {
return out
}
type mdKey struct{}
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
// NewContext creates a new context with md attached.
// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
func NewContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdKey{}, md)
return NewOutgoingContext(ctx, md)
}
// FromContext returns the MD in ctx if it exists.
// The returned md should be immutable, writing to it may cause races.
// Modification should be made to the copies of the returned md.
// NewIncomingContext creates a new context with incoming md attached.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, md)
}
// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated.
func FromContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdKey{}).(MD)
return FromIncomingContext(ctx)
}
// FromIncomingContext returns the incoming metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdIncomingKey{}).(MD)
return
}
// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
return
}
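A minimal sketch of the intended incoming/outgoing split, assuming a client that attaches metadata before an RPC and a server handler that reads it back; the "token" key mirrors the auth usage elsewhere in this pull request but is otherwise arbitrary.

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "google.golang.org/grpc/metadata"
)

func main() {
    // Client side: attach metadata that travels with the outgoing RPC.
    md := metadata.Pairs("token", "abc123")
    ctx := metadata.NewOutgoingContext(context.Background(), md)

    // Server side: a handler sees the same pairs as *incoming* metadata.
    // Simulated here by attaching md as incoming metadata on the same ctx.
    ctx = metadata.NewIncomingContext(ctx, md)
    if in, ok := metadata.FromIncomingContext(ctx); ok {
        fmt.Println(in["token"]) // [abc123]
    }
}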

View File

@ -42,7 +42,8 @@ import (
"google.golang.org/grpc/credentials"
)
// Peer contains the information of the peer for an RPC.
// Peer contains the information of the peer for an RPC, such as the address
// and authentication information.
type Peer struct {
// Addr is the peer address.
Addr net.Addr

145
cmd/vendor/google.golang.org/grpc/proxy.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"net/url"
"golang.org/x/net/context"
)
var (
// errDisabled indicates that proxy is disabled for the address.
errDisabled = errors.New("proxy is disabled for the address")
// The following variable will be overwritten in the tests.
httpProxyFromEnvironment = http.ProxyFromEnvironment
)
func mapAddress(ctx context.Context, address string) (string, error) {
req := &http.Request{
URL: &url.URL{
Scheme: "https",
Host: address,
},
}
url, err := httpProxyFromEnvironment(req)
if err != nil {
return "", err
}
if url == nil {
return "", errDisabled
}
return url.Host, nil
}
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
// It's possible that this reader reads more than what's needed for the response and stores
// those bytes in the buffer.
// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
// bytes in the buffer.
type bufConn struct {
net.Conn
r io.Reader
}
func (c *bufConn) Read(b []byte) (int, error) {
return c.r.Read(b)
}
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
}
}()
req := (&http.Request{
Method: http.MethodConnect,
URL: &url.URL{Host: addr},
Header: map[string][]string{"User-Agent": {grpcUA}},
})
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
}
r := bufio.NewReader(conn)
resp, err := http.ReadResponse(r, req)
if err != nil {
return nil, fmt.Errorf("reading server HTTP response: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
dump, err := httputil.DumpResponse(resp, true)
if err != nil {
return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
return &bufConn{Conn: conn, r: r}, nil
}
// newProxyDialer returns a dialer that connects to proxy first if necessary.
// The returned dialer checks if a proxy is necessary, dials the proxy with the
// provided dialer, does the HTTP CONNECT handshake, and returns the connection.
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
return func(ctx context.Context, addr string) (conn net.Conn, err error) {
var skipHandshake bool
newAddr, err := mapAddress(ctx, addr)
if err != nil {
if err != errDisabled {
return nil, err
}
skipHandshake = true
newAddr = addr
}
conn, err = dialer(ctx, newAddr)
if err != nil {
return
}
if !skipHandshake {
conn, err = doHTTPConnectHandshake(ctx, conn, addr)
}
return
}
}
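Since newProxyDialer and mapAddress are unexported, the closest standalone illustration of the lookup they perform is the standard library call they wrap; the target host below is a placeholder.

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

func main() {
    // Ask the standard library whether HTTPS_PROXY/NO_PROXY settings apply
    // to this target, exactly as mapAddress does for a gRPC dial target.
    req := &http.Request{URL: &url.URL{Scheme: "https", Host: "example.com:443"}}
    proxyURL, err := http.ProxyFromEnvironment(req)
    if err != nil {
        panic(err)
    }
    if proxyURL == nil {
        fmt.Println("no proxy configured; dial the target directly")
        return
    }
    fmt.Println("dial", proxyURL.Host, "and issue HTTP CONNECT for the target")
}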

View File

@ -37,48 +37,22 @@ import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"sync"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// Codec defines the interface gRPC uses to encode and decode messages.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct{}
func (protoCodec) Marshal(v interface{}) ([]byte, error) {
return proto.Marshal(v.(proto.Message))
}
func (protoCodec) Unmarshal(data []byte, v interface{}) error {
return proto.Unmarshal(data, v.(proto.Message))
}
func (protoCodec) String() string {
return "proto"
}
// Compressor defines the interface gRPC uses to compress a message.
type Compressor interface {
// Do compresses p into w.
@ -87,16 +61,24 @@ type Compressor interface {
Type() string
}
// NewGZIPCompressor creates a Compressor based on GZIP.
func NewGZIPCompressor() Compressor {
return &gzipCompressor{}
type gzipCompressor struct {
pool sync.Pool
}
type gzipCompressor struct {
// NewGZIPCompressor creates a Compressor based on GZIP.
func NewGZIPCompressor() Compressor {
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
return gzip.NewWriter(ioutil.Discard)
},
},
}
}
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
z := gzip.NewWriter(w)
z := c.pool.Get().(*gzip.Writer)
z.Reset(w)
if _, err := z.Write(p); err != nil {
return err
}
@ -116,6 +98,7 @@ type Decompressor interface {
}
type gzipDecompressor struct {
pool sync.Pool
}
// NewGZIPDecompressor creates a Decompressor based on GZIP.
@ -124,11 +107,26 @@ func NewGZIPDecompressor() Decompressor {
}
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
z, err := gzip.NewReader(r)
if err != nil {
return nil, err
var z *gzip.Reader
switch maybeZ := d.pool.Get().(type) {
case nil:
newZ, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
z = newZ
case *gzip.Reader:
z = maybeZ
if err := z.Reset(r); err != nil {
d.pool.Put(z)
return nil, err
}
}
defer z.Close()
defer func() {
z.Close()
d.pool.Put(z)
}()
return ioutil.ReadAll(z)
}
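The pooling above changes only the internals; wiring gzip into a connection still goes through the existing compressor options. A hedged client-side sketch follows (the WithCompressor/WithDecompressor dial options are assumed from the same package; only the server-side RPCDecompressor option appears in this diff, and the dial target is a placeholder):

package main

import "google.golang.org/grpc"

func main() {
    // Compress outgoing messages and decompress incoming ones using the
    // pooled gzip implementations defined above.
    conn, err := grpc.Dial("localhost:50051", // placeholder target
        grpc.WithInsecure(),
        grpc.WithCompressor(grpc.NewGZIPCompressor()),
        grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
    )
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    // ... hand conn to a generated client ...
}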
@ -138,11 +136,14 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
failFast bool
headerMD metadata.MD
trailerMD metadata.MD
peer *peer.Peer
traceInfo traceInfo // in trace.go
failFast bool
headerMD metadata.MD
trailerMD metadata.MD
peer *peer.Peer
traceInfo traceInfo // in trace.go
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
}
var defaultCallInfo = callInfo{failFast: true}
@ -159,6 +160,14 @@ type CallOption interface {
after(*callInfo)
}
// EmptyCallOption does not alter the Call configuration.
// It can be embedded in another structure to carry satellite data for use
// by interceptors.
type EmptyCallOption struct{}
func (EmptyCallOption) before(*callInfo) error { return nil }
func (EmptyCallOption) after(*callInfo) {}
type beforeCall func(c *callInfo) error
func (o beforeCall) before(c *callInfo) error { return o(c) }
@ -189,7 +198,9 @@ func Trailer(md *metadata.MD) CallOption {
// unary RPC.
func Peer(peer *peer.Peer) CallOption {
return afterCall(func(c *callInfo) {
*peer = *c.peer
if c.peer != nil {
*peer = *c.peer
}
})
}
@ -198,7 +209,8 @@ func Peer(peer *peer.Peer) CallOption {
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will retry
// the call if it fails due to a transient error. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true.
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
// Note: failFast is true by default.
func FailFast(failFast bool) CallOption {
return beforeCall(func(c *callInfo) error {
c.failFast = failFast
@ -206,6 +218,31 @@ func FailFast(failFast bool) CallOption {
})
}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
func MaxCallRecvMsgSize(s int) CallOption {
return beforeCall(func(o *callInfo) error {
o.maxReceiveMessageSize = &s
return nil
})
}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
func MaxCallSendMsgSize(s int) CallOption {
return beforeCall(func(o *callInfo) error {
o.maxSendMessageSize = &s
return nil
})
}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
return beforeCall(func(c *callInfo) error {
c.creds = creds
return nil
})
}
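These per-call options are passed at invocation time, e.g. client.SomeMethod(ctx, req, opts...) on a generated client (the method name is hypothetical). A small sketch of assembling them; the 16 MiB limits are illustrative:

package main

import (
    "fmt"

    "google.golang.org/grpc"
)

func main() {
    opts := []grpc.CallOption{
        grpc.MaxCallRecvMsgSize(16 * 1024 * 1024), // per-call receive limit
        grpc.MaxCallSendMsgSize(16 * 1024 * 1024), // per-call send limit
        grpc.FailFast(false),                      // wait for a ready connection
        // grpc.PerRPCCredentials(creds) would go here, given a
        // credentials.PerRPCCredentials implementation.
    }
    fmt.Println(len(opts), "call options prepared")
}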
// The format of the payload: compressed or not?
type payloadFormat uint8
@ -239,8 +276,8 @@ type parser struct {
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := p.r.Read(p.header[:]); err != nil {
return 0, nil, err
}
@ -250,13 +287,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
if length == 0 {
return pf, nil, nil
}
if length > uint32(maxMsgSize) {
return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize)
if length > uint32(maxReceiveMessageSize) {
return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
// of making it for each message:
msg = make([]byte, int(length))
if _, err := io.ReadFull(p.r, msg); err != nil {
if _, err := p.r.Read(msg); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
@ -277,7 +314,7 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
// TODO(zhaoq): optimize to reduce memory alloc and copying.
b, err = c.Marshal(msg)
if err != nil {
return nil, err
return nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
if outPayload != nil {
outPayload.Payload = msg
@ -287,14 +324,14 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
}
if cp != nil {
if err := cp.Do(cbuf, b); err != nil {
return nil, err
return nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
b = cbuf.Bytes()
}
length = uint(len(b))
}
if length > math.MaxUint32 {
return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
return nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", length)
}
const (
@ -335,8 +372,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er
return nil
}
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxMsgSize)
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return err
}
@ -352,10 +389,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
if len(d) > maxMsgSize {
if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize)
return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
if err := c.Unmarshal(d, m); err != nil {
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
@ -370,116 +407,57 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return nil
}
// rpcError defines the status from an RPC.
type rpcError struct {
code codes.Code
desc string
type rpcInfo struct {
bytesSent bool
bytesReceived bool
}
func (e *rpcError) Error() string {
return fmt.Sprintf("rpc error: code = %s desc = %s", e.code, e.desc)
type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{})
}
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
return
}
func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
if ss, ok := rpcInfoFromContext(ctx); ok {
*ss = s
}
return
}
// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
//
// Deprecated; use status.FromError and Code method instead.
func Code(err error) codes.Code {
if err == nil {
return codes.OK
}
if e, ok := err.(*rpcError); ok {
return e.code
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated; use status.FromError and Message method instead.
func ErrorDesc(err error) string {
if err == nil {
return ""
}
if e, ok := err.(*rpcError); ok {
return e.desc
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
// Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK.
//
// Deprecated; use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error {
if c == codes.OK {
return nil
}
return &rpcError{
code: c,
desc: fmt.Sprintf(format, a...),
}
}
// toRPCErr converts an error into a rpcError.
func toRPCErr(err error) error {
switch e := err.(type) {
case *rpcError:
return err
case transport.StreamError:
return &rpcError{
code: e.Code,
desc: e.Desc,
}
case transport.ConnectionError:
return &rpcError{
code: codes.Internal,
desc: e.Desc,
}
default:
switch err {
case context.DeadlineExceeded:
return &rpcError{
code: codes.DeadlineExceeded,
desc: err.Error(),
}
case context.Canceled:
return &rpcError{
code: codes.Canceled,
desc: err.Error(),
}
case ErrClientConnClosing:
return &rpcError{
code: codes.FailedPrecondition,
desc: err.Error(),
}
}
}
return Errorf(codes.Unknown, "%v", err)
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
return status.Errorf(c, format, a...)
}
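With Code, ErrorDesc, and Errorf now delegating to the status package, callers can inspect errors through status.FromError directly; a short sketch with a fabricated NotFound error:

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    // An error a handler might return (fabricated for illustration).
    err := status.Errorf(codes.NotFound, "key %q not found", "foo")

    // Preferred over the deprecated grpc.Code / grpc.ErrorDesc helpers.
    if s, ok := status.FromError(err); ok {
        fmt.Println(s.Code(), s.Message())
    }
}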
// MethodConfig defines the configuration recommended by the service providers for a
@ -489,24 +467,22 @@ type MethodConfig struct {
// WaitForReady indicates whether RPCs sent to this method should wait until
// the connection is ready by default (!failfast). The value specified via the
// gRPC client API will override the value set here.
WaitForReady bool
WaitForReady *bool
// Timeout is the default timeout for RPCs sent to this method. The actual
// deadline used will be the minimum of the value specified here and the value
// set by the application via the gRPC client API. If either one is not set,
// then the other will be used. If neither is set, then the RPC has no deadline.
Timeout time.Duration
Timeout *time.Duration
// MaxReqSize is the maximum allowed payload size for an individual request in a
// stream (client->server) in bytes. The size which is measured is the serialized
// payload after per-message compression (but before stream compression) in bytes.
// The actual value used is the minimum of the value specified here and the value set
// by the application via the gRPC client API. If either one is not set, then the other
// will be used. If neither is set, then the built-in default is used.
// TODO: support this.
MaxReqSize uint32
MaxReqSize *int
// MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes.
// TODO: support this.
MaxRespSize uint32
MaxRespSize *int
}
// ServiceConfig is provided by the service provider and contains parameters for how
@ -517,9 +493,32 @@ type ServiceConfig struct {
// via grpc.WithBalancer will override this.
LB Balancer
// Methods contains a map for the methods in this service.
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
// Otherwise, the method has no MethodConfig to use.
Methods map[string]MethodConfig
}
func min(a, b *int) *int {
if *a < *b {
return a
}
return b
}
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
if mcMax == nil && doptMax == nil {
return &defaultVal
}
if mcMax != nil && doptMax != nil {
return min(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
}
return doptMax
}
// SupportPackageIsVersion4 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the grpc package.
//
@ -529,4 +528,6 @@ type ServiceConfig struct {
const SupportPackageIsVersion4 = true
// Version is the current grpc version.
const Version = "1.2.1"
const Version = "1.4.1"
const grpcUA = "grpc-go/" + Version

View File

@ -53,12 +53,19 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
"google.golang.org/grpc/transport"
)
const (
defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
defaultServerMaxSendMessageSize = 1024 * 1024 * 4
)
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
// MethodDesc represents an RPC service's method specification.
@ -105,25 +112,63 @@ type Server struct {
}
type options struct {
creds credentials.TransportCredentials
codec Codec
cp Compressor
dc Decompressor
maxMsgSize int
unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor
inTapHandle tap.ServerInHandle
statsHandler stats.Handler
maxConcurrentStreams uint32
useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
creds credentials.TransportCredentials
codec Codec
cp Compressor
dc Decompressor
unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor
inTapHandle tap.ServerInHandle
statsHandler stats.Handler
maxConcurrentStreams uint32
maxReceiveMessageSize int
maxSendMessageSize int
useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
keepaliveParams keepalive.ServerParameters
keepalivePolicy keepalive.EnforcementPolicy
initialWindowSize int32
initialConnWindowSize int32
}
var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit
var defaultServerOptions = options{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
}
// A ServerOption sets options.
// A ServerOption sets options such as credentials, codec, and keepalive parameters.
type ServerOption func(*options)
// InitialWindowSize returns a ServerOption that sets the window size for a stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption {
return func(o *options) {
o.initialWindowSize = s
}
}
// InitialConnWindowSize returns a ServerOption that sets the window size for a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialConnWindowSize(s int32) ServerOption {
return func(o *options) {
o.initialConnWindowSize = s
}
}
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
return func(o *options) {
o.keepaliveParams = kp
}
}
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) {
o.keepalivePolicy = kep
}
}
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
func CustomCodec(codec Codec) ServerOption {
return func(o *options) {
@ -145,11 +190,25 @@ func RPCDecompressor(dc Decompressor) ServerOption {
}
}
// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages.
// If this is not set, gRPC uses the default 4MB.
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) {
o.maxMsgSize = m
o.maxReceiveMessageSize = m
}
}
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default 4MB.
func MaxSendMsgSize(m int) ServerOption {
return func(o *options) {
o.maxSendMessageSize = m
}
}
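A minimal server-side sketch combining the new send and receive limits; the 8 MiB values are illustrative, and MaxMsgSize is omitted since it only aliases MaxRecvMsgSize:

package main

import "google.golang.org/grpc"

func main() {
    // Raise both directions above the 4 MB default; MaxMsgSize alone would
    // only raise the receive side and is now deprecated.
    srv := grpc.NewServer(
        grpc.MaxRecvMsgSize(8*1024*1024),
        grpc.MaxSendMsgSize(8*1024*1024),
    )
    _ = srv
}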
@ -174,7 +233,7 @@ func Creds(c credentials.TransportCredentials) ServerOption {
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) {
if o.unaryInt != nil {
panic("The unary server interceptor has been set.")
panic("The unary server interceptor was already set and may not be reset.")
}
o.unaryInt = i
}
@ -185,7 +244,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) {
if o.streamInt != nil {
panic("The stream server interceptor has been set.")
panic("The stream server interceptor was already set and may not be reset.")
}
o.streamInt = i
}
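Interceptors are a natural consumer of the new incoming-metadata accessor. A hedged sketch of a unary interceptor that notes when a "token" key is present (the UnaryServerInterceptor and UnaryServerInfo signatures are recalled from the upstream package rather than shown in this diff, and the key name is an assumption):

package main

import (
    "log"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"
)

func tokenLogger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    if md, ok := metadata.FromIncomingContext(ctx); ok {
        if ts := md["token"]; len(ts) > 0 {
            log.Printf("%s called with a token attached", info.FullMethod)
        }
    }
    return handler(ctx, req)
}

func main() {
    srv := grpc.NewServer(grpc.UnaryInterceptor(tokenLogger))
    _ = srv
}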
@ -196,7 +255,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) {
if o.inTapHandle != nil {
panic("The tap handle has been set.")
panic("The tap handle was already set and may not be reset.")
}
o.inTapHandle = h
}
@ -211,7 +270,7 @@ func StatsHandler(h stats.Handler) ServerOption {
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the the "unimplemented" gRPC
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
// The handling function has full access to the Context of the request and the
// stream, and the invocation passes through interceptors.
@ -230,8 +289,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
var opts options
opts.maxMsgSize = defaultMaxMsgSize
opts := defaultServerOptions
for _, o := range opt {
o(&opts)
}
@ -270,8 +328,8 @@ func (s *Server) errorf(format string, a ...interface{}) {
}
}
// RegisterService register a service and its implementation to the gRPC
// server. Called from the IDL generated code. This must be called before
// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
// invoking Serve.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
ht := reflect.TypeOf(sd.HandlerType).Elem()
@ -316,7 +374,7 @@ type MethodInfo struct {
IsServerStream bool
}
// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service.
// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct {
Methods []MethodInfo
// Metadata is the metadata specified in ServiceDesc when registering service.
@ -409,10 +467,12 @@ func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("Accept error: %v; retrying in %v", err, tempDelay)
s.mu.Unlock()
timer := time.NewTimer(tempDelay)
select {
case <-time.After(tempDelay):
case <-timer.C:
case <-s.ctx.Done():
}
timer.Stop()
continue
}
s.mu.Lock()
@ -465,10 +525,14 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
// transport.NewServerTransport).
func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
config := &transport.ServerConfig{
MaxStreams: s.opts.maxConcurrentStreams,
AuthInfo: authInfo,
InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler,
MaxStreams: s.opts.maxConcurrentStreams,
AuthInfo: authInfo,
InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
InitialConnWindowSize: s.opts.initialConnWindowSize,
}
st, err := transport.NewServerTransport("http2", c, config)
if err != nil {
@ -600,14 +664,11 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
}
p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
if err != nil {
// This typically indicates a fatal issue (e.g., memory
// corruption or hardware faults) the application program
// cannot handle.
//
// TODO(zhaoq): There exist other options also such as only closing the
// faulty stream locally and remotely (Other streams can keep going). Find
// the optimal option.
grpclog.Fatalf("grpc: Server failed to encode response %v", err)
grpclog.Println("grpc: server failed to encode response: ", err)
return err
}
if len(p) > s.opts.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(p), s.opts.maxSendMessageSize)
}
err = t.Write(stream, p, opts)
if err == nil && outPayload != nil {
@ -624,9 +685,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
EndTime: time.Now(),
}
@ -634,8 +693,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
}
}()
}()
}
if trInfo != nil {
defer trInfo.tr.Finish()
trInfo.firstLine.client = false
@ -652,136 +711,137 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
stream.SetSendCompress(s.opts.cp.Type())
}
p := &parser{r: stream}
for {
pf, req, err := p.recvMsg(s.opts.maxMsgSize)
pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
if err != nil {
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
} else {
switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
}
}
return err
}
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
return err
}
if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO checkRecvPayload always return RPC error. Add a return here if necessary.
}
var inPayload *stats.InPayload
if sh != nil {
inPayload = &stats.InPayload{
RecvTime: time.Now(),
}
}
df := func(v interface{}) error {
if inPayload != nil {
inPayload.WireLength = len(req)
}
if pf == compressionMade {
var err error
req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil {
return Errorf(codes.Internal, err.Error())
}
}
if len(req) > s.opts.maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with
// java implementation.
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
}
if err := s.opts.codec.Unmarshal(req, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
if inPayload != nil {
inPayload.Payload = v
inPayload.Data = req
inPayload.Length = len(req)
sh.HandleRPC(stream.Context(), inPayload)
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
}
return nil
}
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
// Convert appErr if it is not a grpc status error.
appErr = status.Error(convertCode(appErr), appErr.Error())
appStatus, _ = status.FromError(appErr)
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
trInfo.tr.SetError()
}
if e := t.WriteStatus(stream, appStatus); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return appErr
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
opts := &transport.Options{
Last: true,
Delay: false,
}
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
if err != nil {
switch err := err.(type) {
case *rpcError:
if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
if s, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, s); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
if e := t.WriteStatus(stream, err.Code, err.Desc); e != nil {
if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
}
return err
}
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
switch err := err.(type) {
case *rpcError:
if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
return err
default:
if e := t.WriteStatus(stream, codes.Internal, err.Error()); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO checkRecvPayload always return RPC error. Add a return here if necessary.
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
}
var inPayload *stats.InPayload
if sh != nil {
inPayload = &stats.InPayload{
RecvTime: time.Now(),
}
}
statusCode := codes.OK
statusDesc := ""
df := func(v interface{}) error {
if inPayload != nil {
inPayload.WireLength = len(req)
}
if pf == compressionMade {
var err error
req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil {
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
}
return Errorf(codes.Internal, err.Error())
}
}
if len(req) > s.opts.maxMsgSize {
// TODO: Revisit the error code. Currently keep it consistent with
// java implementation.
statusCode = codes.Internal
statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
}
if err := s.opts.codec.Unmarshal(req, v); err != nil {
return err
}
if inPayload != nil {
inPayload.Payload = v
inPayload.Data = req
inPayload.Length = len(req)
sh.HandleRPC(stream.Context(), inPayload)
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
}
return nil
}
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil {
if err, ok := appErr.(*rpcError); ok {
statusCode = err.code
statusDesc = err.desc
} else {
statusCode = convertCode(appErr)
statusDesc = appErr.Error()
}
if trInfo != nil && statusCode != codes.OK {
trInfo.tr.LazyLog(stringer(statusDesc), true)
trInfo.tr.SetError()
}
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
}
return Errorf(statusCode, statusDesc)
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
opts := &transport.Options{
Last: true,
Delay: false,
}
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
switch err := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
statusCode = err.Code
statusDesc = err.Desc
default:
statusCode = codes.Unknown
statusDesc = err.Error()
}
return err
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
errWrite := t.WriteStatus(stream, statusCode, statusDesc)
if statusCode != codes.OK {
return Errorf(statusCode, statusDesc)
}
return errWrite
return err
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
// TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
return t.WriteStatus(stream, status.New(codes.OK, ""))
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
@ -791,9 +851,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
EndTime: time.Now(),
}
@ -801,21 +859,22 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
}
}()
}()
}
if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type())
}
ss := &serverStream{
t: t,
s: stream,
p: &parser{r: stream},
codec: s.opts.codec,
cp: s.opts.cp,
dc: s.opts.dc,
maxMsgSize: s.opts.maxMsgSize,
trInfo: trInfo,
statsHandler: sh,
t: t,
s: stream,
p: &parser{r: stream},
codec: s.opts.codec,
cp: s.opts.cp,
dc: s.opts.dc,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
statsHandler: sh,
}
if ss.cp != nil {
ss.cbuf = new(bytes.Buffer)
@ -849,32 +908,32 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
appErr = s.opts.streamInt(server, ss, info, sd.Handler)
}
if appErr != nil {
if err, ok := appErr.(*rpcError); ok {
ss.statusCode = err.code
ss.statusDesc = err.desc
} else if err, ok := appErr.(transport.StreamError); ok {
ss.statusCode = err.Code
ss.statusDesc = err.Desc
} else {
ss.statusCode = convertCode(appErr)
ss.statusDesc = appErr.Error()
appStatus, ok := status.FromError(appErr)
if !ok {
switch err := appErr.(type) {
case transport.StreamError:
appStatus = status.New(err.Code, err.Desc)
default:
appStatus = status.New(convertCode(appErr), appErr.Error())
}
appErr = appStatus.Err()
}
if trInfo != nil {
ss.mu.Lock()
ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
ss.trInfo.tr.SetError()
ss.mu.Unlock()
}
t.WriteStatus(ss.s, appStatus)
// TODO: Should we log an error from WriteStatus here and below?
return appErr
}
if trInfo != nil {
ss.mu.Lock()
if ss.statusCode != codes.OK {
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
ss.trInfo.tr.SetError()
} else {
ss.trInfo.tr.LazyLog(stringer("OK"), false)
}
ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
if ss.statusCode != codes.OK {
return Errorf(ss.statusCode, ss.statusDesc)
}
return errWrite
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
}
@ -890,7 +949,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
if err := t.WriteStatus(stream, codes.InvalidArgument, errDesc); err != nil {
if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@ -915,7 +974,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("unknown service %v", service)
if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@ -945,7 +1004,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
return
}
errDesc := fmt.Sprintf("unknown method %v", method)
if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@ -988,8 +1047,9 @@ func (s *Server) Stop() {
s.mu.Unlock()
}
// GracefulStop stops the gRPC server gracefully. It stops the server to accept new
// connections and RPCs and blocks until all the pending RPCs are finished.
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
s.mu.Lock()
defer s.mu.Unlock()

View File

@ -45,19 +45,22 @@ type ConnTagInfo struct {
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
// TODO add QOS related fields.
}
// RPCTagInfo defines the relevant information needed by RPC context tagger.
type RPCTagInfo struct {
// FullMethodName is the RPC method in the format of /package.service/method.
FullMethodName string
// FailFast indicates if this RPC is failfast.
// This field is only valid on client side, it's always false on server side.
FailFast bool
}
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
type Handler interface {
// TagRPC can attach some information to the given context.
// The returned context is used in the rest lifetime of the RPC.
// The context used for the rest of the lifetime of the RPC will be derived from
// the returned context.
TagRPC(context.Context, *RPCTagInfo) context.Context
// HandleRPC processes the RPC stats.
HandleRPC(context.Context, RPCStats)
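A hedged sketch of a Handler implementation registered through the StatsHandler server option shown elsewhere in this diff; TagConn and HandleConn complete the interface but sit outside the hunk above, so their signatures are recalled from the upstream package:

package main

import (
    "log"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/stats"
)

// rpcLogger logs RPC begin/end events.
type rpcLogger struct{}

func (rpcLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    log.Printf("starting %s (failfast=%v)", info.FullMethodName, info.FailFast)
    return ctx
}

func (rpcLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
    if end, ok := s.(*stats.End); ok {
        log.Printf("rpc finished, err=%v", end.Error)
    }
}

// Connection-level hooks; signatures recalled from the upstream package.
func (rpcLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (rpcLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
    srv := grpc.NewServer(grpc.StatsHandler(rpcLogger{}))
    _ = srv
}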

View File

@ -49,7 +49,7 @@ type RPCStats interface {
}
// Begin contains stats when an RPC begins.
// FailFast are only valid if Client is true.
// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
Client bool
@ -59,7 +59,7 @@ type Begin struct {
FailFast bool
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {}
@ -80,19 +80,19 @@ type InPayload struct {
RecvTime time.Time
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
// InHeader contains stats when a header is received.
// FullMethod, addresses and Compression are only valid if Client is false.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
// The following fields are valid only if Client is false.
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
@ -103,7 +103,7 @@ type InHeader struct {
Compression string
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
@ -116,7 +116,7 @@ type InTrailer struct {
WireLength int
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
@ -137,19 +137,19 @@ type OutPayload struct {
SentTime time.Time
}
// IsClient indicates if this is from client side.
// IsClient indicates if this stats information is from client side.
func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
// OutHeader contains stats when a header is sent.
// FullMethod, addresses and Compression are only valid if Client is true.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
// The following fields are valid only if Client is true.
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
@ -160,7 +160,7 @@ type OutHeader struct {
Compression string
}
// IsClient indicates if this is from client side.
// IsClient indicates if this stats information is from client side.
func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
@ -173,7 +173,7 @@ type OutTrailer struct {
WireLength int
}
// IsClient indicates if this is from client side.
// IsClient indicates if this stats information is from client side.
func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
@ -184,7 +184,7 @@ type End struct {
Client bool
// EndTime is the time when the RPC ends.
EndTime time.Time
// Error is the error just happened. Its type is gRPC error.
// Error is the error that just happened. It implements status.Status if non-nil.
Error error
}

145
cmd/vendor/google.golang.org/grpc/status/status.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package status implements errors returned by gRPC. These errors are
// serialized and transmitted on the wire between server and client, and allow
// for additional data to be transmitted via the Details field in the status
// proto. gRPC service handlers should return an error created by this
// package, and gRPC clients should expect a corresponding error to be
// returned from the RPC call.
//
// This package upholds the invariants that a non-nil error may not
// contain an OK code, and an OK code must result in a nil error.
package status
import (
"fmt"
"github.com/golang/protobuf/proto"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
)
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status
func (se *statusError) Error() string {
p := (*spb.Status)(se)
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}
func (se *statusError) status() *Status {
return &Status{s: (*spb.Status)(se)}
}
// Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto.
type Status struct {
s *spb.Status
}
// Code returns the status code contained in s.
func (s *Status) Code() codes.Code {
if s == nil || s.s == nil {
return codes.OK
}
return codes.Code(s.s.Code)
}
// Message returns the message contained in s.
func (s *Status) Message() string {
if s == nil || s.s == nil {
return ""
}
return s.s.Message
}
// Proto returns s's status as an spb.Status proto message.
func (s *Status) Proto() *spb.Status {
if s == nil {
return nil
}
return proto.Clone(s.s).(*spb.Status)
}
// Err returns an immutable error representing s; returns nil if s.Code() is
// OK.
func (s *Status) Err() error {
if s.Code() == codes.OK {
return nil
}
return (*statusError)(s.s)
}
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
}
// Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status {
return New(c, fmt.Sprintf(format, a...))
}
// Error returns an error representing c and msg. If c is OK, returns nil.
func Error(c codes.Code, msg string) error {
return New(c, msg).Err()
}
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error {
return Error(c, fmt.Sprintf(format, a...))
}
// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
func ErrorProto(s *spb.Status) error {
return FromProto(s).Err()
}
// FromProto returns a Status representing s.
func FromProto(s *spb.Status) *Status {
return &Status{s: proto.Clone(s).(*spb.Status)}
}
// FromError returns a Status representing err if it was produced from this
// package, otherwise it returns nil, false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
if s, ok := err.(*statusError); ok {
return s.status(), true
}
return nil, false
}
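To round out the package, a small sketch of the invariants it documents: New plus Err builds the error a handler would return, an OK status maps to nil, and FromError(nil) reports codes.OK:

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    // New + Err builds the error a handler would return.
    err := status.New(codes.InvalidArgument, "bad request").Err()
    fmt.Println(err) // rpc error: code = InvalidArgument desc = bad request

    // An OK status never turns into an error.
    fmt.Println(status.New(codes.OK, "").Err()) // <nil>

    // And a nil error is reported back as codes.OK.
    if s, ok := status.FromError(nil); ok {
        fmt.Println(s.Code()) // OK
    }
}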

View File

@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@ -112,17 +113,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
cancel context.CancelFunc
)
c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
}
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
}
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(&c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
@ -131,6 +139,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
if c.creds != nil {
callHdr.Creds = c.creds
}
var trInfo traceInfo
if EnableTracing {
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
@ -150,26 +161,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}()
}
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if err != nil && sh != nil {
// Only handle end stats if err != nil.
end := &stats.End{
Client: true,
Error: err,
defer func() {
if err != nil {
// Only handle end stats if err != nil.
end := &stats.End{
Client: true,
Error: err,
}
sh.HandleRPC(ctx, end)
}
sh.HandleRPC(ctx, end)
}
}()
}()
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
}
@ -177,7 +189,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
if _, ok := status.FromError(err); ok {
return nil, err
}
if err == errConnClosing || err == errConnUnavailable {
@ -192,14 +204,17 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
s, err = t.NewStream(ctx, callHdr)
if err != nil {
if _, ok := err.(transport.ConnectionError); ok && put != nil {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
if put != nil {
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return nil, toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return nil, toRPCErr(err)
@ -207,14 +222,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
break
}
cs := &clientStream{
opts: opts,
c: c,
desc: desc,
codec: cc.dopts.codec,
cp: cc.dopts.cp,
dc: cc.dopts.dc,
maxMsgSize: cc.dopts.maxMsgSize,
cancel: cancel,
opts: opts,
c: c,
desc: desc,
codec: cc.dopts.codec,
cp: cc.dopts.cp,
dc: cc.dopts.dc,
cancel: cancel,
put: put,
t: t,
@ -236,14 +250,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
select {
case <-t.Error():
// Incur transport error, simply exit.
case <-cc.ctx.Done():
cs.finish(ErrClientConnClosing)
cs.closeTransportStream(ErrClientConnClosing)
case <-s.Done():
// TODO: The trace of the RPC is terminated here when there is no pending
// I/O, which is probably not the optimal solution.
if s.StatusCode() == codes.OK {
cs.finish(nil)
} else {
cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
}
cs.finish(s.Status().Err())
cs.closeTransportStream(nil)
case <-s.GoAway():
cs.finish(errConnDrain)
@ -259,24 +272,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
opts []CallOption
c callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
desc *StreamDesc
codec Codec
cp Compressor
cbuf *bytes.Buffer
dc Decompressor
maxMsgSize int
cancel context.CancelFunc
opts []CallOption
c callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
desc *StreamDesc
codec Codec
cp Compressor
cbuf *bytes.Buffer
dc Decompressor
cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created.
mu sync.Mutex
put func()
closed bool
mu sync.Mutex
put func()
closed bool
finished bool
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called.
trInfo traceInfo
@ -351,7 +364,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
}()
if err != nil {
return Errorf(codes.Internal, "grpc: %v", err)
return err
}
if cs.c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(out) > *cs.c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize)
}
err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
if err == nil && outPayload != nil {
@ -362,28 +381,16 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
defer func() {
if err != nil && cs.statsHandler != nil {
// Only generate End if err != nil.
// If err == nil, it's not the last RecvMsg.
// The last RecvMsg gets either an RPC error or io.EOF.
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
}()
var inPayload *stats.InPayload
if cs.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload)
if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
defer func() {
// err != nil indicates the termination of the stream.
if err != nil {
@ -406,17 +413,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
// Special handling for client streaming rpc.
// This recv expects EOF or errors, so we don't collect inPayload.
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil)
if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
cs.closeTransportStream(err)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
if err == io.EOF {
if cs.s.StatusCode() == codes.OK {
cs.finish(err)
return nil
if se := cs.s.Status().Err(); se != nil {
return se
}
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
cs.finish(err)
return nil
}
return toRPCErr(err)
}
@ -424,11 +434,11 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
cs.closeTransportStream(err)
}
if err == io.EOF {
if cs.s.StatusCode() == codes.OK {
// Returns io.EOF to indicate the end of the stream.
return
if statusErr := cs.s.Status().Err(); statusErr != nil {
return statusErr
}
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
// Returns io.EOF to indicate the end of the stream.
return
}
return toRPCErr(err)
}
@ -462,20 +472,39 @@ func (cs *clientStream) closeTransportStream(err error) {
}
func (cs *clientStream) finish(err error) {
cs.mu.Lock()
defer cs.mu.Unlock()
if cs.finished {
return
}
cs.finished = true
defer func() {
if cs.cancel != nil {
cs.cancel()
}
}()
cs.mu.Lock()
defer cs.mu.Unlock()
for _, o := range cs.opts {
o.after(&cs.c)
}
if cs.put != nil {
updateRPCInfoInContext(cs.s.Context(), rpcInfo{
bytesSent: cs.s.BytesSent(),
bytesReceived: cs.s.BytesReceived(),
})
cs.put()
cs.put = nil
}
if cs.statsHandler != nil {
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
// end.Error is nil if the RPC finished successfully.
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
if !cs.tracing {
return
}
@ -512,17 +541,16 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
t transport.ServerTransport
s *transport.Stream
p *parser
codec Codec
cp Compressor
dc Decompressor
cbuf *bytes.Buffer
maxMsgSize int
statusCode codes.Code
statusDesc string
trInfo *traceInfo
t transport.ServerTransport
s *transport.Stream
p *parser
codec Codec
cp Compressor
dc Decompressor
cbuf *bytes.Buffer
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
statsHandler stats.Handler
@ -578,9 +606,11 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
}()
if err != nil {
err = Errorf(codes.Internal, "grpc: %v", err)
return err
}
if len(out) > ss.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
@ -610,7 +640,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF {
return err
}

View File

@ -46,12 +46,20 @@ const (
// The default value of flow control window size in HTTP2 spec.
defaultWindowSize = 65535
// The initial window size for flow control.
initialWindowSize = defaultWindowSize // for an RPC
initialConnWindowSize = defaultWindowSize * 16 // for a connection
infinity = time.Duration(math.MaxInt64)
defaultKeepaliveTime = infinity
defaultKeepaliveTimeout = time.Duration(20 * time.Second)
defaultMaxStreamsClient = 100
initialWindowSize = defaultWindowSize // for an RPC
initialConnWindowSize = defaultWindowSize * 16 // for a connection
infinity = time.Duration(math.MaxInt64)
defaultClientKeepaliveTime = infinity
defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
defaultMaxStreamsClient = 100
defaultMaxConnectionIdle = infinity
defaultMaxConnectionAge = infinity
defaultMaxConnectionAgeGrace = infinity
defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
// max window limit set by HTTP2 Specs.
maxWindowSize = math.MaxInt32
)
// The following defines various control items which could flow through
@ -60,6 +68,7 @@ const (
type windowUpdate struct {
streamID uint32
increment uint32
flush bool
}
func (*windowUpdate) item() {}
@ -79,6 +88,8 @@ type resetStream struct {
func (*resetStream) item() {}
type goAway struct {
code http2.ErrCode
debugData []byte
}
func (*goAway) item() {}
@ -159,6 +170,40 @@ type inFlow struct {
// The amount of data the application has consumed but grpc has not sent
// a window update for yet. Used to reduce window update frequency.
pendingUpdate uint32
// delta is the extra window update given by receiver when an application
// is reading data bigger in size than the inFlow limit.
delta uint32
}
func (f *inFlow) maybeAdjust(n uint32) uint32 {
if n > uint32(math.MaxInt32) {
n = uint32(math.MaxInt32)
}
f.mu.Lock()
defer f.mu.Unlock()
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
// can send without a window update.
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
// estUntransmittedData is the maximum number of bytes the sender might not have put
// on the wire yet. A value of 0 or less means that we have already received all or
// more bytes than the application is requesting to read.
estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
// This implies that unless we send a window update, the sender won't be able to send all the bytes
// for this message. Therefore we must send an update over the limit since there's an active read
// request from the application.
if estUntransmittedData > estSenderQuota {
// The sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
if f.limit+n > maxWindowSize {
f.delta = maxWindowSize - f.limit
} else {
// Send a window update for the whole message and not just the difference between
// estUntransmittedData and estSenderQuota. This will be helpful in case the message
// is padded; we will fall back on the currently available window (at least a 1/4th of the limit).
f.delta = n
}
return f.delta
}
return 0
}
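To make the arithmetic above concrete, here is a standalone sketch (illustrative only, not part of this change) that replays the maybeAdjust decision with sample numbers; the flowState type and its fields are hypothetical stand-ins for inFlow.

package main

import "fmt"

// flowState mirrors the inFlow fields used by the adjustment logic above;
// it is a simplified stand-in for illustration only.
type flowState struct {
	limit, pendingData, pendingUpdate, delta uint32
}

// maybeAdjust reproduces the window-adjustment arithmetic described above.
func (f *flowState) maybeAdjust(n uint32) uint32 {
	const maxWindow = 1<<31 - 1 // max window allowed by the HTTP/2 spec
	// Receiver's estimate of how much the sender may still send without an update.
	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
	// Bytes of the requested message that may not be on the wire yet.
	estUntransmitted := int32(n - f.pendingData)
	if estUntransmitted > estSenderQuota {
		if f.limit+n > maxWindow {
			f.delta = maxWindow - f.limit
		} else {
			f.delta = n // update for the whole message, in case of padding
		}
		return f.delta
	}
	return 0
}

func main() {
	// 64 KiB stream window with 16 KiB already received but unread;
	// the application asks to read a 100 KiB message.
	f := &flowState{limit: 65535, pendingData: 16384}
	fmt.Println(f.maybeAdjust(102400)) // 102400: an extra window update is needed
}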
// onData is invoked when some data frame is received. It updates pendingData.
@ -166,7 +211,7 @@ func (f *inFlow) onData(n uint32) error {
f.mu.Lock()
defer f.mu.Unlock()
f.pendingData += n
if f.pendingData+f.pendingUpdate > f.limit {
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
}
return nil
@ -181,6 +226,13 @@ func (f *inFlow) onRead(n uint32) uint32 {
return 0
}
f.pendingData -= n
if n > f.delta {
n -= f.delta
f.delta = 0
} else {
f.delta -= n
n = 0
}
f.pendingUpdate += n
if f.pendingUpdate >= f.limit/4 {
wu := f.pendingUpdate
@ -190,10 +242,10 @@ func (f *inFlow) onRead(n uint32) uint32 {
return 0
}
func (f *inFlow) resetPendingData() uint32 {
func (f *inFlow) resetPendingUpdate() uint32 {
f.mu.Lock()
defer f.mu.Unlock()
n := f.pendingData
f.pendingData = 0
n := f.pendingUpdate
f.pendingUpdate = 0
return n
}

View File

@ -37,6 +37,8 @@ package transport
import (
"net"
"google.golang.org/grpc/codes"
"golang.org/x/net/context"
)
@ -44,3 +46,14 @@ import (
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
}

View File

@ -35,12 +35,26 @@
package transport
import (
"context"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled, netctx.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
}

View File

@ -53,6 +53,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC
@ -101,14 +102,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
continue
}
for _, v := range vv {
if k == "user-agent" {
// user-agent is special. Copying logic of http_util.go.
if i := strings.LastIndex(v, " "); i == -1 {
// There is no application user agent string being set
continue
} else {
v = v[:i]
}
v, err := decodeMetadataHeader(k, v)
if err != nil {
return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
}
metakv = append(metakv, k, v)
}
@ -174,15 +170,22 @@ func (a strAddr) String() string { return string(a) }
// do runs fn in the ServeHTTP goroutine.
func (ht *serverHandlerTransport) do(fn func()) error {
// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
select {
case ht.writes <- fn:
return nil
case <-ht.closedCh:
return ErrConnClosing
default:
select {
case ht.writes <- fn:
return nil
case <-ht.closedCh:
return ErrConnClosing
}
}
}
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
err := ht.do(func() {
ht.writeCommonHeaders(s)
@ -192,10 +195,13 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
ht.rw.(http.Flusher).Flush()
h := ht.rw.Header()
h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
if statusDesc != "" {
h.Set("Grpc-Message", encodeGrpcMessage(statusDesc))
h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
if m := st.Message(); m != "" {
h.Set("Grpc-Message", encodeGrpcMessage(m))
}
// TODO: Support Grpc-Status-Details-Bin
if md := s.Trailer(); len(md) > 0 {
for k, vv := range md {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
@ -203,10 +209,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
continue
}
for _, v := range vv {
// http2 ResponseWriter mechanism to
// send undeclared Trailers after the
// headers have possibly been written.
h.Add(http2.TrailerPrefix+k, v)
// http2 ResponseWriter mechanism to send undeclared Trailers after
// the headers have possibly been written.
h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
}
}
}
@ -234,6 +239,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
h.Add("Trailer", "Grpc-Status")
h.Add("Trailer", "Grpc-Message")
// TODO: Support Grpc-Status-Details-Bin
if s.sendCompress != "" {
h.Set("Grpc-Encoding", s.sendCompress)
@ -260,6 +266,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
continue
}
for _, v := range vv {
v = encodeMetadataHeader(k, v)
h.Add(k, v)
}
}
@ -300,13 +307,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
req := ht.req
s := &Stream{
id: 0, // irrelevant
windowHandler: func(int) {}, // nothing
cancel: cancel,
buf: newRecvBuffer(),
st: ht,
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
id: 0, // irrelevant
requestRead: func(int) {},
cancel: cancel,
buf: newRecvBuffer(),
st: ht,
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
}
pr := &peer.Peer{
Addr: ht.RemoteAddr(),
@ -314,10 +321,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
if req.TLS != nil {
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
}
ctx = metadata.NewContext(ctx, ht.headerMD)
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
ctx = peer.NewContext(ctx, pr)
s.ctx = newContextWithStream(ctx, s)
s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, recv: s.buf},
windowHandler: func(int) {},
}
// readerDone is closed when the Body.Read-ing goroutine exits.
readerDone := make(chan struct{})

View File

@ -35,7 +35,6 @@ package transport
import (
"bytes"
"fmt"
"io"
"math"
"net"
@ -54,6 +53,7 @@ import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
// http2Client implements the ClientTransport interface with HTTP2.
@ -101,6 +101,8 @@ type http2Client struct {
// The scheme used: https if TLS is on, http otherwise.
scheme string
isSecure bool
creds []credentials.PerRPCCredentials
// Boolean to keep track of reading activity on transport.
@ -110,6 +112,8 @@ type http2Client struct {
statsHandler stats.Handler
initialWindowSize int32
mu sync.Mutex // guard the following variables
state transportState // the state of underlying connection
activeStreams map[uint32]*Stream
@ -121,6 +125,9 @@ type http2Client struct {
goAwayID uint32
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
prevGoAwayID uint32
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@ -166,9 +173,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
conn, err := dial(ctx, opts.Dialer, addr.Addr)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err)
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
}
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
}
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
@ -176,7 +183,10 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
conn.Close()
}
}(conn)
var authInfo credentials.AuthInfo
var (
isSecure bool
authInfo credentials.AuthInfo
)
if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
@ -184,16 +194,21 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
// Credentials handshake errors are typically considered permanent
// to avoid retrying on e.g. bad certificates.
temp := isTemporary(err)
return nil, connectionErrorf(temp, err, "transport: %v", err)
return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
}
isSecure = true
}
kp := opts.KeepaliveParams
// Validate keepalive parameters.
if kp.Time == 0 {
kp.Time = defaultKeepaliveTime
kp.Time = defaultClientKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultKeepaliveTimeout
kp.Timeout = defaultClientKeepaliveTimeout
}
icwz := int32(initialConnWindowSize)
if opts.InitialConnWindowSize >= defaultWindowSize {
icwz = opts.InitialConnWindowSize
}
var buf bytes.Buffer
t := &http2Client{
@ -206,27 +221,32 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
localAddr: conn.LocalAddr(),
authInfo: authInfo,
// The client initiated stream id is odd starting from 1.
nextID: 1,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
errorChan: make(chan struct{}),
goAway: make(chan struct{}),
awakenKeepalive: make(chan struct{}, 1),
framer: newFramer(conn),
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
scheme: scheme,
state: reachable,
activeStreams: make(map[uint32]*Stream),
creds: opts.PerRPCCredentials,
maxStreams: defaultMaxStreamsClient,
streamsQuota: newQuotaPool(defaultMaxStreamsClient),
streamSendQuota: defaultWindowSize,
kp: kp,
statsHandler: opts.StatsHandler,
nextID: 1,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
errorChan: make(chan struct{}),
goAway: make(chan struct{}),
awakenKeepalive: make(chan struct{}, 1),
framer: newFramer(conn),
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
scheme: scheme,
state: reachable,
activeStreams: make(map[uint32]*Stream),
isSecure: isSecure,
creds: opts.PerRPCCredentials,
maxStreams: defaultMaxStreamsClient,
streamsQuota: newQuotaPool(defaultMaxStreamsClient),
streamSendQuota: defaultWindowSize,
kp: kp,
statsHandler: opts.StatsHandler,
initialWindowSize: initialWindowSize,
}
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
}
// Make sure awakenKeepalive can't be written upon.
// keepalive routine will make it writable, if need be.
@ -249,29 +269,29 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
n, err := t.conn.Write(clientPreface)
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
}
if n != len(clientPreface) {
t.Close()
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
}
if initialWindowSize != defaultWindowSize {
if t.initialWindowSize != defaultWindowSize {
err = t.framer.writeSettings(true, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(initialWindowSize),
Val: uint32(t.initialWindowSize),
})
} else {
err = t.framer.writeSettings(true)
}
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
}
}
go t.controller()
@ -291,27 +311,33 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
buf: newRecvBuffer(),
fc: &inFlow{limit: initialWindowSize},
fc: &inFlow{limit: uint32(t.initialWindowSize)},
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
headerChan: make(chan struct{}),
}
t.nextID += 2
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
// The client side stream context should have exactly the same life cycle as the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
s.dec = &recvBufferReader{
ctx: s.ctx,
goAway: s.goAway,
recv: s.buf,
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
goAway: s.goAway,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
return s
}
// NewStream creates a stream and register it into the transport as "active"
// NewStream creates a stream and registers it into the transport as "active"
// streams.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
pr := &peer.Peer{
@ -321,10 +347,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
userCtx := ctx
ctx = peer.NewContext(ctx, pr)
authData := make(map[string]string)
for _, c := range t.creds {
var (
authData = make(map[string]string)
audience string
)
// Create an audience string only if needed.
if len(t.creds) > 0 || callHdr.Creds != nil {
// Construct URI required to get auth request metadata.
var port string
if pos := strings.LastIndex(t.target, ":"); pos != -1 {
@ -335,17 +364,39 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
pos := strings.LastIndex(callHdr.Method, "/")
if pos == -1 {
return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
pos = len(callHdr.Method)
}
audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
audience = "https://" + callHdr.Host + port + callHdr.Method[:pos]
}
for _, c := range t.creds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err)
return nil, streamErrorf(codes.Internal, "transport: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2.
k = strings.ToLower(k)
authData[k] = v
}
}
callAuthData := make(map[string]string)
// Check if credentials.PerRPCCredentials were provided via call options.
// Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied.
if callCreds := callHdr.Creds; callCreds != nil {
if !t.isSecure && callCreds.RequireTransportSecurity() {
return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
return nil, streamErrorf(codes.Internal, "transport: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2
k = strings.ToLower(k)
callAuthData[k] = v
}
}
t.mu.Lock()
if t.activeStreams == nil {
t.mu.Unlock()
@ -388,7 +439,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, ErrConnClosing
}
s := t.newStream(ctx, callHdr)
s.clientStatsCtx = userCtx
t.activeStreams[s.id] = s
// If the number of active streams change from 0 to 1, then check if keepalive
// has gone dormant. If so, wake it up.
@ -424,33 +474,34 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
for k, v := range authData {
// Capital header names are illegal in HTTP/2.
k = strings.ToLower(k)
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
for k, v := range callAuthData {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
var (
hasMD bool
endHeaders bool
)
if md, ok := metadata.FromContext(ctx); ok {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
hasMD = true
for k, v := range md {
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
if md, ok := t.md.(*metadata.MD); ok {
for k, v := range *md {
for k, vv := range *md {
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
@ -490,6 +541,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
s.bytesSent = true
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
@ -499,7 +552,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
}
t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader)
t.statsHandler.HandleRPC(s.ctx, outHeader)
}
t.writableChan <- 0
return s, nil
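The callAuthData handling above is what backs per-RPC call credentials. As a hedged usage sketch, assuming the grpc.PerRPCCredentials call option and the credentials.PerRPCCredentials interface available in this gRPC version, a caller might attach a token to a single RPC as below; tokenCreds and the token value are hypothetical.

package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// tokenCreds is a hypothetical per-RPC credential that adds a bearer token.
type tokenCreds struct{ token string }

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// Returning true triggers the insecure-connection check shown in NewStream above.
func (c tokenCreds) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = tokenCreds{}

// callOption builds the option that routes the credentials into CallHdr.Creds
// for one RPC only, e.g. client.SomeMethod(ctx, req, callOption("secret")).
func callOption(token string) grpc.CallOption {
	return grpc.PerRPCCredentials(tokenCreds{token: token})
}

func main() {
	_ = callOption("example-token")
}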
@ -513,6 +566,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
t.mu.Unlock()
return
}
if err != nil {
// notify in-flight streams, before the deletion
s.write(recvMsg{err: err})
}
delete(t.activeStreams, s.id)
if t.state == draining && len(t.activeStreams) == 0 {
// The transport is draining and s is the last live stream on t.
@ -542,11 +599,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
s.mu.Lock()
rstStream = s.rstStream
rstError = s.rstError
if q := s.fc.resetPendingData(); q > 0 {
if n := t.fc.onRead(q); n > 0 {
t.controlBuf.put(&windowUpdate{0, n})
}
}
if s.state == streamDone {
s.mu.Unlock()
return
@ -755,6 +807,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
return s, ok
}
// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
s.mu.Lock()
defer s.mu.Unlock()
if s.state == streamDone {
return
}
if w := s.fc.maybeAdjust(n); w > 0 {
// Piggyback the connection's window update along.
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
@ -764,11 +834,11 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
if s.state == streamDone {
return
}
if w := t.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
@ -778,43 +848,39 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.notifyError(connectionErrorf(true, err, "%v", err))
return
}
// Decouple the connection's flow control from the application's read.
// An update on the connection's flow control should not depend on
// whether the user application has read the data or not. Such a
// restriction is already imposed on the stream's flow control,
// and therefore the sender will be blocked anyway.
// Decoupling the connection flow control will prevent other
// active (fast) streams from starving in the presence of slow or
// inactive streams.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w, true})
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if size > 0 {
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
// The stream has been closed. Release the corresponding quota.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if err := s.fc.onData(uint32(size)); err != nil {
s.state = streamDone
s.statusCode = codes.Internal
s.statusDesc = err.Error()
s.rstStream = true
s.rstError = http2.ErrCodeFlowControl
close(s.done)
s.finish(status.New(codes.Internal, err.Error()))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
return
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
s.mu.Unlock()
@ -835,10 +901,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
s.mu.Unlock()
return
}
s.state = streamDone
s.statusCode = codes.Internal
s.statusDesc = "server closed the stream without sending trailers"
close(s.done)
s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@ -854,18 +917,16 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
s.mu.Unlock()
return
}
s.state = streamDone
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
if !ok {
grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
s.statusCode = codes.Unknown
statusCode = codes.Unknown
}
s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode)
close(s.done)
s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@ -893,6 +954,9 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
}
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
t.mu.Lock()
if t.state == reachable || t.state == draining {
if f.LastStreamID > 0 && f.LastStreamID%2 != 1 {
@ -914,6 +978,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock()
return
default:
t.setGoAwayReason(f)
}
t.goAwayID = f.LastStreamID
close(t.goAway)
@ -921,6 +986,26 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock()
}
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
// It expects a lock on the transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = NoReason
switch f.ErrCode {
case http2.ErrCodeEnhanceYourCalm:
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = TooManyPings
}
}
}
func (t *http2Client) GetGoAwayReason() GoAwayReason {
t.mu.Lock()
defer t.mu.Unlock()
return t.goAwayReason
}
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
id := f.Header().StreamID
incr := f.Increment
@ -939,18 +1024,16 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !ok {
return
}
s.bytesReceived = true
var state decodeState
for _, hf := range frame.Fields {
state.processHeaderField(hf)
}
if state.err != nil {
if err := state.decodeResponseHeader(frame); err != nil {
s.mu.Lock()
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
s.mu.Unlock()
s.write(recvMsg{err: state.err})
s.write(recvMsg{err: err})
// Something wrong. Stops reading even when there is remaining.
return
}
@ -964,13 +1047,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
Client: true,
WireLength: int(frame.Header().Length),
}
t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader)
t.statsHandler.HandleRPC(s.ctx, inHeader)
} else {
inTrailer := &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
}
t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer)
t.statsHandler.HandleRPC(s.ctx, inTrailer)
}
}
}()
@ -995,10 +1078,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if len(state.mdata) > 0 {
s.trailer = state.mdata
}
s.statusCode = state.statusCode
s.statusDesc = state.statusDesc
close(s.done)
s.state = streamDone
s.finish(state.status())
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@ -1097,7 +1177,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
t.mu.Lock()
for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream.
stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
t.mu.Unlock()
@ -1116,7 +1196,7 @@ func (t *http2Client) controller() {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(true, i.streamID, i.increment)
t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)

View File

@ -38,19 +38,25 @@ import (
"errors"
"io"
"math"
"math/rand"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
@ -90,11 +96,35 @@ type http2Server struct {
stats stats.Handler
// Flag to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instant the last ping was received.
lastPingAt time.Time
// Number of times the client has violated keepalive ping policy so far.
pingStrikes uint8
// Flag to signify that number of ping strikes should be reset to 0.
// This is set whenever data or header frames are sent.
// 1 means yes.
resetPingStrikes uint32 // Accessed atomically.
initialWindowSize int32
mu sync.Mutex // guard the following
state transportState
activeStreams map[uint32]*Stream
// the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@ -114,41 +144,75 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
Val: maxStreams,
})
}
if initialWindowSize != defaultWindowSize {
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
}
icwz := int32(initialConnWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
}
if iwz != defaultWindowSize {
settings = append(settings, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(initialWindowSize)})
Val: uint32(iwz)})
}
if err := framer.writeSettings(true, settings...); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
kp := config.KeepaliveParams
if kp.MaxConnectionIdle == 0 {
kp.MaxConnectionIdle = defaultMaxConnectionIdle
}
if kp.MaxConnectionAge == 0 {
kp.MaxConnectionAge = defaultMaxConnectionAge
}
// Add a jitter to MaxConnectionAge.
kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
if kp.MaxConnectionAgeGrace == 0 {
kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
}
if kp.Time == 0 {
kp.Time = defaultServerKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
var buf bytes.Buffer
t := &http2Server{
ctx: context.Background(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
ctx: context.Background(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
}
if t.stats != nil {
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
@ -159,6 +223,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
t.stats.HandleConn(t.ctx, connBegin)
}
go t.controller()
go t.keepalive()
t.writableChan <- 0
return t, nil
}
@ -170,18 +235,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
id: frame.Header().StreamID,
st: t,
buf: buf,
fc: &inFlow{limit: initialWindowSize},
fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
var state decodeState
for _, hf := range frame.Fields {
state.processHeaderField(hf)
}
if err := state.err; err != nil {
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
if err := state.processHeaderField(hf); err != nil {
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
}
return
}
return
}
if frame.StreamEnded() {
@ -208,12 +272,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, state.mdata)
s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
}
s.dec = &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
s.recvCompress = state.encoding
s.method = state.method
@ -224,7 +292,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
// TODO: Log the real error.
grpclog.Printf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return
}
@ -248,9 +316,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
t.maxStreamID = s.id
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[s.id] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
t.mu.Unlock()
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
if t.stats != nil {
@ -275,7 +346,10 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
// Only log if it isn't a simple TCP accept check (i.e., a TCP balancer doing an open/close socket probe).
if err != io.EOF {
grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
t.Close()
return
}
@ -291,10 +365,11 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
return
}
if err != nil {
grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
grpclog.Printf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
t.Close()
return
}
atomic.StoreUint32(&t.activity, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@ -305,6 +380,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
for {
frame, err := t.framer.readFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil {
if se, ok := err.(http2.StreamError); ok {
t.mu.Lock()
@ -363,6 +439,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
return s, true
}
// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Server) adjustWindow(s *Stream, n uint32) {
s.mu.Lock()
defer s.mu.Unlock()
if s.state == streamDone {
return
}
if w := s.fc.maybeAdjust(n); w > 0 {
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
@ -372,11 +465,11 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) {
if s.state == streamDone {
return
}
if w := t.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
@ -387,27 +480,26 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.Close()
return
}
// Decouple the connection's flow control from the application's read.
// An update on the connection's flow control should not depend on
// whether the user application has read the data or not. Such a
// restriction is already imposed on the stream's flow control,
// and therefore the sender will be blocked anyway.
// Decoupling the connection flow control will prevent other
// active (fast) streams from starving in the presence of slow or
// inactive streams.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w, true})
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if size > 0 {
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
// The stream has been closed. Release the corresponding quota.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if err := s.fc.onData(uint32(size)); err != nil {
@ -418,7 +510,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
s.mu.Unlock()
@ -463,6 +555,11 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
t.controlBuf.put(&settings{ack: true, ss: ss})
}
const (
maxPingStrikes = 2
defaultPingTimeout = 2 * time.Hour
)
func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() { // Do nothing.
return
@ -470,6 +567,38 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck)
now := time.Now()
defer func() {
t.lastPingAt = now
}()
// A reset of ping strikes means that we don't need to check for a policy
// violation for this ping, and the pingStrikes counter should be set
// to 0.
if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
t.pingStrikes = 0
return
}
t.mu.Lock()
ns := len(t.activeStreams)
t.mu.Unlock()
if ns < 1 && !t.kep.PermitWithoutStream {
// Keepalive shouldn't be active; thus, this new ping should
// have come after at least defaultPingTimeout.
if t.lastPingAt.Add(defaultPingTimeout).After(now) {
t.pingStrikes++
}
} else {
// Check if keepalive policy is respected.
if t.lastPingAt.Add(t.kep.MinTime).After(now) {
t.pingStrikes++
}
}
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")})
}
}
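Seen from the client, violating this policy (pinging more often than the server's EnforcementPolicy.MinTime, or pinging without active streams when PermitWithoutStream is false) accumulates ping strikes and eventually produces the GOAWAY with ENHANCE_YOUR_CALM and "too_many_pings" debug data queued above. A hedged configuration sketch, assuming the keepalive.ClientParameters type and the grpc.WithKeepaliveParams dial option; the target address is illustrative only.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// dial opens a connection whose keepalive pings stay within a typical server policy.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // plaintext, for the sketch only
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			// Keep Time at or above the server's EnforcementPolicy.MinTime
			// (5 minutes by default here) to avoid accumulating ping strikes.
			Time:                5 * time.Minute,
			Timeout:             20 * time.Second, // wait this long for the ping ack
			PermitWithoutStream: false,            // do not ping when there are no active RPCs
		}),
	)
}

func main() {
	if cc, err := dial("localhost:2379"); err == nil {
		defer cc.Close()
	}
}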
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@ -488,6 +617,13 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e
first := true
endHeaders := false
var err error
defer func() {
if err == nil {
// Reset ping strikes when sending headers since that might cause the
// peer to send a ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
// Sends the headers in a single batch.
for !endHeaders {
size := t.hBuf.Len()
@ -542,13 +678,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
for k, v := range md {
for k, vv := range md {
if isReservedHeader(k) {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
bufLen := t.hBuf.Len()
@ -569,7 +705,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
// There are no further I/O operations that can be performed on this stream.
// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
var headersSent, hasHeader bool
s.mu.Lock()
if s.state == streamDone {
@ -600,17 +736,28 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
t.hEnc.WriteField(
hpack.HeaderField{
Name: "grpc-status",
Value: strconv.Itoa(int(statusCode)),
Value: strconv.Itoa(int(st.Code())),
})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
// Attach the trailer metadata.
for k, v := range s.trailer {
for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
bufLen := t.hBuf.Len()
@ -631,7 +778,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) {
// TODO(zhaoq): Support multi-writers for a single stream.
var writeHeaderFrame bool
s.mu.Lock()
@ -646,6 +793,13 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
if writeHeaderFrame {
t.WriteHeader(s, nil)
}
defer func() {
if err == nil {
// Reset ping strikes when sending data since this might cause
// the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
r := bytes.NewBuffer(data)
for {
if r.Len() == 0 {
@ -727,7 +881,7 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
t.mu.Lock()
defer t.mu.Unlock()
for _, stream := range t.activeStreams {
stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
}
@ -735,6 +889,91 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
}
}
// keepalive running in a separate goroutine does the following:
// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
p := &ping{}
var pingSent bool
maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
keepalive := time.NewTimer(t.kp.Time)
// NOTE: All exit paths of this function should reset their
// respective timers. A failure to do so will cause the
// following clean-up to deadlock and eventually leak.
defer func() {
if !maxIdle.Stop() {
<-maxIdle.C
}
if !maxAge.Stop() {
<-maxAge.C
}
if !keepalive.Stop() {
<-keepalive.C
}
}()
for {
select {
case <-maxIdle.C:
t.mu.Lock()
idle := t.idle
if idle.IsZero() { // The connection is non-idle.
t.mu.Unlock()
maxIdle.Reset(t.kp.MaxConnectionIdle)
continue
}
val := t.kp.MaxConnectionIdle - time.Since(idle)
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.state = draining
t.mu.Unlock()
t.Drain()
// Resetting the timer so that the clean-up doesn't deadlock.
maxIdle.Reset(infinity)
return
}
t.mu.Unlock()
maxIdle.Reset(val)
case <-maxAge.C:
t.mu.Lock()
t.state = draining
t.mu.Unlock()
t.Drain()
maxAge.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-maxAge.C:
// Close the connection after grace period.
t.Close()
// Resetting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity)
case <-t.shutdownChan:
}
return
case <-keepalive.C:
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
pingSent = false
keepalive.Reset(t.kp.Time)
continue
}
if pingSent {
t.Close()
// Resetting the timer so that the clean-up doesn't deadlock.
keepalive.Reset(infinity)
return
}
pingSent = true
t.controlBuf.put(p)
keepalive.Reset(t.kp.Timeout)
case <-t.shutdownChan:
return
}
}
}
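A hedged sketch of wiring the parameters this goroutine consumes, assuming the grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy server options; fields left at zero fall back to the defaults defined earlier in this file (e.g. defaultServerKeepaliveTime, defaultMaxConnectionIdle), and the concrete durations below are illustrative.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle:     5 * time.Minute,  // case 1: gracefully close idle connections
			MaxConnectionAge:      30 * time.Minute, // case 2: jittered, then drained
			MaxConnectionAgeGrace: 10 * time.Second, // case 3: grace period before a forced close
			Time:                  2 * time.Hour,    // case 4: server-initiated ping interval
			Timeout:               20 * time.Second, // case 4: wait this long for the ping ack
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             5 * time.Minute, // clients pinging faster accumulate ping strikes
			PermitWithoutStream: false,
		}),
	)
}

func main() {
	_ = newServer()
}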
// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Server) controller() {
@ -746,7 +985,7 @@ func (t *http2Server) controller() {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(true, i.streamID, i.increment)
t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)
@ -766,7 +1005,10 @@ func (t *http2Server) controller() {
sid := t.maxStreamID
t.state = draining
t.mu.Unlock()
t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil)
t.framer.writeGoAway(true, sid, i.code, i.debugData)
if i.code == http2.ErrCodeEnhanceYourCalm {
t.Close()
}
case *flushIO:
t.framer.flushWrite()
case *ping:
@ -816,6 +1058,9 @@ func (t *http2Server) Close() (err error) {
func (t *http2Server) closeStream(s *Stream) {
t.mu.Lock()
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
if t.state == draining && len(t.activeStreams) == 0 {
defer t.Close()
}
@ -825,11 +1070,6 @@ func (t *http2Server) closeStream(s *Stream) {
// called to interrupt the potential blocking on other goroutines.
s.cancel()
s.mu.Lock()
if q := s.fc.resetPendingData(); q > 0 {
if w := t.fc.onRead(q); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
if s.state == streamDone {
s.mu.Unlock()
return
@ -843,5 +1083,17 @@ func (t *http2Server) RemoteAddr() net.Addr {
}
func (t *http2Server) Drain() {
t.controlBuf.put(&goAway{})
t.controlBuf.put(&goAway{code: http2.ErrCodeNo})
}
var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
j := rgen.Int63n(2*r) - r
return time.Duration(j)
}

View File

@ -36,19 +36,23 @@ package transport
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
@ -85,18 +89,39 @@ var (
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}
httpStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
http.StatusBadRequest: codes.Internal,
// 401 Unauthorized - UNAUTHENTICATED.
http.StatusUnauthorized: codes.Unauthenticated,
// 403 Forbidden - PERMISSION_DENIED.
http.StatusForbidden: codes.PermissionDenied,
// 404 Not Found - UNIMPLEMENTED.
http.StatusNotFound: codes.Unimplemented,
// 429 Too Many Requests - UNAVAILABLE.
http.StatusTooManyRequests: codes.Unavailable,
// 502 Bad Gateway - UNAVAILABLE.
http.StatusBadGateway: codes.Unavailable,
// 503 Service Unavailable - UNAVAILABLE.
http.StatusServiceUnavailable: codes.Unavailable,
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
)
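A small sketch of how a table like `httpStatusConvTab` above is meant to be consulted: any HTTP status without an entry falls back to `codes.Unknown`, which matches what `decodeResponseHeader` below does. The helper name is an assumption; the real lookup lives in the decode path.

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/grpc/codes"
)

var table = map[int]codes.Code{
	http.StatusBadRequest:         codes.Internal,
	http.StatusUnauthorized:       codes.Unauthenticated,
	http.StatusForbidden:          codes.PermissionDenied,
	http.StatusNotFound:           codes.Unimplemented,
	http.StatusTooManyRequests:    codes.Unavailable,
	http.StatusBadGateway:         codes.Unavailable,
	http.StatusServiceUnavailable: codes.Unavailable,
	http.StatusGatewayTimeout:     codes.Unavailable,
}

// httpStatusToCode falls back to Unknown for unmapped statuses.
func httpStatusToCode(status int) codes.Code {
	if c, ok := table[status]; ok {
		return c
	}
	return codes.Unknown
}

func main() {
	fmt.Println(httpStatusToCode(http.StatusBadGateway)) // Unavailable
	fmt.Println(httpStatusToCode(http.StatusTeapot))     // Unknown
}
```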
// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire headers is finished.
type decodeState struct {
err error // first error encountered decoding
encoding string
// statusCode caches the stream status received from the trailer
// the server sent. Client side only.
statusCode codes.Code
statusDesc string
// statusGen caches the stream status received from the trailer the server
// sent. Client side only. Do not access directly. After all trailers are
// parsed, use the status method to retrieve the status.
statusGen *status.Status
// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
// intended for direct access outside of parsing.
rawStatusCode *int
rawStatusMsg string
httpStatus *int
// Server side only fields.
timeoutSet bool
timeout time.Duration
@ -119,6 +144,7 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
"grpc-status-details-bin",
"te":
return true
default:
@ -137,12 +163,6 @@ func isWhitelistedPseudoHeader(hdr string) bool {
}
}
func (d *decodeState) setErr(err error) {
if d.err == nil {
d.err = err
}
}
func validContentType(t string) bool {
e := "application/grpc"
if !strings.HasPrefix(t, e) {
@ -156,56 +176,135 @@ func validContentType(t string) bool {
return true
}
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
func (d *decodeState) status() *status.Status {
if d.statusGen == nil {
// No status-details were provided; generate status using code/msg.
d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
}
return d.statusGen
}
const binHdrSuffix = "-bin"
func encodeBinHeader(v []byte) string {
return base64.RawStdEncoding.EncodeToString(v)
}
func decodeBinHeader(v string) ([]byte, error) {
if len(v)%4 == 0 {
// Input was padded, or padding was not necessary.
return base64.StdEncoding.DecodeString(v)
}
return base64.RawStdEncoding.DecodeString(v)
}
func encodeMetadataHeader(k, v string) string {
if strings.HasSuffix(k, binHdrSuffix) {
return encodeBinHeader(([]byte)(v))
}
return v
}
func decodeMetadataHeader(k, v string) (string, error) {
if strings.HasSuffix(k, binHdrSuffix) {
b, err := decodeBinHeader(v)
return string(b), err
}
return v, nil
}
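A small round-trip sketch of the `-bin` metadata rules implemented above: values of keys ending in `-bin` travel base64-encoded, unpadded on the way out, with padded input tolerated on the way in. This is a standalone illustration with assumed helper names, not the transport's code path.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

const binHdrSuffix = "-bin"

func encodeMetadataValue(k, v string) string {
	if strings.HasSuffix(k, binHdrSuffix) {
		// Unpadded base64 on the wire, as in encodeBinHeader above.
		return base64.RawStdEncoding.EncodeToString([]byte(v))
	}
	return v
}

func decodeMetadataValue(k, v string) (string, error) {
	if !strings.HasSuffix(k, binHdrSuffix) {
		return v, nil
	}
	if len(v)%4 == 0 {
		// Padded input (or padding not needed) is accepted too.
		b, err := base64.StdEncoding.DecodeString(v)
		return string(b), err
	}
	b, err := base64.RawStdEncoding.DecodeString(v)
	return string(b), err
}

func main() {
	key := "trace-bin"
	val := string([]byte{0x00, 0x01, 0xfe, 0xff}) // arbitrary binary payload
	wire := encodeMetadataValue(key, val)
	back, err := decodeMetadataValue(key, wire)
	fmt.Printf("wire=%q roundtrip-ok=%v err=%v\n", wire, back == val, err)
}
```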
func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
for _, hf := range frame.Fields {
if err := d.processHeaderField(hf); err != nil {
return err
}
}
// If grpc status exists, no need to check further.
if d.rawStatusCode != nil || d.statusGen != nil {
return nil
}
// If grpc status doesn't exist and http status doesn't exist,
// then it's a malformed header.
if d.httpStatus == nil {
return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
}
if *(d.httpStatus) != http.StatusOK {
code, ok := httpStatusConvTab[*(d.httpStatus)]
if !ok {
code = codes.Unknown
}
return streamErrorf(code, http.StatusText(*(d.httpStatus)))
}
// gRPC status doesn't exist and http status is OK.
// Set rawStatusCode to be unknown and return nil error.
// That way, if the stream has ended, this Unknown status
// will be propagated to the user.
// Otherwise, it will be ignored and the status from a later
// trailer, the one with the StreamEnded flag set, is propagated.
code := int(codes.Unknown)
d.rawStatusCode = &code
return nil
}
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
switch f.Name {
case "content-type":
if !validContentType(f.Value) {
d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
return
return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
}
case "grpc-encoding":
d.encoding = f.Value
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
return
return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
}
d.statusCode = codes.Code(code)
d.rawStatusCode = &code
case "grpc-message":
d.statusDesc = decodeGrpcMessage(f.Value)
d.rawStatusMsg = decodeGrpcMessage(f.Value)
case "grpc-status-details-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
}
s := &spb.Status{}
if err := proto.Unmarshal(v, s); err != nil {
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
}
d.statusGen = status.FromProto(s)
case "grpc-timeout":
d.timeoutSet = true
var err error
d.timeout, err = decodeTimeout(f.Value)
if err != nil {
d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
return
if d.timeout, err = decodeTimeout(f.Value); err != nil {
return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
}
case ":path":
d.method = f.Value
case ":status":
code, err := strconv.Atoi(f.Value)
if err != nil {
return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
}
d.httpStatus = &code
default:
if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
if f.Name == "user-agent" {
i := strings.LastIndex(f.Value, " ")
if i == -1 {
// There is no application user agent string being set.
return
}
// Extract the application user agent string.
f.Value = f.Value[:i]
}
if d.mdata == nil {
d.mdata = make(map[string][]string)
}
k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
v, err := decodeMetadataHeader(f.Name, f.Value)
if err != nil {
grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
return
return nil
}
d.mdata[k] = append(d.mdata[k], v)
d.mdata[f.Name] = append(d.mdata[f.Name], v)
}
}
return nil
}
type timeoutUnit uint8
@ -377,6 +476,9 @@ func newFramer(conn net.Conn) *framer {
writer: bufio.NewWriterSize(conn, http2IOBufSize),
}
f.fr = http2.NewFramer(f.writer, f.reader)
// Opt-in to Frame reuse API on framer to reduce garbage.
// Frames aren't safe to read from after a subsequent call to ReadFrame.
f.fr.SetReuseFrames()
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}

View File

@ -1,51 +0,0 @@
// +build !go1.6
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package transport
import (
"net"
"time"
"golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
var dialer net.Dialer
if deadline, ok := ctx.Deadline(); ok {
dialer.Timeout = deadline.Sub(time.Now())
}
return dialer.Dial(network, address)
}

View File

@ -51,6 +51,7 @@ import (
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
@ -104,6 +105,7 @@ func (b *recvBuffer) load() {
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
b.backlog[0] = nil
b.backlog = b.backlog[1:]
default:
}
@ -170,11 +172,6 @@ type Stream struct {
id uint32
// nil for client side Stream.
st ServerTransport
// clientStatsCtx keeps the user context for stats handling.
// It's only valid on client side. Server side stats context is same as s.ctx.
// All client side stats collection should use the clientStatsCtx (instead of the stream context)
// so that all the generated stats for a particular RPC can be associated in the processing phase.
clientStatsCtx context.Context
// ctx is the associated context of the stream.
ctx context.Context
// cancel is always nil for client side Stream.
@ -188,14 +185,17 @@ type Stream struct {
recvCompress string
sendCompress string
buf *recvBuffer
dec io.Reader
trReader io.Reader
fc *inFlow
recvQuota uint32
// TODO: Remove this unused variable.
// The accumulated inbound quota pending for window update.
updateQuota uint32
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if need be.
requestRead func(int)
sendQuotaPool *quotaPool
// Close headerChan to indicate the end of reception of header metadata.
@ -212,14 +212,17 @@ type Stream struct {
// true iff headerChan is closed. Used to avoid closing headerChan
// multiple times.
headerDone bool
// the status received from the server.
statusCode codes.Code
statusDesc string
// the status error received from the server.
status *status.Status
// rstStream indicates whether a RST_STREAM frame needs to be sent
// to the server to signify that this stream is closing.
rstStream bool
// rstError is the error that needs to be sent along with the RST_STREAM frame.
rstError http2.ErrCode
// bytesSent and bytesReceived indicate whether any bytes have been sent or
// received on this stream.
bytesSent bool
bytesReceived bool
}
// RecvCompress returns the compression algorithm applied to the inbound
@ -247,7 +250,7 @@ func (s *Stream) GoAway() <-chan struct{} {
// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is cancelled/expired.
// header metadata or iii) the stream is canceled/expired.
func (s *Stream) Header() (metadata.MD, error) {
select {
case <-s.ctx.Done():
@ -284,14 +287,9 @@ func (s *Stream) Method() string {
return s.method
}
// StatusCode returns statusCode received from the server.
func (s *Stream) StatusCode() codes.Code {
return s.statusCode
}
// StatusDesc returns statusDesc received from the server.
func (s *Stream) StatusDesc() string {
return s.statusDesc
// Status returns the status received from the server.
func (s *Stream) Status() *status.Status {
return s.status
}
// SetHeader sets the header metadata. This can be called multiple times.
@ -325,19 +323,60 @@ func (s *Stream) write(m recvMsg) {
s.buf.put(&m)
}
// Read reads all the data available for this Stream from the transport and
// Read reads len(p) bytes from the wire for this stream.
func (s *Stream) Read(p []byte) (n int, err error) {
// Don't request a read if there was an error earlier
if er := s.trReader.(*transportReader).er; er != nil {
return 0, er
}
s.requestRead(len(p))
return io.ReadFull(s.trReader, p)
}
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
func (s *Stream) Read(p []byte) (n int, err error) {
n, err = s.dec.Read(p)
type transportReader struct {
reader io.Reader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
func (t *transportReader) Read(p []byte) (n int, err error) {
n, err = t.reader.Read(p)
if err != nil {
t.er = err
return
}
s.windowHandler(n)
t.windowHandler(n)
return
}
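The `Stream.Read`/`transportReader` split above boils down to a reusable pattern: wrap an `io.Reader` so that every successful read reports the consumed byte count to a flow-control style callback, and the first error is remembered for subsequent reads. A minimal standalone sketch with illustrative names:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// countingReader reports each successful read's size to onRead (which, in a
// transport, would trigger window updates) and sticks on the first error.
type countingReader struct {
	r        io.Reader
	onRead   func(n int)
	firstErr error
}

func (c *countingReader) Read(p []byte) (n int, err error) {
	if c.firstErr != nil {
		return 0, c.firstErr
	}
	n, err = c.r.Read(p)
	if err != nil {
		c.firstErr = err
		return
	}
	c.onRead(n)
	return
}

func main() {
	src := bytes.NewBufferString("hello, window")
	cr := &countingReader{r: src, onRead: func(n int) { fmt.Println("consumed", n, "bytes") }}
	buf := make([]byte, 5)
	if _, err := io.ReadFull(cr, buf); err == nil {
		fmt.Printf("read %q\n", buf)
	}
}
```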
// finish sets the stream's state and status, and closes the done channel.
// s.mu must be held by the caller. st must always be non-nil.
func (s *Stream) finish(st *status.Status) {
s.status = st
s.state = streamDone
close(s.done)
}
// BytesSent indicates whether any bytes have been sent on this stream.
func (s *Stream) BytesSent() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.bytesSent
}
// BytesReceived indicates whether any bytes have been received on this stream.
func (s *Stream) BytesReceived() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.bytesReceived
}
// GoString is implemented by Stream so context.String() won't
// race when printing %#v.
func (s *Stream) GoString() string {
@ -371,10 +410,14 @@ const (
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
MaxStreams uint32
AuthInfo credentials.AuthInfo
InTapHandle tap.ServerInHandle
StatsHandler stats.Handler
MaxStreams uint32
AuthInfo credentials.AuthInfo
InTapHandle tap.ServerInHandle
StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
InitialConnWindowSize int32
}
// NewServerTransport creates a ServerTransport with conn or non-nil error
@ -402,6 +445,10 @@ type ConnectOptions struct {
KeepaliveParams keepalive.ClientParameters
// StatsHandler stores the handler for stats.
StatsHandler stats.Handler
// InitialWindowSize sets the initial window size for a stream.
InitialWindowSize int32
// InitialConnWindowSize sets the initial window size for a connection.
InitialConnWindowSize int32
}
// TargetInfo contains the information of the target such as network address and metadata.
@ -445,6 +492,9 @@ type CallHdr struct {
// outbound message.
SendCompress string
// Creds specifies credentials.PerRPCCredentials for a call.
Creds credentials.PerRPCCredentials
// Flush indicates whether a new stream command should be sent
// to the peer without waiting for the first data. This is
// only a hint. The transport may modify the flush decision
@ -484,10 +534,13 @@ type ClientTransport interface {
// once the transport is initiated.
Error() <-chan struct{}
// GoAway returns a channel that is closed when ClientTranspor
// GoAway returns a channel that is closed when ClientTransport
// receives the draining signal from the server (e.g., GOAWAY frame in
// HTTP/2).
GoAway() <-chan struct{}
// GetGoAwayReason returns the reason why GoAway frame was received.
GetGoAwayReason() GoAwayReason
}
// ServerTransport is the common interface for all gRPC server-side transport
@ -507,10 +560,9 @@ type ServerTransport interface {
// Write may not be called on all streams.
Write(s *Stream, data []byte, opts *Options) error
// WriteStatus sends the status of a stream to the client.
// WriteStatus is the final call made on a stream and always
// occurs.
WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
// WriteStatus sends the status of a stream to the client. WriteStatus is
// the final call made on a stream and always occurs.
WriteStatus(s *Stream, st *status.Status) error
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
@ -576,6 +628,8 @@ var (
ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
)
// TODO: See if we can replace StreamError with status package errors.
// StreamError is an error that only affects one stream within a connection.
type StreamError struct {
Code codes.Code
@ -586,17 +640,6 @@ func (e StreamError) Error() string {
return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
}
// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
panic(fmt.Sprintf("Unexpected error from context packet: %v", err))
}
// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
@ -624,3 +667,16 @@ func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-
return i, nil
}
}
// GoAwayReason contains the reason for the GoAway frame received.
type GoAwayReason uint8
const (
// Invalid indicates that no GoAway frame is received.
Invalid GoAwayReason = 0
// NoReason is the default value when a GoAway frame is received.
NoReason GoAwayReason = 1
// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
// was received and that the debug data said "too_many_pings".
TooManyPings GoAwayReason = 2
)
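For context, a hedged sketch of how a received GOAWAY might be classified into these reasons: an ENHANCE_YOUR_CALM error code whose debug data reads "too_many_pings" maps to TooManyPings, anything else to NoReason. The helper `classifyGoAway` is an assumed name; the real classification happens inside the client transport when it handles the frame.

```go
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// GoAwayReason mirrors the constants above for this standalone sketch.
type GoAwayReason uint8

const (
	Invalid      GoAwayReason = 0
	NoReason     GoAwayReason = 1
	TooManyPings GoAwayReason = 2
)

func classifyGoAway(code http2.ErrCode, debugData []byte) GoAwayReason {
	if code == http2.ErrCodeEnhanceYourCalm && string(debugData) == "too_many_pings" {
		return TooManyPings
	}
	return NoReason
}

func main() {
	fmt.Println(classifyGoAway(http2.ErrCodeEnhanceYourCalm, []byte("too_many_pings"))) // 2 (TooManyPings)
	fmt.Println(classifyGoAway(http2.ErrCodeNo, nil))                                   // 1 (NoReason)
}
```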

View File

@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
return nil, rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
return rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ss.Context())
md, ok := metadata.FromIncomingContext(ss.Context())
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {

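Since these interceptors read the require-leader flag with `metadata.FromIncomingContext`, the matching client has to attach that flag with `metadata.NewOutgoingContext` — the incoming/outgoing split introduced by gRPC v1.4, and what the integration tests below exercise. A hedged, standalone sketch of the client half, using a placeholder endpoint:

```go
package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/metadata"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// 127.0.0.1:2379 is a placeholder endpoint for illustration only.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Outgoing metadata: ask the server to fail fast when it has no leader.
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	kv := pb.NewKVClient(conn)
	if _, err := kv.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		// With no leader, the interceptor above rejects the call.
		grpclog.Printf("Put failed: %v", err)
	}
}
```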
15
glide.lock generated
View File

@ -1,5 +1,5 @@
hash: b001848c59d9603ce7c1a5387d4906eb2c906e2c46e3ec5e9f64b8f47757c598
updated: 2017-06-15T16:07:48.488474021-07:00
hash: 63e5d04362c6e514a571bb9ea848ebd3c9703a964150443e4b26318fb0cdb720
updated: 2017-06-15T16:39:00.785038305-07:00
imports:
- name: github.com/beorn7/perks
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
@ -49,13 +49,14 @@ imports:
subpackages:
- jsonpb
- proto
- ptypes/any
- ptypes/struct
- name: github.com/google/btree
version: 925471ac9e2131377a91e1595defec898166fe49
- name: github.com/grpc-ecosystem/go-grpc-prometheus
version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
- name: github.com/grpc-ecosystem/grpc-gateway
version: 18d159699f2e83fc5bb9ef2f79465ca3f3122676
version: 589b126116b5fc961939b3e156c29e4d9d58222f
subpackages:
- runtime
- runtime/internal
@ -138,11 +139,16 @@ imports:
version: c06e80d9300e4443158a03817b8a8cb37d230320
subpackages:
- rate
- name: google.golang.org/genproto
version: aa2eb687b4d3e17154372564ad8d6bf11c3cf21f
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: 8050b9cbc271307e5a716a9d782803d09b0d6f2d
version: 42c095b6c9bdd80d4435b064508c398afdb0ec99
subpackages:
- codes
- credentials
- grpclb/grpc_lb_v1
- grpclog
- internal
- keepalive
@ -150,6 +156,7 @@ imports:
- naming
- peer
- stats
- status
- tap
- transport
- name: gopkg.in/cheggaaa/pb.v1

View File

@ -40,7 +40,7 @@ import:
- package: github.com/google/btree
version: 925471ac9e2131377a91e1595defec898166fe49
- package: github.com/grpc-ecosystem/grpc-gateway
version: v1.2.0
version: v1.2.2
subpackages:
- runtime
- runtime/internal
@ -97,7 +97,7 @@ import:
subpackages:
- rate
- package: google.golang.org/grpc
version: v1.2.1
version: v1.4.1
subpackages:
- codes
- credentials

View File

@ -1608,7 +1608,7 @@ func TestGRPCRequireLeader(t *testing.T) {
time.Sleep(time.Duration(3*electionTicks) * tickDuration)
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
ctx := metadata.NewContext(context.Background(), md)
ctx := metadata.NewOutgoingContext(context.Background(), md)
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
if _, err := toGRPC(client).KV.Put(ctx, reqput); grpc.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
@ -1630,7 +1630,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) {
wAPI := toGRPC(client).Watch
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
ctx := metadata.NewContext(context.Background(), md)
ctx := metadata.NewOutgoingContext(context.Background(), md)
wStream, err := wAPI.Watch(ctx)
if err != nil {
t.Fatalf("wAPI.Watch error: %v", err)

View File

@ -454,7 +454,7 @@ func TestV3LeaseFailover(t *testing.T) {
lreq := &pb.LeaseKeepAliveRequest{ID: lresp.ID}
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
mctx := metadata.NewContext(context.Background(), md)
mctx := metadata.NewOutgoingContext(context.Background(), md)
ctx, cancel := context.WithCancel(mctx)
defer cancel()
lac, err := lc.LeaseKeepAlive(ctx)
@ -502,7 +502,7 @@ func TestV3LeaseRequireLeader(t *testing.T) {
clus.Members[2].Stop(t)
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
mctx := metadata.NewContext(context.Background(), md)
mctx := metadata.NewOutgoingContext(context.Background(), md)
ctx, cancel := context.WithCancel(mctx)
defer cancel()
lac, err := lc.LeaseKeepAlive(ctx)

View File

@ -137,7 +137,7 @@ func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error
errc := make(chan error, 2)
var lostLeaderC <-chan struct{}
if md, ok := metadata.FromContext(stream.Context()); ok {
if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
v := md[rpctypes.MetadataRequireLeaderKey]
if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
lostLeaderC = lp.leader.lostNotify()

View File

@ -95,7 +95,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) {
}
var lostLeaderC <-chan struct{}
if md, ok := metadata.FromContext(stream.Context()); ok {
if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
v := md[rpctypes.MetadataRequireLeaderKey]
if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
lostLeaderC = wp.leader.lostNotify()