vendor: upgrade grpc-go to 1.23.0
https://github.com/grpc/grpc-go/releases/tag/v1.23.0

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
parent 98462b52d1
commit 4d06d3b498
go.mod (2 changes)
@@ -42,7 +42,7 @@ require (
 	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
 	golang.org/x/net v0.0.0-20190311183353-d8887717615a
 	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
-	google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25
+	google.golang.org/grpc v1.23.0
 	gopkg.in/cheggaaa/pb.v1 v1.0.25
 	gopkg.in/yaml.v2 v2.2.2
 	sigs.k8s.io/yaml v1.1.0
go.sum (4 changes)
@@ -177,8 +177,8 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25 h1:lS/LGci7282xXbzMwFpHD7RKjsfKUK3KYwk34RYtlK0=
-google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
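
A change like this is normally regenerated rather than hand-edited: the go.mod/go.sum pair and the vendor/ tree move together. A plausible sequence that would produce it (etcd's actual update scripts may differ) is:

    go get google.golang.org/grpc@v1.23.0
    go mod tidy
    go mod vendor

Everything below under vendor/ follows mechanically from that version bump.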
vendor/google.golang.org/grpc/internal/transport/controlbuf.go (84 changes; generated, vendored)
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"runtime"
 	"sync"
+	"sync/atomic"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
@@ -84,12 +85,24 @@ func (il *itemList) isEmpty() bool {
 // the control buffer of transport. They represent different aspects of
 // control tasks, e.g., flow control, settings, streaming resetting, etc.
 
+// maxQueuedTransportResponseFrames is the most queued "transport response"
+// frames we will buffer before preventing new reads from occurring on the
+// transport. These are control frames sent in response to client requests,
+// such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
 // registerStream is used to register an incoming stream with loopy writer.
 type registerStream struct {
 	streamID uint32
 	wq       *writeQuota
 }
 
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
 // headerFrame is also used to register stream on the client-side.
 type headerFrame struct {
 	streamID   uint32
@@ -102,6 +115,10 @@ type headerFrame struct {
 	onOrphaned func(error) // Valid on client-side
 }
 
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
 type cleanupStream struct {
 	streamID uint32
 	rst      bool
@@ -109,6 +126,8 @@ type cleanupStream struct {
 	onWrite func()
 }
 
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
 type dataFrame struct {
 	streamID  uint32
 	endStream bool
@@ -119,27 +138,41 @@ type dataFrame struct {
 	onEachWrite func()
 }
 
+func (*dataFrame) isTransportResponseFrame() bool { return false }
+
 type incomingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
+
 type outgoingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
+	return false // window updates are throttled by thresholds
+}
+
 type incomingSettings struct {
 	ss []http2.Setting
 }
 
+func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
+
 type outgoingSettings struct {
 	ss []http2.Setting
 }
 
+func (*outgoingSettings) isTransportResponseFrame() bool { return false }
+
 type incomingGoAway struct {
 }
 
+func (*incomingGoAway) isTransportResponseFrame() bool { return false }
+
 type goAway struct {
 	code      http2.ErrCode
 	debugData []byte
@@ -147,15 +180,21 @@ type goAway struct {
 	closeConn bool
 }
 
+func (*goAway) isTransportResponseFrame() bool { return false }
+
 type ping struct {
 	ack  bool
 	data [8]byte
 }
 
+func (*ping) isTransportResponseFrame() bool { return true }
+
 type outFlowControlSizeRequest struct {
 	resp chan uint32
 }
 
+func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+
 type outStreamState int
 
 const (
@@ -238,6 +277,14 @@ type controlBuffer struct {
 	consumerWaiting bool
 	list            *itemList
 	err             error
+
+	// transportResponseFrames counts the number of queued items that represent
+	// the response of an action initiated by the peer. trfChan is created
+	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+	// closed and nilled when transportResponseFrames drops below the
+	// threshold. Both fields are protected by mu.
+	transportResponseFrames int
+	trfChan                 atomic.Value // *chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -248,12 +295,24 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
 	}
 }
 
-func (c *controlBuffer) put(it interface{}) error {
+// throttle blocks if there are too many incomingSettings/cleanupStreams in the
+// controlbuf.
+func (c *controlBuffer) throttle() {
+	ch, _ := c.trfChan.Load().(*chan struct{})
+	if ch != nil {
+		select {
+		case <-*ch:
+		case <-c.done:
+		}
+	}
+}
+
+func (c *controlBuffer) put(it cbItem) error {
 	_, err := c.executeAndPut(nil, it)
 	return err
 }
 
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
 	var wakeUp bool
 	c.mu.Lock()
 	if c.err != nil {
@@ -271,6 +330,15 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{
 		c.consumerWaiting = false
 	}
 	c.list.enqueue(it)
+	if it.isTransportResponseFrame() {
+		c.transportResponseFrames++
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are adding the frame that puts us over the threshold; create
+			// a throttling channel.
+			ch := make(chan struct{})
+			c.trfChan.Store(&ch)
+		}
+	}
 	c.mu.Unlock()
 	if wakeUp {
 		select {
@@ -304,7 +372,17 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
 			return nil, c.err
 		}
 		if !c.list.isEmpty() {
-			h := c.list.dequeue()
+			h := c.list.dequeue().(cbItem)
+			if h.isTransportResponseFrame() {
+				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+					// We are removing the frame that put us over the
+					// threshold; close and clear the throttling channel.
+					ch := c.trfChan.Load().(*chan struct{})
+					close(*ch)
+					c.trfChan.Store((*chan struct{})(nil))
+				}
+				c.transportResponseFrames--
+			}
 			c.mu.Unlock()
 			return h, nil
 		}
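
The substance of this upgrade is the diff above: grpc-go 1.23.0 bounds how many "transport response" frames (settings acks, RST_STREAMs provoked by bad headers) can sit in the control buffer before the transport stops reading from the peer. The following standalone sketch distills the pattern: a counter guarded by a mutex, plus a throttling channel published through an atomic.Value so readers can check it without taking the lock. All names here (buffer, maxQueued) are illustrative, not the vendored identifiers:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    const maxQueued = 50 // plays the role of maxQueuedTransportResponseFrames

    type buffer struct {
        mu      sync.Mutex
        queued  int          // queued items that answer a peer-initiated action
        trfChan atomic.Value // *chan struct{}; non-nil pointer while over threshold
    }

    // put records one queued response item; crossing the threshold installs
    // the channel that throttle() will block on.
    func (b *buffer) put() {
        b.mu.Lock()
        b.queued++
        if b.queued == maxQueued {
            ch := make(chan struct{})
            b.trfChan.Store(&ch)
        }
        b.mu.Unlock()
    }

    // get consumes one item; dropping back below the threshold closes the
    // channel, releasing every blocked reader at once.
    func (b *buffer) get() {
        b.mu.Lock()
        if b.queued == maxQueued {
            ch := b.trfChan.Load().(*chan struct{})
            close(*ch)
            b.trfChan.Store((*chan struct{})(nil))
        }
        b.queued--
        b.mu.Unlock()
    }

    // throttle blocks while the buffer is over the threshold. (The vendored
    // version also selects on a done channel so shutdown unblocks readers.)
    func (b *buffer) throttle() {
        if ch, _ := b.trfChan.Load().(*chan struct{}); ch != nil {
            <-*ch
        }
    }

    func main() {
        b := &buffer{}
        for i := 0; i < maxQueued; i++ {
            b.put() // the 50th put installs the throttling channel
        }
        released := make(chan struct{})
        go func() { b.throttle(); close(released) }() // blocks here...
        b.get()                                       // ...until one item drains
        <-released
        fmt.Println("reader released")
    }

Publishing the channel through an atomic.Value keeps the hot read path off the mutex, and because a receive from a closed channel never blocks, readers that race with the drain fall through harmlessly.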
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go (3 changes; generated, vendored)
@@ -149,6 +149,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 		n = uint32(math.MaxInt32)
 	}
 	f.mu.Lock()
+	defer f.mu.Unlock()
 	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
 	// can send without a window update.
 	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@@ -169,10 +170,8 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
 			f.delta = n
 		}
-		f.mu.Unlock()
 		return f.delta
 	}
-	f.mu.Unlock()
 	return 0
 }
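
The flowcontrol.go hunks are a lock-hygiene cleanup: maybeAdjust used to call f.mu.Unlock() separately on each of its two return paths, and now a single defer placed right after the Lock covers both. A minimal sketch of the shape, with a hypothetical counter type standing in for inFlow:

    package main

    import (
        "fmt"
        "sync"
    )

    type counter struct {
        mu sync.Mutex
        n  int
    }

    // bump mirrors the maybeAdjust fix: one deferred Unlock covers every
    // return path, so adding a new early return can never leak the lock.
    func (c *counter) bump(limit int) int {
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.n >= limit {
            return c.n // early return: previously this needed its own Unlock
        }
        c.n++
        return c.n
    }

    func main() {
        c := &counter{}
        fmt.Println(c.bump(1), c.bump(1)) // 1 1
    }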
vendor/google.golang.org/grpc/internal/transport/http2_client.go (1 change; generated, vendored)
@@ -1245,6 +1245,7 @@ func (t *http2Client) reader() {
 
 	// loop to keep reading incoming messages on this transport.
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		if t.keepaliveEnabled {
 			atomic.CompareAndSwapUint32(&t.activity, 0, 1)
vendor/google.golang.org/grpc/internal/transport/http2_server.go (1 change; generated, vendored)
@@ -436,6 +436,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	defer close(t.readerDone)
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		atomic.StoreUint32(&t.activity, 1)
 		if err != nil {
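
These two one-line hunks are where the throttling takes effect: both transport read loops (the client's reader above and the server's HandleStreams here) call t.controlBuf.throttle() before each ReadFrame(), so a peer that provokes a backlog of response frames stalls its own connection rather than growing the queue without bound. The loop shape, sketched against the illustrative buffer type from the controlbuf example (readFrame and handle are stand-ins, so this fragment compiles only alongside that sketch):

    // readLoop gates every frame read on the throttle, mirroring reader()
    // and HandleStreams(); handlers may enqueue response frames via buf.put().
    func readLoop(buf *buffer, readFrame func() (interface{}, error), handle func(interface{})) {
        for {
            buf.throttle() // blocks while too many response frames are queued
            f, err := readFrame()
            if err != nil {
                return
            }
            handle(f)
        }
    }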
vendor/google.golang.org/grpc/internal/transport/transport.go (21 changes; generated, vendored)
@@ -184,6 +184,19 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
 	// r.readAdditional acts on that message and returns the necessary error.
 	select {
 	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like a unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
 		r.closeStream(ContextErr(r.ctx.Err()))
 		m := <-r.recv.get()
 		return r.readAdditional(m, p)
@@ -298,6 +311,14 @@ func (s *Stream) waitOnHeader() error {
 	}
 	select {
 	case <-s.ctx.Done():
+		// We prefer success over failure when reading messages because we delay
+		// context error in stream.Read(). To keep behavior consistent, we also
+		// prefer success here.
+		select {
+		case <-s.headerChan:
+			return nil
+		default:
+		}
 		return ContextErr(s.ctx.Err())
 	case <-s.headerChan:
 		return nil
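
The waitOnHeader hunk encodes a deliberate tie-break: if the context is cancelled but headers have already arrived, return success, consistent with how Read delays context errors (see the long comment added to readClient above). The pattern in isolation, with illustrative ctx/result names:

    package main

    import (
        "context"
        "fmt"
    )

    // wait prefers a ready result over a simultaneous cancellation: if both
    // channels are ready, the inner non-blocking select returns success.
    func wait(ctx context.Context, result <-chan struct{}) error {
        select {
        case <-ctx.Done():
            select {
            case <-result: // result also ready; prefer it
                return nil
            default:
            }
            return ctx.Err()
        case <-result:
            return nil
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        result := make(chan struct{})
        close(result) // result is ready...
        cancel()      // ...and so is cancellation
        fmt.Println(wait(ctx, result)) // <nil>: success wins either way
    }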
vendor/google.golang.org/grpc/stream.go (12 changes; generated, vendored)
@@ -327,13 +327,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	return cs, nil
 }
 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
+// newAttemptLocked creates a new attempt with a transport.
+// If it succeeds, then it replaces clientStream's attempt with this new attempt.
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
 	newAttempt := &csAttempt{
 		cs:           cs,
 		dc:           cs.cc.dopts.dc,
 		statsHandler: sh,
 		trInfo:       trInfo,
 	}
+	defer func() {
+		if retErr != nil {
+			// This attempt is not set in the clientStream, so it's finish won't
+			// be called. Call it here for stats and trace in case they are not
+			// nil.
+			newAttempt.finish(retErr)
+		}
+	}()
 
 	if err := cs.ctx.Err(); err != nil {
 		return toRPCErr(err)
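
The stream.go change switches newAttemptLocked to a named return plus a deferred cleanup, guaranteeing newAttempt.finish(retErr) runs on every error exit without repeating the call at each return site. The idiom in miniature, with hypothetical thing/validate/install helpers:

    package main

    import (
        "errors"
        "fmt"
    )

    type thing struct{}

    func (t *thing) release()     {} // stand-in for newAttempt.finish
    func validate(t *thing) error { return errors.New("validation failed") }
    func install(t *thing)        {}

    // newThing shows the named-return idiom: the deferred closure reads the
    // final value of retErr, so failure cleanup is written exactly once.
    func newThing() (retErr error) {
        t := &thing{}
        defer func() {
            if retErr != nil {
                t.release() // t was never installed, so release it here
            }
        }()
        if err := validate(t); err != nil {
            return err // the defer observes this value through retErr
        }
        install(t)
        return nil
    }

    func main() {
        fmt.Println(newThing()) // validation failed
    }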
vendor/google.golang.org/grpc/version.go (2 changes; generated, vendored)
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.23.0-dev"
+const Version = "1.23.0"