Merge pull request #9530 from gyuho/aaa

functional-tester: run network fault tests with snapshot trigger
Gyuho Lee 2018-04-05 12:51:39 -07:00 committed by GitHub
commit b0f6afc192
32 changed files with 1124 additions and 690 deletions

View File

@ -30,7 +30,7 @@ See [code changes](https://github.com/coreos/etcd/compare/v3.3.0...v3.4.0) and [
- Furthermore, with `--auto-compaction-mode=periodic --auto-compaction-retention=30m` and roughly 1000 writes per minute, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revisions 30000, 33000, and 36000, every 3 minutes, while `v3.3.3` *or later* compacts revisions 30000, 60000, and 90000, every 30 minutes.
- Improve [lease expire/revoke operation performance](https://github.com/coreos/etcd/pull/9418), address [lease scalability issue](https://github.com/coreos/etcd/issues/9496).
- Make [Lease `Lookup` non-blocking with concurrent `Grant`/`Revoke`](https://github.com/coreos/etcd/pull/9229).
- Improve functional tester coverage: enable [TLS](https://github.com/coreos/etcd/issues/8943) and add [liveness mode](https://github.com/coreos/etcd/issues/8977).
- Improve functional tester coverage: use [proxy layer to run network fault tests in CIs](https://github.com/coreos/etcd/pull/9081), enable [TLS](https://github.com/coreos/etcd/issues/8943), add [liveness mode](https://github.com/coreos/etcd/issues/9230), [shuffle test sequence](https://github.com/coreos/etcd/issues/9381).
### Breaking Changes

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
package proxy
import (
"fmt"
@ -25,13 +25,15 @@ import (
"sync"
"time"
"github.com/coreos/etcd/pkg/transport"
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
)
// Proxy defines proxy layer that simulates common network faults,
// Server defines proxy server layer that simulates common network faults,
// such as latency spikes, packet drop/corruption, etc..
type Proxy interface {
type Server interface {
// From returns proxy source address in "scheme://host:port" format.
From() string
// To returns proxy destination address in "scheme://host:port" format.
@ -100,11 +102,11 @@ type Proxy interface {
ResetListener() error
}
type proxy struct {
logger *zap.Logger
type proxyServer struct {
lg *zap.Logger
from, to url.URL
tlsInfo TLSInfo
tlsInfo transport.TLSInfo
dialTimeout time.Duration
bufferSize int
retryInterval time.Duration
@ -141,12 +143,12 @@ type proxy struct {
blackholeRxc chan struct{}
}
// ProxyConfig defines proxy configuration.
type ProxyConfig struct {
// ServerConfig defines proxy server configuration.
type ServerConfig struct {
Logger *zap.Logger
From url.URL
To url.URL
TLSInfo TLSInfo
TLSInfo transport.TLSInfo
DialTimeout time.Duration
BufferSize int
RetryInterval time.Duration
@ -167,11 +169,11 @@ func init() {
}
}
// NewProxy returns a proxy implementation with no iptables/tc dependencies.
// NewServer returns a proxy implementation with no iptables/tc dependencies.
// The proxy layer overhead is <1ms.
func NewProxy(cfg ProxyConfig) Proxy {
p := &proxy{
logger: cfg.Logger,
func NewServer(cfg ServerConfig) Server {
p := &proxyServer{
lg: cfg.Logger,
from: cfg.From,
to: cfg.To,
@ -199,8 +201,8 @@ func NewProxy(cfg ProxyConfig) Proxy {
if p.retryInterval == 0 {
p.retryInterval = defaultRetryInterval
}
if p.logger == nil {
p.logger = defaultLogger
if p.lg == nil {
p.lg = defaultLogger
}
close(p.pauseAcceptc)
close(p.pauseTxc)
@ -216,7 +218,7 @@ func NewProxy(cfg ProxyConfig) Proxy {
var ln net.Listener
var err error
if !p.tlsInfo.Empty() {
ln, err = NewListener(p.from.Host, p.from.Scheme, &p.tlsInfo)
ln, err = transport.NewListener(p.from.Host, p.from.Scheme, &p.tlsInfo)
} else {
ln, err = net.Listen(p.from.Scheme, p.from.Host)
}
@ -230,15 +232,15 @@ func NewProxy(cfg ProxyConfig) Proxy {
p.closeWg.Add(1)
go p.listenAndServe()
p.logger.Info("started proxying", zap.String("from", p.From()), zap.String("to", p.To()))
p.lg.Info("started proxying", zap.String("from", p.From()), zap.String("to", p.To()))
return p
}
func (p *proxy) From() string {
func (p *proxyServer) From() string {
return fmt.Sprintf("%s://%s", p.from.Scheme, p.from.Host)
}
func (p *proxy) To() string {
func (p *proxyServer) To() string {
return fmt.Sprintf("%s://%s", p.to.Scheme, p.to.Host)
}
@ -247,10 +249,10 @@ func (p *proxy) To() string {
// - https://github.com/coreos/etcd/issues/5614
// - https://github.com/coreos/etcd/pull/6918#issuecomment-264093034
func (p *proxy) listenAndServe() {
func (p *proxyServer) listenAndServe() {
defer p.closeWg.Done()
p.logger.Info("proxy is listening on", zap.String("from", p.From()))
p.lg.Info("proxy is listening on", zap.String("from", p.From()))
close(p.readyc)
for {
@ -290,7 +292,7 @@ func (p *proxy) listenAndServe() {
case <-p.donec:
return
}
p.logger.Debug("listener accept error", zap.Error(err))
p.lg.Debug("listener accept error", zap.Error(err))
if strings.HasSuffix(err.Error(), "use of closed network connection") {
select {
@ -298,7 +300,7 @@ func (p *proxy) listenAndServe() {
case <-p.donec:
return
}
p.logger.Debug("listener is closed; retry listening on", zap.String("from", p.From()))
p.lg.Debug("listener is closed; retry listening on", zap.String("from", p.From()))
if err = p.ResetListener(); err != nil {
select {
@ -311,7 +313,7 @@ func (p *proxy) listenAndServe() {
case <-p.donec:
return
}
p.logger.Warn("failed to reset listener", zap.Error(err))
p.lg.Warn("failed to reset listener", zap.Error(err))
}
}
@ -321,7 +323,7 @@ func (p *proxy) listenAndServe() {
var out net.Conn
if !p.tlsInfo.Empty() {
var tp *http.Transport
tp, err = NewTransport(p.tlsInfo, p.dialTimeout)
tp, err = transport.NewTransport(p.tlsInfo, p.dialTimeout)
if err != nil {
select {
case p.errc <- err:
@ -350,7 +352,7 @@ func (p *proxy) listenAndServe() {
case <-p.donec:
return
}
p.logger.Debug("failed to dial", zap.Error(err))
p.lg.Debug("failed to dial", zap.Error(err))
continue
}
@ -369,9 +371,9 @@ func (p *proxy) listenAndServe() {
}
}
func (p *proxy) transmit(dst io.Writer, src io.Reader) { p.ioCopy(dst, src, true) }
func (p *proxy) receive(dst io.Writer, src io.Reader) { p.ioCopy(dst, src, false) }
func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
func (p *proxyServer) transmit(dst io.Writer, src io.Reader) { p.ioCopy(dst, src, true) }
func (p *proxyServer) receive(dst io.Writer, src io.Reader) { p.ioCopy(dst, src, false) }
func (p *proxyServer) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
buf := make([]byte, p.bufferSize)
for {
nr, err := src.Read(buf)
@ -396,7 +398,7 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
case <-p.donec:
return
}
p.logger.Debug("failed to read", zap.Error(err))
p.lg.Debug("failed to read", zap.Error(err))
return
}
if nr == 0 {
@ -432,14 +434,14 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
}
if blackholed {
if proxySend {
p.logger.Debug(
p.lg.Debug(
"dropped",
zap.String("data-size", humanize.Bytes(uint64(nr))),
zap.String("from", p.From()),
zap.String("to", p.To()),
)
} else {
p.logger.Debug(
p.lg.Debug(
"dropped",
zap.String("data-size", humanize.Bytes(uint64(nr))),
zap.String("from", p.To()),
@ -498,9 +500,9 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
return
}
if proxySend {
p.logger.Debug("failed to write while sending", zap.Error(err))
p.lg.Debug("failed to write while sending", zap.Error(err))
} else {
p.logger.Debug("failed to write while receiving", zap.Error(err))
p.lg.Debug("failed to write while receiving", zap.Error(err))
}
return
}
@ -517,14 +519,14 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
return
}
if proxySend {
p.logger.Debug(
p.lg.Debug(
"failed to write while sending; read/write bytes are different",
zap.Int("read-bytes", nr),
zap.Int("write-bytes", nw),
zap.Error(io.ErrShortWrite),
)
} else {
p.logger.Debug(
p.lg.Debug(
"failed to write while receiving; read/write bytes are different",
zap.Int("read-bytes", nr),
zap.Int("write-bytes", nw),
@ -535,14 +537,14 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
}
if proxySend {
p.logger.Debug(
p.lg.Debug(
"transmitted",
zap.String("data-size", humanize.Bytes(uint64(nr))),
zap.String("from", p.From()),
zap.String("to", p.To()),
)
} else {
p.logger.Debug(
p.lg.Debug(
"received",
zap.String("data-size", humanize.Bytes(uint64(nr))),
zap.String("from", p.To()),
@ -553,29 +555,29 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
}
}
func (p *proxy) Ready() <-chan struct{} { return p.readyc }
func (p *proxy) Done() <-chan struct{} { return p.donec }
func (p *proxy) Error() <-chan error { return p.errc }
func (p *proxy) Close() (err error) {
func (p *proxyServer) Ready() <-chan struct{} { return p.readyc }
func (p *proxyServer) Done() <-chan struct{} { return p.donec }
func (p *proxyServer) Error() <-chan error { return p.errc }
func (p *proxyServer) Close() (err error) {
p.closeOnce.Do(func() {
close(p.donec)
p.listenerMu.Lock()
if p.listener != nil {
err = p.listener.Close()
p.logger.Info(
p.lg.Info(
"closed proxy listener",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
p.logger.Sync()
p.lg.Sync()
p.listenerMu.Unlock()
})
p.closeWg.Wait()
return err
}
func (p *proxy) DelayAccept(latency, rv time.Duration) {
func (p *proxyServer) DelayAccept(latency, rv time.Duration) {
if latency <= 0 {
return
}
@ -584,7 +586,7 @@ func (p *proxy) DelayAccept(latency, rv time.Duration) {
p.latencyAccept = d
p.latencyAcceptMu.Unlock()
p.logger.Info(
p.lg.Info(
"set accept latency",
zap.Duration("latency", d),
zap.Duration("given-latency", latency),
@ -594,13 +596,13 @@ func (p *proxy) DelayAccept(latency, rv time.Duration) {
)
}
func (p *proxy) UndelayAccept() {
func (p *proxyServer) UndelayAccept() {
p.latencyAcceptMu.Lock()
d := p.latencyAccept
p.latencyAccept = 0
p.latencyAcceptMu.Unlock()
p.logger.Info(
p.lg.Info(
"removed accept latency",
zap.Duration("latency", d),
zap.String("from", p.From()),
@ -608,14 +610,14 @@ func (p *proxy) UndelayAccept() {
)
}
func (p *proxy) LatencyAccept() time.Duration {
func (p *proxyServer) LatencyAccept() time.Duration {
p.latencyAcceptMu.RLock()
d := p.latencyAccept
p.latencyAcceptMu.RUnlock()
return d
}
func (p *proxy) DelayTx(latency, rv time.Duration) {
func (p *proxyServer) DelayTx(latency, rv time.Duration) {
if latency <= 0 {
return
}
@ -624,7 +626,7 @@ func (p *proxy) DelayTx(latency, rv time.Duration) {
p.latencyTx = d
p.latencyTxMu.Unlock()
p.logger.Info(
p.lg.Info(
"set transmit latency",
zap.Duration("latency", d),
zap.Duration("given-latency", latency),
@ -634,13 +636,13 @@ func (p *proxy) DelayTx(latency, rv time.Duration) {
)
}
func (p *proxy) UndelayTx() {
func (p *proxyServer) UndelayTx() {
p.latencyTxMu.Lock()
d := p.latencyTx
p.latencyTx = 0
p.latencyTxMu.Unlock()
p.logger.Info(
p.lg.Info(
"removed transmit latency",
zap.Duration("latency", d),
zap.String("from", p.From()),
@ -648,14 +650,14 @@ func (p *proxy) UndelayTx() {
)
}
func (p *proxy) LatencyTx() time.Duration {
func (p *proxyServer) LatencyTx() time.Duration {
p.latencyTxMu.RLock()
d := p.latencyTx
p.latencyTxMu.RUnlock()
return d
}
func (p *proxy) DelayRx(latency, rv time.Duration) {
func (p *proxyServer) DelayRx(latency, rv time.Duration) {
if latency <= 0 {
return
}
@ -664,7 +666,7 @@ func (p *proxy) DelayRx(latency, rv time.Duration) {
p.latencyRx = d
p.latencyRxMu.Unlock()
p.logger.Info(
p.lg.Info(
"set receive latency",
zap.Duration("latency", d),
zap.Duration("given-latency", latency),
@ -674,13 +676,13 @@ func (p *proxy) DelayRx(latency, rv time.Duration) {
)
}
func (p *proxy) UndelayRx() {
func (p *proxyServer) UndelayRx() {
p.latencyRxMu.Lock()
d := p.latencyRx
p.latencyRx = 0
p.latencyRxMu.Unlock()
p.logger.Info(
p.lg.Info(
"removed receive latency",
zap.Duration("latency", d),
zap.String("from", p.To()),
@ -688,7 +690,7 @@ func (p *proxy) UndelayRx() {
)
}
func (p *proxy) LatencyRx() time.Duration {
func (p *proxyServer) LatencyRx() time.Duration {
p.latencyRxMu.RLock()
d := p.latencyRx
p.latencyRxMu.RUnlock()
@ -714,19 +716,19 @@ func computeLatency(lat, rv time.Duration) time.Duration {
return lat + time.Duration(int64(sign)*mrand.Int63n(rv.Nanoseconds()))
}
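The randomized latency above means that a call such as DelayTx(5*time.Second, 500*time.Millisecond) applies a delay drawn from roughly 4.5s to 5.5s. A self-contained sketch of the same computation (the helper name is hypothetical and the random sign is an assumption about the elided part of computeLatency; it assumes rv > 0):

package main

import (
	"fmt"
	mrand "math/rand"
	"time"
)

// randomizedLatency mirrors the idea behind computeLatency above:
// perturb the configured latency by up to rv in either direction.
func randomizedLatency(lat, rv time.Duration) time.Duration {
	sign := int64(1)
	if mrand.Intn(2) == 0 {
		sign = -1
	}
	return lat + time.Duration(sign*mrand.Int63n(rv.Nanoseconds()))
}

func main() {
	fmt.Println(randomizedLatency(5*time.Second, 500*time.Millisecond))
}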
func (p *proxy) PauseAccept() {
func (p *proxyServer) PauseAccept() {
p.acceptMu.Lock()
p.pauseAcceptc = make(chan struct{})
p.acceptMu.Unlock()
p.logger.Info(
p.lg.Info(
"paused accepting new connections",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) UnpauseAccept() {
func (p *proxyServer) UnpauseAccept() {
p.acceptMu.Lock()
select {
case <-p.pauseAcceptc: // already unpaused
@ -738,26 +740,26 @@ func (p *proxy) UnpauseAccept() {
}
p.acceptMu.Unlock()
p.logger.Info(
p.lg.Info(
"unpaused accepting new connections",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) PauseTx() {
func (p *proxyServer) PauseTx() {
p.txMu.Lock()
p.pauseTxc = make(chan struct{})
p.txMu.Unlock()
p.logger.Info(
p.lg.Info(
"paused transmit listen",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) UnpauseTx() {
func (p *proxyServer) UnpauseTx() {
p.txMu.Lock()
select {
case <-p.pauseTxc: // already unpaused
@ -769,26 +771,26 @@ func (p *proxy) UnpauseTx() {
}
p.txMu.Unlock()
p.logger.Info(
p.lg.Info(
"unpaused transmit listen",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) PauseRx() {
func (p *proxyServer) PauseRx() {
p.rxMu.Lock()
p.pauseRxc = make(chan struct{})
p.rxMu.Unlock()
p.logger.Info(
p.lg.Info(
"paused receive listen",
zap.String("from", p.To()),
zap.String("to", p.From()),
)
}
func (p *proxy) UnpauseRx() {
func (p *proxyServer) UnpauseRx() {
p.rxMu.Lock()
select {
case <-p.pauseRxc: // already unpaused
@ -800,14 +802,14 @@ func (p *proxy) UnpauseRx() {
}
p.rxMu.Unlock()
p.logger.Info(
p.lg.Info(
"unpaused receive listen",
zap.String("from", p.To()),
zap.String("to", p.From()),
)
}
func (p *proxy) BlackholeTx() {
func (p *proxyServer) BlackholeTx() {
p.txMu.Lock()
select {
case <-p.blackholeTxc: // already blackholed
@ -819,26 +821,26 @@ func (p *proxy) BlackholeTx() {
}
p.txMu.Unlock()
p.logger.Info(
p.lg.Info(
"blackholed transmit",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) UnblackholeTx() {
func (p *proxyServer) UnblackholeTx() {
p.txMu.Lock()
p.blackholeTxc = make(chan struct{})
p.txMu.Unlock()
p.logger.Info(
p.lg.Info(
"unblackholed transmit",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) BlackholeRx() {
func (p *proxyServer) BlackholeRx() {
p.rxMu.Lock()
select {
case <-p.blackholeRxc: // already blackholed
@ -850,73 +852,73 @@ func (p *proxy) BlackholeRx() {
}
p.rxMu.Unlock()
p.logger.Info(
p.lg.Info(
"blackholed receive",
zap.String("from", p.To()),
zap.String("to", p.From()),
)
}
func (p *proxy) UnblackholeRx() {
func (p *proxyServer) UnblackholeRx() {
p.rxMu.Lock()
p.blackholeRxc = make(chan struct{})
p.rxMu.Unlock()
p.logger.Info(
p.lg.Info(
"unblackholed receive",
zap.String("from", p.To()),
zap.String("to", p.From()),
)
}
func (p *proxy) CorruptTx(f func([]byte) []byte) {
func (p *proxyServer) CorruptTx(f func([]byte) []byte) {
p.corruptTxMu.Lock()
p.corruptTx = f
p.corruptTxMu.Unlock()
p.logger.Info(
p.lg.Info(
"corrupting transmit",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) UncorruptTx() {
func (p *proxyServer) UncorruptTx() {
p.corruptTxMu.Lock()
p.corruptTx = nil
p.corruptTxMu.Unlock()
p.logger.Info(
p.lg.Info(
"stopped corrupting transmit",
zap.String("from", p.From()),
zap.String("to", p.To()),
)
}
func (p *proxy) CorruptRx(f func([]byte) []byte) {
func (p *proxyServer) CorruptRx(f func([]byte) []byte) {
p.corruptRxMu.Lock()
p.corruptRx = f
p.corruptRxMu.Unlock()
p.logger.Info(
p.lg.Info(
"corrupting receive",
zap.String("from", p.To()),
zap.String("to", p.From()),
)
}
func (p *proxy) UncorruptRx() {
func (p *proxyServer) UncorruptRx() {
p.corruptRxMu.Lock()
p.corruptRx = nil
p.corruptRxMu.Unlock()
p.logger.Info(
p.lg.Info(
"stopped corrupting receive",
zap.String("from", p.To()),
zap.String("to", p.From()),
)
}
func (p *proxy) ResetListener() error {
func (p *proxyServer) ResetListener() error {
p.listenerMu.Lock()
defer p.listenerMu.Unlock()
@ -930,7 +932,7 @@ func (p *proxy) ResetListener() error {
var ln net.Listener
var err error
if !p.tlsInfo.Empty() {
ln, err = NewListener(p.from.Host, p.from.Scheme, &p.tlsInfo)
ln, err = transport.NewListener(p.from.Host, p.from.Scheme, &p.tlsInfo)
} else {
ln, err = net.Listen(p.from.Scheme, p.from.Host)
}
@ -939,7 +941,7 @@ func (p *proxy) ResetListener() error {
}
p.listener = ln
p.logger.Info(
p.lg.Info(
"reset listener on",
zap.String("from", p.From()),
)
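The diff above moves the fault-injection proxy out of `pkg/transport` into its own `pkg/proxy` package and renames `Proxy`/`ProxyConfig` to `Server`/`ServerConfig`. As a hedged sketch (addresses, durations, and the example logger are illustrative, not taken from this change), a caller would drive the renamed API roughly like this:

package main

import (
	"net/url"
	"time"

	"github.com/coreos/etcd/pkg/proxy"
	"go.uber.org/zap"
)

func main() {
	// Forward traffic arriving on :23790 to an etcd member listening on :2379.
	p := proxy.NewServer(proxy.ServerConfig{
		Logger: zap.NewExample(),
		From:   url.URL{Scheme: "tcp", Host: "localhost:23790"},
		To:     url.URL{Scheme: "tcp", Host: "localhost:2379"},
	})
	<-p.Ready()
	defer p.Close()

	// Inject and clear faults on the forwarded connection.
	p.DelayTx(5*time.Second, 500*time.Millisecond) // latency spike on transmit
	p.UndelayTx()
	p.BlackholeTx() // silently drop transmitted packets
	p.UnblackholeTx()
	p.PauseAccept() // stop accepting new connections
	p.UnpauseAccept()
}

In the functional tester, From/To would instead point at a member's advertise and listen URLs, which is what the agent code later in this change does.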

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
package proxy
import (
"bytes"
@ -28,31 +28,33 @@ import (
"testing"
"time"
"github.com/coreos/etcd/pkg/transport"
"go.uber.org/zap"
)
// enable DebugLevel
var testLogger = zap.NewExample()
var testTLSInfo = TLSInfo{
var testTLSInfo = transport.TLSInfo{
KeyFile: "./fixtures/server.key.insecure",
CertFile: "./fixtures/server.crt",
TrustedCAFile: "./fixtures/ca.crt",
ClientCertAuth: true,
}
func TestProxy_Unix_Insecure(t *testing.T) { testProxy(t, "unix", false, false) }
func TestProxy_TCP_Insecure(t *testing.T) { testProxy(t, "tcp", false, false) }
func TestProxy_Unix_Secure(t *testing.T) { testProxy(t, "unix", true, false) }
func TestProxy_TCP_Secure(t *testing.T) { testProxy(t, "tcp", true, false) }
func TestProxy_Unix_Insecure_DelayTx(t *testing.T) { testProxy(t, "unix", false, true) }
func TestProxy_TCP_Insecure_DelayTx(t *testing.T) { testProxy(t, "tcp", false, true) }
func TestProxy_Unix_Secure_DelayTx(t *testing.T) { testProxy(t, "unix", true, true) }
func TestProxy_TCP_Secure_DelayTx(t *testing.T) { testProxy(t, "tcp", true, true) }
func testProxy(t *testing.T, scheme string, secure bool, delayTx bool) {
func TestServer_Unix_Insecure(t *testing.T) { testServer(t, "unix", false, false) }
func TestServer_TCP_Insecure(t *testing.T) { testServer(t, "tcp", false, false) }
func TestServer_Unix_Secure(t *testing.T) { testServer(t, "unix", true, false) }
func TestServer_TCP_Secure(t *testing.T) { testServer(t, "tcp", true, false) }
func TestServer_Unix_Insecure_DelayTx(t *testing.T) { testServer(t, "unix", false, true) }
func TestServer_TCP_Insecure_DelayTx(t *testing.T) { testServer(t, "tcp", false, true) }
func TestServer_Unix_Secure_DelayTx(t *testing.T) { testServer(t, "unix", true, true) }
func TestServer_TCP_Secure_DelayTx(t *testing.T) { testServer(t, "tcp", true, true) }
func testServer(t *testing.T, scheme string, secure bool, delayTx bool) {
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
if scheme == "tcp" {
ln1, ln2 := listen(t, "tcp", "localhost:0", TLSInfo{}), listen(t, "tcp", "localhost:0", TLSInfo{})
ln1, ln2 := listen(t, "tcp", "localhost:0", transport.TLSInfo{}), listen(t, "tcp", "localhost:0", transport.TLSInfo{})
srcAddr, dstAddr = ln1.Addr().String(), ln2.Addr().String()
ln1.Close()
ln2.Close()
@ -64,12 +66,12 @@ func testProxy(t *testing.T, scheme string, secure bool, delayTx bool) {
}
tlsInfo := testTLSInfo
if !secure {
tlsInfo = TLSInfo{}
tlsInfo = transport.TLSInfo{}
}
ln := listen(t, scheme, dstAddr, tlsInfo)
defer ln.Close()
cfg := ProxyConfig{
cfg := ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -77,7 +79,7 @@ func testProxy(t *testing.T, scheme string, secure bool, delayTx bool) {
if secure {
cfg.TLSInfo = testTLSInfo
}
p := NewProxy(cfg)
p := NewServer(cfg)
<-p.Ready()
defer p.Close()
@ -162,9 +164,9 @@ func testProxy(t *testing.T, scheme string, secure bool, delayTx bool) {
}
}
func TestProxy_Unix_Insecure_DelayAccept(t *testing.T) { testProxyDelayAccept(t, false) }
func TestProxy_Unix_Secure_DelayAccept(t *testing.T) { testProxyDelayAccept(t, true) }
func testProxyDelayAccept(t *testing.T, secure bool) {
func TestServer_Unix_Insecure_DelayAccept(t *testing.T) { testServerDelayAccept(t, false) }
func TestServer_Unix_Secure_DelayAccept(t *testing.T) { testServerDelayAccept(t, true) }
func testServerDelayAccept(t *testing.T, secure bool) {
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
@ -172,13 +174,13 @@ func testProxyDelayAccept(t *testing.T, secure bool) {
}()
tlsInfo := testTLSInfo
if !secure {
tlsInfo = TLSInfo{}
tlsInfo = transport.TLSInfo{}
}
scheme := "unix"
ln := listen(t, scheme, dstAddr, tlsInfo)
defer ln.Close()
cfg := ProxyConfig{
cfg := ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -186,7 +188,7 @@ func testProxyDelayAccept(t *testing.T, secure bool) {
if secure {
cfg.TLSInfo = testTLSInfo
}
p := NewProxy(cfg)
p := NewServer(cfg)
<-p.Ready()
defer p.Close()
@ -221,17 +223,17 @@ func testProxyDelayAccept(t *testing.T, secure bool) {
}
}
func TestProxy_PauseTx(t *testing.T) {
func TestServer_PauseTx(t *testing.T) {
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, TLSInfo{})
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewProxy(ProxyConfig{
p := NewServer(ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -242,7 +244,7 @@ func TestProxy_PauseTx(t *testing.T) {
p.PauseTx()
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
recvc := make(chan []byte)
go func() {
@ -267,17 +269,17 @@ func TestProxy_PauseTx(t *testing.T) {
}
}
func TestProxy_BlackholeTx(t *testing.T) {
func TestServer_BlackholeTx(t *testing.T) {
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, TLSInfo{})
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewProxy(ProxyConfig{
p := NewServer(ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -288,7 +290,7 @@ func TestProxy_BlackholeTx(t *testing.T) {
p.BlackholeTx()
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
recvc := make(chan []byte)
go func() {
@ -305,7 +307,7 @@ func TestProxy_BlackholeTx(t *testing.T) {
// expect different data, old data dropped
data[0]++
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
select {
case d := <-recvc:
@ -317,17 +319,17 @@ func TestProxy_BlackholeTx(t *testing.T) {
}
}
func TestProxy_CorruptTx(t *testing.T) {
func TestServer_CorruptTx(t *testing.T) {
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, TLSInfo{})
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewProxy(ProxyConfig{
p := NewServer(ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -340,29 +342,29 @@ func TestProxy_CorruptTx(t *testing.T) {
return d
})
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); bytes.Equal(d, data) {
t.Fatalf("expected corrupted data, got %q", string(d))
}
p.UncorruptTx()
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected uncorrupted data, got %q", string(d))
}
}
func TestProxy_Shutdown(t *testing.T) {
func TestServer_Shutdown(t *testing.T) {
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, TLSInfo{})
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewProxy(ProxyConfig{
p := NewServer(ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -370,18 +372,18 @@ func TestProxy_Shutdown(t *testing.T) {
<-p.Ready()
defer p.Close()
px, _ := p.(*proxy)
px, _ := p.(*proxyServer)
px.listener.Close()
time.Sleep(200 * time.Millisecond)
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
}
func TestProxy_ShutdownListener(t *testing.T) {
func TestServer_ShutdownListener(t *testing.T) {
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
@ -389,10 +391,10 @@ func TestProxy_ShutdownListener(t *testing.T) {
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, TLSInfo{})
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewProxy(ProxyConfig{
p := NewServer(ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -404,23 +406,23 @@ func TestProxy_ShutdownListener(t *testing.T) {
ln.Close()
time.Sleep(200 * time.Millisecond)
ln = listen(t, scheme, dstAddr, TLSInfo{})
ln = listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, TLSInfo{})
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
}
func TestProxyHTTP_Insecure_DelayTx(t *testing.T) { testProxyHTTP(t, false, true) }
func TestProxyHTTP_Secure_DelayTx(t *testing.T) { testProxyHTTP(t, true, true) }
func TestProxyHTTP_Insecure_DelayRx(t *testing.T) { testProxyHTTP(t, false, false) }
func TestProxyHTTP_Secure_DelayRx(t *testing.T) { testProxyHTTP(t, true, false) }
func testProxyHTTP(t *testing.T, secure, delayTx bool) {
func TestServerHTTP_Insecure_DelayTx(t *testing.T) { testServerHTTP(t, false, true) }
func TestServerHTTP_Secure_DelayTx(t *testing.T) { testServerHTTP(t, true, true) }
func TestServerHTTP_Insecure_DelayRx(t *testing.T) { testServerHTTP(t, false, false) }
func TestServerHTTP_Secure_DelayRx(t *testing.T) { testServerHTTP(t, true, false) }
func testServerHTTP(t *testing.T, secure, delayTx bool) {
scheme := "tcp"
ln1, ln2 := listen(t, scheme, "localhost:0", TLSInfo{}), listen(t, scheme, "localhost:0", TLSInfo{})
ln1, ln2 := listen(t, scheme, "localhost:0", transport.TLSInfo{}), listen(t, scheme, "localhost:0", transport.TLSInfo{})
srcAddr, dstAddr := ln1.Addr().String(), ln2.Addr().String()
ln1.Close()
ln2.Close()
@ -464,7 +466,7 @@ func testProxyHTTP(t *testing.T, secure, delayTx bool) {
}()
time.Sleep(200 * time.Millisecond)
cfg := ProxyConfig{
cfg := ServerConfig{
Logger: testLogger,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
@ -472,7 +474,7 @@ func testProxyHTTP(t *testing.T, secure, delayTx bool) {
if secure {
cfg.TLSInfo = testTLSInfo
}
p := NewProxy(cfg)
p := NewServer(cfg)
<-p.Ready()
defer p.Close()
@ -481,7 +483,7 @@ func testProxyHTTP(t *testing.T, secure, delayTx bool) {
now := time.Now()
var resp *http.Response
if secure {
tp, terr := NewTransport(testTLSInfo, 3*time.Second)
tp, terr := transport.NewTransport(testTLSInfo, 3*time.Second)
if terr != nil {
t.Fatal(terr)
}
@ -517,7 +519,7 @@ func testProxyHTTP(t *testing.T, secure, delayTx bool) {
now = time.Now()
if secure {
tp, terr := NewTransport(testTLSInfo, 3*time.Second)
tp, terr := transport.NewTransport(testTLSInfo, 3*time.Second)
if terr != nil {
t.Fatal(terr)
}
@ -553,10 +555,10 @@ func newUnixAddr() string {
return addr
}
func listen(t *testing.T, scheme, addr string, tlsInfo TLSInfo) (ln net.Listener) {
func listen(t *testing.T, scheme, addr string, tlsInfo transport.TLSInfo) (ln net.Listener) {
var err error
if !tlsInfo.Empty() {
ln, err = NewListener(addr, scheme, &tlsInfo)
ln, err = transport.NewListener(addr, scheme, &tlsInfo)
} else {
ln, err = net.Listen(scheme, addr)
}
@ -566,11 +568,11 @@ func listen(t *testing.T, scheme, addr string, tlsInfo TLSInfo) (ln net.Listener
return ln
}
func send(t *testing.T, data []byte, scheme, addr string, tlsInfo TLSInfo) {
func send(t *testing.T, data []byte, scheme, addr string, tlsInfo transport.TLSInfo) {
var out net.Conn
var err error
if !tlsInfo.Empty() {
tp, terr := NewTransport(tlsInfo, 3*time.Second)
tp, terr := transport.NewTransport(tlsInfo, 3*time.Second)
if terr != nil {
t.Fatal(terr)
}

View File

@ -26,7 +26,7 @@ import (
"syscall"
"time"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/proxy"
"go.uber.org/zap"
)
@ -69,14 +69,14 @@ $ ETCDCTL_API=3 ./bin/etcdctl --endpoints localhost:23790 put foo bar`)
flag.Parse()
cfg := transport.ProxyConfig{
cfg := proxy.ServerConfig{
From: url.URL{Scheme: "tcp", Host: from},
To: url.URL{Scheme: "tcp", Host: to},
}
if verbose {
cfg.Logger = zap.NewExample()
}
p := transport.NewProxy(cfg)
p := proxy.NewServer(cfg)
<-p.Ready()
defer p.Close()

View File

@ -24,7 +24,7 @@ import (
"time"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/proxy"
"github.com/coreos/etcd/tools/functional-tester/rpcpb"
"go.uber.org/zap"
@ -78,7 +78,6 @@ func (srv *Server) handleInitialStartEtcd(req *rpcpb.Request) (*rpcpb.Response,
srv.Member = req.Member
srv.Tester = req.Tester
srv.lg.Info("creating base directory", zap.String("path", srv.Member.BaseDir))
err := fileutil.TouchDirAll(srv.Member.BaseDir)
if err != nil {
return nil, err
@ -90,7 +89,6 @@ func (srv *Server) handleInitialStartEtcd(req *rpcpb.Request) (*rpcpb.Response,
}
srv.creatEtcdCmd()
srv.lg.Info("starting etcd")
err = srv.startEtcdCmd()
if err != nil {
return nil, err
@ -121,8 +119,7 @@ func (srv *Server) startProxy() error {
return err
}
srv.lg.Info("starting proxy on client traffic", zap.String("url", advertiseClientURL.String()))
srv.advertiseClientPortToProxy[advertiseClientURLPort] = transport.NewProxy(transport.ProxyConfig{
srv.advertiseClientPortToProxy[advertiseClientURLPort] = proxy.NewServer(proxy.ServerConfig{
Logger: srv.lg,
From: *advertiseClientURL,
To: *listenClientURL,
@ -145,8 +142,7 @@ func (srv *Server) startProxy() error {
return err
}
srv.lg.Info("starting proxy on peer traffic", zap.String("url", advertisePeerURL.String()))
srv.advertisePeerPortToProxy[advertisePeerURLPort] = transport.NewProxy(transport.ProxyConfig{
srv.advertisePeerPortToProxy[advertisePeerURLPort] = proxy.NewServer(proxy.ServerConfig{
Logger: srv.lg,
From: *advertisePeerURL,
To: *listenPeerURL,
@ -164,11 +160,6 @@ func (srv *Server) startProxy() error {
func (srv *Server) stopProxy() {
if srv.Member.EtcdClientProxy && len(srv.advertiseClientPortToProxy) > 0 {
for port, px := range srv.advertiseClientPortToProxy {
srv.lg.Info("closing proxy",
zap.Int("port", port),
zap.String("from", px.From()),
zap.String("to", px.To()),
)
if err := px.Close(); err != nil {
srv.lg.Warn("failed to close proxy", zap.Int("port", port))
continue
@ -185,15 +176,10 @@ func (srv *Server) stopProxy() {
zap.String("to", px.To()),
)
}
srv.advertiseClientPortToProxy = make(map[int]transport.Proxy)
srv.advertiseClientPortToProxy = make(map[int]proxy.Server)
}
if srv.Member.EtcdPeerProxy && len(srv.advertisePeerPortToProxy) > 0 {
for port, px := range srv.advertisePeerPortToProxy {
srv.lg.Info("closing proxy",
zap.Int("port", port),
zap.String("from", px.From()),
zap.String("to", px.To()),
)
if err := px.Close(); err != nil {
srv.lg.Warn("failed to close proxy", zap.Int("port", port))
continue
@ -210,12 +196,11 @@ func (srv *Server) stopProxy() {
zap.String("to", px.To()),
)
}
srv.advertisePeerPortToProxy = make(map[int]transport.Proxy)
srv.advertisePeerPortToProxy = make(map[int]proxy.Server)
}
}
func (srv *Server) createEtcdFile() error {
srv.lg.Info("creating etcd log file", zap.String("path", srv.Member.EtcdLogPath))
var err error
srv.etcdLogFile, err = os.Create(srv.Member.EtcdLogPath)
if err != nil {
@ -273,7 +258,6 @@ func (srv *Server) handleRestartEtcd() (*rpcpb.Response, error) {
func (srv *Server) handleKillEtcd() (*rpcpb.Response, error) {
srv.stopProxy()
srv.lg.Info("killing etcd", zap.String("signal", syscall.SIGTERM.String()))
err := stopWithSig(srv.etcdCmd, syscall.SIGTERM)
if err != nil {
return nil, err
@ -290,7 +274,6 @@ func (srv *Server) handleFailArchive() (*rpcpb.Response, error) {
srv.stopProxy()
// exit with stacktrace
srv.lg.Info("killing etcd", zap.String("signal", syscall.SIGQUIT.String()))
err := stopWithSig(srv.etcdCmd, syscall.SIGQUIT)
if err != nil {
return nil, err
@ -301,7 +284,6 @@ func (srv *Server) handleFailArchive() (*rpcpb.Response, error) {
srv.etcdLogFile.Close()
// TODO: support separate WAL directory
srv.lg.Info("archiving data", zap.String("base-dir", srv.Member.BaseDir))
if err = archive(
srv.Member.BaseDir,
srv.Member.EtcdLogPath,
@ -329,14 +311,12 @@ func (srv *Server) handleFailArchive() (*rpcpb.Response, error) {
// stop proxy, etcd, delete data directory
func (srv *Server) handleDestroyEtcdAgent() (*rpcpb.Response, error) {
srv.lg.Info("killing etcd", zap.String("signal", syscall.SIGTERM.String()))
err := stopWithSig(srv.etcdCmd, syscall.SIGTERM)
if err != nil {
return nil, err
}
srv.lg.Info("killed etcd", zap.String("signal", syscall.SIGTERM.String()))
srv.lg.Info("removing base directory", zap.String("dir", srv.Member.BaseDir))
err = os.RemoveAll(srv.Member.BaseDir)
if err != nil {
return nil, err
@ -347,12 +327,10 @@ func (srv *Server) handleDestroyEtcdAgent() (*rpcpb.Response, error) {
srv.Stop()
for port, px := range srv.advertiseClientPortToProxy {
srv.lg.Info("closing proxy", zap.Int("client-port", port))
err := px.Close()
srv.lg.Info("closed proxy", zap.Int("client-port", port), zap.Error(err))
}
for port, px := range srv.advertisePeerPortToProxy {
srv.lg.Info("closing proxy", zap.Int("peer-port", port))
err := px.Close()
srv.lg.Info("closed proxy", zap.Int("peer-port", port), zap.Error(err))
}
@ -390,7 +368,7 @@ func (srv *Server) handleUnblackholePeerPortTxRx() (*rpcpb.Response, error) {
}
func (srv *Server) handleDelayPeerPortTxRx() (*rpcpb.Response, error) {
lat := time.Duration(srv.Tester.DelayLatencyMs) * time.Millisecond
lat := time.Duration(srv.Tester.UpdatedDelayLatencyMs) * time.Millisecond
rv := time.Duration(srv.Tester.DelayLatencyMsRv) * time.Millisecond
for port, px := range srv.advertisePeerPortToProxy {
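The handler now derives its base latency from Tester.UpdatedDelayLatencyMs, the final (possibly randomized) value, rather than the original DelayLatencyMs. The loop body is elided by the diff; as a hedged sketch of the idea, a standalone helper grounded in the Server interface shown earlier (not the handler's verbatim code) would look like:

package agent // illustrative package name

import (
	"time"

	"github.com/coreos/etcd/pkg/proxy"
	"go.uber.org/zap"
)

// delayPeerProxies applies transmit/receive latency to every peer proxy,
// using the DelayTx/DelayRx methods of the proxy Server interface.
func delayPeerProxies(lg *zap.Logger, proxies map[int]proxy.Server, latMs, rvMs uint32) {
	lat := time.Duration(latMs) * time.Millisecond
	rv := time.Duration(rvMs) * time.Millisecond
	for port, px := range proxies {
		px.DelayTx(lat, rv) // outbound peer traffic
		px.DelayRx(lat, rv) // inbound peer traffic
		lg.Info("delayed peer port", zap.Int("port", port), zap.Duration("latency", lat))
	}
}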

View File

@ -21,7 +21,7 @@ import (
"os/exec"
"strings"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/proxy"
"github.com/coreos/etcd/tools/functional-tester/rpcpb"
"go.uber.org/zap"
@ -50,8 +50,8 @@ type Server struct {
etcdLogFile *os.File
// forward incoming advertise URLs traffic to listen URLs
advertiseClientPortToProxy map[int]transport.Proxy
advertisePeerPortToProxy map[int]transport.Proxy
advertiseClientPortToProxy map[int]proxy.Server
advertisePeerPortToProxy map[int]proxy.Server
}
// NewServer returns a new agent server.
@ -65,8 +65,8 @@ func NewServer(
network: network,
address: address,
last: rpcpb.Operation_NotStarted,
advertiseClientPortToProxy: make(map[int]transport.Proxy),
advertisePeerPortToProxy: make(map[int]transport.Proxy),
advertiseClientPortToProxy: make(map[int]proxy.Server),
advertisePeerPortToProxy: make(map[int]proxy.Server),
}
}

View File

@ -24,15 +24,22 @@ var etcdFields = []string{
"Name",
"DataDir",
"WALDir",
"HeartbeatIntervalMs",
"ElectionTimeoutMs",
"ListenClientURLs",
"AdvertiseClientURLs",
"ListenPeerURLs",
"InitialAdvertisePeerURLs",
"InitialCluster",
"InitialClusterState",
"InitialClusterToken",
"SnapshotCount",
"QuotaBackendBytes",
"PreVote",
"InitialCorruptCheck",
}

View File

@ -24,6 +24,8 @@ func TestEtcdFlags(t *testing.T) {
Name: "s1",
DataDir: "/tmp/etcd-agent-data-1/etcd.data",
WALDir: "/tmp/etcd-agent-data-1/etcd.data/member/wal",
HeartbeatIntervalMs: 100,
ElectionTimeoutMs: 1000,
ListenClientURLs: []string{"127.0.0.1:1379"},
AdvertiseClientURLs: []string{"127.0.0.1:13790"},
ListenPeerURLs: []string{"127.0.0.1:1380"},
@ -40,6 +42,8 @@ func TestEtcdFlags(t *testing.T) {
"--name=s1",
"--data-dir=/tmp/etcd-agent-data-1/etcd.data",
"--wal-dir=/tmp/etcd-agent-data-1/etcd.data/member/wal",
"--heartbeat-interval=100",
"--election-timeout=1000",
"--listen-client-urls=127.0.0.1:1379",
"--advertise-client-urls=127.0.0.1:13790",
"--listen-peer-urls=127.0.0.1:1380",

View File

@ -92,63 +92,99 @@ func (Operation) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []
type FailureCase int32
const (
FailureCase_KILL_ONE_FOLLOWER FailureCase = 0
FailureCase_KILL_LEADER FailureCase = 1
FailureCase_KILL_ONE_FOLLOWER_FOR_LONG FailureCase = 2
FailureCase_KILL_LEADER_FOR_LONG FailureCase = 3
FailureCase_KILL_QUORUM FailureCase = 4
FailureCase_KILL_ALL FailureCase = 5
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 6
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER FailureCase = 7
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL FailureCase = 8
FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 9
FailureCase_DELAY_PEER_PORT_TX_RX_LEADER FailureCase = 10
FailureCase_DELAY_PEER_PORT_TX_RX_ALL FailureCase = 11
FailureCase_KILL_ONE_FOLLOWER FailureCase = 0
FailureCase_KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 1
FailureCase_KILL_LEADER FailureCase = 2
FailureCase_KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 3
FailureCase_KILL_QUORUM FailureCase = 4
FailureCase_KILL_ALL FailureCase = 5
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 100
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 101
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER FailureCase = 102
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 103
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM FailureCase = 104
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL FailureCase = 105
FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 200
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 201
FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 202
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 203
FailureCase_DELAY_PEER_PORT_TX_RX_LEADER FailureCase = 204
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER FailureCase = 205
FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 206
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 207
FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM FailureCase = 208
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM FailureCase = 209
FailureCase_DELAY_PEER_PORT_TX_RX_ALL FailureCase = 210
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ALL FailureCase = 211
// NO_FAIL_WITH_STRESS runs no-op failure injection for a specified period
// while stressers are still sending requests.
FailureCase_NO_FAIL_WITH_STRESS FailureCase = 100
FailureCase_NO_FAIL_WITH_STRESS FailureCase = 300
// NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS runs no-op failure injection
// with all stressers stopped.
FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS FailureCase = 101
FailureCase_FAILPOINTS FailureCase = 200
FailureCase_EXTERNAL FailureCase = 300
FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS FailureCase = 301
FailureCase_FAILPOINTS FailureCase = 400
FailureCase_EXTERNAL FailureCase = 500
)
var FailureCase_name = map[int32]string{
0: "KILL_ONE_FOLLOWER",
1: "KILL_LEADER",
2: "KILL_ONE_FOLLOWER_FOR_LONG",
3: "KILL_LEADER_FOR_LONG",
1: "KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
2: "KILL_LEADER",
3: "KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT",
4: "KILL_QUORUM",
5: "KILL_ALL",
6: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
7: "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
8: "BLACKHOLE_PEER_PORT_TX_RX_ALL",
9: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
10: "DELAY_PEER_PORT_TX_RX_LEADER",
11: "DELAY_PEER_PORT_TX_RX_ALL",
100: "NO_FAIL_WITH_STRESS",
101: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
200: "FAILPOINTS",
300: "EXTERNAL",
100: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
101: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
102: "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
103: "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
104: "BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
105: "BLACKHOLE_PEER_PORT_TX_RX_ALL",
200: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
201: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
202: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
203: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
204: "DELAY_PEER_PORT_TX_RX_LEADER",
205: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
206: "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
207: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
208: "DELAY_PEER_PORT_TX_RX_QUORUM",
209: "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
210: "DELAY_PEER_PORT_TX_RX_ALL",
211: "RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
300: "NO_FAIL_WITH_STRESS",
301: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
400: "FAILPOINTS",
500: "EXTERNAL",
}
var FailureCase_value = map[string]int32{
"KILL_ONE_FOLLOWER": 0,
"KILL_LEADER": 1,
"KILL_ONE_FOLLOWER_FOR_LONG": 2,
"KILL_LEADER_FOR_LONG": 3,
"KILL_QUORUM": 4,
"KILL_ALL": 5,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 6,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER": 7,
"BLACKHOLE_PEER_PORT_TX_RX_ALL": 8,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 9,
"DELAY_PEER_PORT_TX_RX_LEADER": 10,
"DELAY_PEER_PORT_TX_RX_ALL": 11,
"NO_FAIL_WITH_STRESS": 100,
"NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": 101,
"FAILPOINTS": 200,
"EXTERNAL": 300,
"KILL_ONE_FOLLOWER": 0,
"KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 1,
"KILL_LEADER": 2,
"KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT": 3,
"KILL_QUORUM": 4,
"KILL_ALL": 5,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 100,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 101,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER": 102,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 103,
"BLACKHOLE_PEER_PORT_TX_RX_QUORUM": 104,
"BLACKHOLE_PEER_PORT_TX_RX_ALL": 105,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 200,
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 201,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 202,
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 203,
"DELAY_PEER_PORT_TX_RX_LEADER": 204,
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER": 205,
"DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 206,
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 207,
"DELAY_PEER_PORT_TX_RX_QUORUM": 208,
"RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM": 209,
"DELAY_PEER_PORT_TX_RX_ALL": 210,
"RANDOM_DELAY_PEER_PORT_TX_RX_ALL": 211,
"NO_FAIL_WITH_STRESS": 300,
"NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": 301,
"FAILPOINTS": 400,
"EXTERNAL": 500,
}
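The failure cases are renumbered into ranges (single digits for kill, 10x for blackhole, 20x for delay, 300+ for no-fail, failpoints, and external) and gain *_UNTIL_TRIGGER_SNAPSHOT variants, matching the snapshot-trigger tests this PR adds. A small sketch of round-tripping a case through the generated maps (import path as used elsewhere in this change):

package main

import (
	"fmt"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)

func main() {
	// Look up a case by name via the generated value map...
	v := rpcpb.FailureCase_value["BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT"]
	fc := rpcpb.FailureCase(v)

	// ...and go back to the name via the generated String method.
	fmt.Println(int32(fc), fc.String()) // 101 BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
}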
func (x FailureCase) String() string {
@ -190,20 +226,26 @@ func (x StressType) String() string {
func (StressType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
type Etcd struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
WALDir string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`
ListenClientURLs []string `protobuf:"bytes,4,rep,name=ListenClientURLs" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
AdvertiseClientURLs []string `protobuf:"bytes,5,rep,name=AdvertiseClientURLs" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
ListenPeerURLs []string `protobuf:"bytes,6,rep,name=ListenPeerURLs" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
InitialAdvertisePeerURLs []string `protobuf:"bytes,7,rep,name=InitialAdvertisePeerURLs" json:"InitialAdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
InitialCluster string `protobuf:"bytes,8,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
InitialClusterState string `protobuf:"bytes,9,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
InitialClusterToken string `protobuf:"bytes,10,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`
SnapshotCount int64 `protobuf:"varint,11,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
QuotaBackendBytes int64 `protobuf:"varint,12,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
PreVote bool `protobuf:"varint,13,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
InitialCorruptCheck bool `protobuf:"varint,14,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
WALDir string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`
// HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
// Default value is 100, which is 100ms.
HeartbeatIntervalMs int64 `protobuf:"varint,11,opt,name=HeartbeatIntervalMs,proto3" json:"HeartbeatIntervalMs,omitempty" yaml:"heartbeat-interval"`
// ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
// Default value is 1000, which is 1s.
ElectionTimeoutMs int64 `protobuf:"varint,12,opt,name=ElectionTimeoutMs,proto3" json:"ElectionTimeoutMs,omitempty" yaml:"election-timeout"`
ListenClientURLs []string `protobuf:"bytes,21,rep,name=ListenClientURLs" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
AdvertiseClientURLs []string `protobuf:"bytes,22,rep,name=AdvertiseClientURLs" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
ListenPeerURLs []string `protobuf:"bytes,23,rep,name=ListenPeerURLs" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
InitialAdvertisePeerURLs []string `protobuf:"bytes,24,rep,name=InitialAdvertisePeerURLs" json:"InitialAdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
InitialCluster string `protobuf:"bytes,31,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
InitialClusterState string `protobuf:"bytes,32,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
InitialClusterToken string `protobuf:"bytes,33,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`
SnapshotCount int64 `protobuf:"varint,41,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
QuotaBackendBytes int64 `protobuf:"varint,42,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
PreVote bool `protobuf:"varint,43,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
InitialCorruptCheck bool `protobuf:"varint,44,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
}
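The two new fields correspond to etcd's --heartbeat-interval and --election-timeout flags, as the agent flags test earlier in this diff shows. A hedged sketch of that mapping (the helper name is hypothetical; the real conversion lives in the agent's flag builder, not here):

package main

import (
	"fmt"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)

// etcdTimingFlags illustrates how the new fields surface as etcd command-line flags.
func etcdTimingFlags(e *rpcpb.Etcd) []string {
	return []string{
		fmt.Sprintf("--heartbeat-interval=%d", e.HeartbeatIntervalMs),
		fmt.Sprintf("--election-timeout=%d", e.ElectionTimeoutMs),
	}
}

func main() {
	fmt.Println(etcdTimingFlags(&rpcpb.Etcd{HeartbeatIntervalMs: 100, ElectionTimeoutMs: 1000}))
	// [--heartbeat-interval=100 --election-timeout=1000]
}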
func (m *Etcd) Reset() { *m = Etcd{} }
@ -249,6 +291,10 @@ type Tester struct {
DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"`
// DelayLatencyMsRv is the delay latency random variable in milliseconds.
DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"`
// UpdatedDelayLatencyMs is the updated delay latency, in milliseconds,
// to inject into the simulated slow network. It is the final latency to apply,
// in case the latency value was randomly generated from the given delay latency field.
UpdatedDelayLatencyMs uint32 `protobuf:"varint,13,opt,name=UpdatedDelayLatencyMs,proto3" json:"UpdatedDelayLatencyMs,omitempty" yaml:"updated-delay-latency-ms"`
// RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
// ExitOnFailure is true, then exit tester on first failure.
@ -467,9 +513,21 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRpc(dAtA, i, uint64(len(m.WALDir)))
i += copy(dAtA[i:], m.WALDir)
}
if m.HeartbeatIntervalMs != 0 {
dAtA[i] = 0x58
i++
i = encodeVarintRpc(dAtA, i, uint64(m.HeartbeatIntervalMs))
}
if m.ElectionTimeoutMs != 0 {
dAtA[i] = 0x60
i++
i = encodeVarintRpc(dAtA, i, uint64(m.ElectionTimeoutMs))
}
if len(m.ListenClientURLs) > 0 {
for _, s := range m.ListenClientURLs {
dAtA[i] = 0x22
dAtA[i] = 0xaa
i++
dAtA[i] = 0x1
i++
l = len(s)
for l >= 1<<7 {
@ -484,7 +542,9 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
}
if len(m.AdvertiseClientURLs) > 0 {
for _, s := range m.AdvertiseClientURLs {
dAtA[i] = 0x2a
dAtA[i] = 0xb2
i++
dAtA[i] = 0x1
i++
l = len(s)
for l >= 1<<7 {
@ -499,7 +559,9 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
}
if len(m.ListenPeerURLs) > 0 {
for _, s := range m.ListenPeerURLs {
dAtA[i] = 0x32
dAtA[i] = 0xba
i++
dAtA[i] = 0x1
i++
l = len(s)
for l >= 1<<7 {
@ -514,7 +576,9 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
}
if len(m.InitialAdvertisePeerURLs) > 0 {
for _, s := range m.InitialAdvertisePeerURLs {
dAtA[i] = 0x3a
dAtA[i] = 0xc2
i++
dAtA[i] = 0x1
i++
l = len(s)
for l >= 1<<7 {
@ -528,35 +592,47 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
}
}
if len(m.InitialCluster) > 0 {
dAtA[i] = 0x42
dAtA[i] = 0xfa
i++
dAtA[i] = 0x1
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialCluster)))
i += copy(dAtA[i:], m.InitialCluster)
}
if len(m.InitialClusterState) > 0 {
dAtA[i] = 0x4a
dAtA[i] = 0x82
i++
dAtA[i] = 0x2
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterState)))
i += copy(dAtA[i:], m.InitialClusterState)
}
if len(m.InitialClusterToken) > 0 {
dAtA[i] = 0x52
dAtA[i] = 0x8a
i++
dAtA[i] = 0x2
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterToken)))
i += copy(dAtA[i:], m.InitialClusterToken)
}
if m.SnapshotCount != 0 {
dAtA[i] = 0x58
dAtA[i] = 0xc8
i++
dAtA[i] = 0x2
i++
i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotCount))
}
if m.QuotaBackendBytes != 0 {
dAtA[i] = 0x60
dAtA[i] = 0xd0
i++
dAtA[i] = 0x2
i++
i = encodeVarintRpc(dAtA, i, uint64(m.QuotaBackendBytes))
}
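The tag bytes change because the Etcd field numbers move out of the 1-15 range (ListenClientURLs is now field 21, InitialCluster field 31, and so on), so each protobuf key takes two varint bytes instead of one; the Size() changes further below (n += 2 instead of n += 1) account for the same extra byte. For example, field 21 with wire type 2 (length-delimited) has key (21<<3)|2 = 170 = 0xaa, whose varint encoding is the 0xaa 0x01 pair written above. A standalone sketch of the key computation (generic protobuf encoding, not etcd-specific code):

package main

import "fmt"

// protoKey varint-encodes a protobuf field key from a field number and wire type.
func protoKey(fieldNum, wireType uint64) []byte {
	x := fieldNum<<3 | wireType
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

func main() {
	fmt.Printf("% x\n", protoKey(21, 2)) // aa 01 -> ListenClientURLs
	fmt.Printf("% x\n", protoKey(31, 2)) // fa 01 -> InitialCluster
	fmt.Printf("% x\n", protoKey(11, 0)) // 58    -> HeartbeatIntervalMs
}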
if m.PreVote {
dAtA[i] = 0x68
dAtA[i] = 0xd8
i++
dAtA[i] = 0x2
i++
if m.PreVote {
dAtA[i] = 1
@ -566,7 +642,9 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
i++
}
if m.InitialCorruptCheck {
dAtA[i] = 0x70
dAtA[i] = 0xe0
i++
dAtA[i] = 0x2
i++
if m.InitialCorruptCheck {
dAtA[i] = 1
@ -723,6 +801,11 @@ func (m *Tester) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMsRv))
}
if m.UpdatedDelayLatencyMs != 0 {
dAtA[i] = 0x68
i++
i = encodeVarintRpc(dAtA, i, uint64(m.UpdatedDelayLatencyMs))
}
if m.RoundLimit != 0 {
dAtA[i] = 0xa8
i++
@ -1005,53 +1088,59 @@ func (m *Etcd) Size() (n int) {
if l > 0 {
n += 1 + l + sovRpc(uint64(l))
}
if m.HeartbeatIntervalMs != 0 {
n += 1 + sovRpc(uint64(m.HeartbeatIntervalMs))
}
if m.ElectionTimeoutMs != 0 {
n += 1 + sovRpc(uint64(m.ElectionTimeoutMs))
}
if len(m.ListenClientURLs) > 0 {
for _, s := range m.ListenClientURLs {
l = len(s)
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
}
if len(m.AdvertiseClientURLs) > 0 {
for _, s := range m.AdvertiseClientURLs {
l = len(s)
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
}
if len(m.ListenPeerURLs) > 0 {
for _, s := range m.ListenPeerURLs {
l = len(s)
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
}
if len(m.InitialAdvertisePeerURLs) > 0 {
for _, s := range m.InitialAdvertisePeerURLs {
l = len(s)
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
}
l = len(m.InitialCluster)
if l > 0 {
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
l = len(m.InitialClusterState)
if l > 0 {
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
l = len(m.InitialClusterToken)
if l > 0 {
n += 1 + l + sovRpc(uint64(l))
n += 2 + l + sovRpc(uint64(l))
}
if m.SnapshotCount != 0 {
n += 1 + sovRpc(uint64(m.SnapshotCount))
n += 2 + sovRpc(uint64(m.SnapshotCount))
}
if m.QuotaBackendBytes != 0 {
n += 1 + sovRpc(uint64(m.QuotaBackendBytes))
n += 2 + sovRpc(uint64(m.QuotaBackendBytes))
}
if m.PreVote {
n += 2
n += 3
}
if m.InitialCorruptCheck {
n += 2
n += 3
}
return n
}
@ -1116,6 +1205,9 @@ func (m *Tester) Size() (n int) {
if m.DelayLatencyMsRv != 0 {
n += 1 + sovRpc(uint64(m.DelayLatencyMsRv))
}
if m.UpdatedDelayLatencyMs != 0 {
n += 1 + sovRpc(uint64(m.UpdatedDelayLatencyMs))
}
if m.RoundLimit != 0 {
n += 2 + sovRpc(uint64(m.RoundLimit))
}
@ -1343,7 +1435,45 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.WALDir = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
case 11:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatIntervalMs", wireType)
}
m.HeartbeatIntervalMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.HeartbeatIntervalMs |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ElectionTimeoutMs", wireType)
}
m.ElectionTimeoutMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ElectionTimeoutMs |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 21:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListenClientURLs", wireType)
}
@ -1372,7 +1502,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.ListenClientURLs = append(m.ListenClientURLs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 5:
case 22:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AdvertiseClientURLs", wireType)
}
@ -1401,7 +1531,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.AdvertiseClientURLs = append(m.AdvertiseClientURLs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 6:
case 23:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListenPeerURLs", wireType)
}
@ -1430,7 +1560,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.ListenPeerURLs = append(m.ListenPeerURLs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 7:
case 24:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InitialAdvertisePeerURLs", wireType)
}
@ -1459,7 +1589,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.InitialAdvertisePeerURLs = append(m.InitialAdvertisePeerURLs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 8:
case 31:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InitialCluster", wireType)
}
@ -1488,7 +1618,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.InitialCluster = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 9:
case 32:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterState", wireType)
}
@ -1517,7 +1647,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.InitialClusterState = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 10:
case 33:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterToken", wireType)
}
@ -1546,7 +1676,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
m.InitialClusterToken = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 11:
case 41:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
}
@ -1565,7 +1695,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
break
}
}
case 12:
case 42:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field QuotaBackendBytes", wireType)
}
@ -1584,7 +1714,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
break
}
}
case 13:
case 43:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PreVote", wireType)
}
@ -1604,7 +1734,7 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
}
m.PreVote = bool(v != 0)
case 14:
case 44:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field InitialCorruptCheck", wireType)
}
@ -2087,6 +2217,25 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
break
}
}
case 13:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field UpdatedDelayLatencyMs", wireType)
}
m.UpdatedDelayLatencyMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.UpdatedDelayLatencyMs |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 21:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RoundLimit", wireType)
@ -2846,122 +2995,137 @@ var (
func init() { proto.RegisterFile("rpcpb/rpc.proto", fileDescriptorRpc) }
var fileDescriptorRpc = []byte{
// 1870 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x58, 0x7f, 0x72, 0xdb, 0xb8,
0x15, 0xb6, 0x6c, 0xcb, 0xb6, 0xe0, 0x5f, 0x0c, 0x6c, 0xc7, 0x8c, 0x93, 0x98, 0x5e, 0x6e, 0x37,
0x75, 0x3d, 0x43, 0xa7, 0xcd, 0x76, 0x3a, 0xd3, 0x9d, 0xed, 0xa4, 0x92, 0xcc, 0xac, 0x5d, 0x33,
0x92, 0x02, 0xc9, 0x49, 0xfa, 0x97, 0x4a, 0x51, 0x90, 0xc4, 0x9a, 0x22, 0x19, 0x00, 0xf2, 0x4a,
0x7b, 0x81, 0x4e, 0x6f, 0xd0, 0x43, 0xb4, 0xf7, 0x48, 0xdb, 0x9d, 0x4e, 0x4f, 0xa0, 0xb6, 0xe9,
0x0d, 0x74, 0x81, 0x76, 0x00, 0x50, 0x12, 0x28, 0xc9, 0xed, 0x7f, 0xc2, 0x7b, 0xdf, 0xf7, 0xf1,
0xe1, 0x01, 0xef, 0x3d, 0xd8, 0x60, 0x97, 0xc4, 0x5e, 0xdc, 0x78, 0x4e, 0x62, 0xef, 0x3c, 0x26,
0x11, 0x8b, 0x60, 0x56, 0x18, 0x8e, 0xac, 0xb6, 0xcf, 0x3a, 0xbd, 0xc6, 0xb9, 0x17, 0x75, 0x9f,
0xb7, 0xa3, 0x76, 0xf4, 0x5c, 0x78, 0x1b, 0xbd, 0x96, 0x58, 0x89, 0x85, 0xf8, 0x25, 0x59, 0xe6,
0xf7, 0xeb, 0x60, 0xd5, 0x66, 0x5e, 0x13, 0x7e, 0x0e, 0x56, 0x4b, 0x6e, 0x17, 0xeb, 0x99, 0x93,
0xcc, 0x69, 0xae, 0xb0, 0x3b, 0x1a, 0x1a, 0x9b, 0x03, 0xb7, 0x1b, 0x7c, 0x65, 0x86, 0x6e, 0x17,
0x9b, 0x48, 0x38, 0xa1, 0x05, 0xd6, 0x2f, 0x5c, 0xe6, 0x5e, 0xf8, 0x44, 0x5f, 0x16, 0xb8, 0xbd,
0xd1, 0xd0, 0xd8, 0x95, 0xb8, 0xa6, 0xcb, 0x5c, 0xab, 0xe9, 0x13, 0x13, 0x8d, 0x31, 0xf0, 0x0c,
0xac, 0xbd, 0xcb, 0x3b, 0x1c, 0xbd, 0x22, 0xd0, 0x70, 0x34, 0x34, 0x76, 0x24, 0xfa, 0x5b, 0x37,
0x90, 0xe0, 0x04, 0x01, 0xaf, 0x80, 0xe6, 0xf8, 0x94, 0xe1, 0xb0, 0x18, 0xf8, 0x38, 0x64, 0x37,
0xc8, 0xa1, 0xfa, 0xea, 0xc9, 0xca, 0x69, 0xae, 0xf0, 0x74, 0x34, 0x34, 0x1e, 0x49, 0x56, 0x20,
0x10, 0x96, 0x27, 0x20, 0x56, 0x8f, 0x04, 0xd4, 0x44, 0x73, 0x34, 0x88, 0xc0, 0x5e, 0xbe, 0x79,
0x87, 0x09, 0xf3, 0x29, 0x56, 0xd4, 0xb2, 0x42, 0xed, 0x64, 0x34, 0x34, 0x9e, 0x48, 0x35, 0x77,
0x0c, 0x4a, 0x0b, 0x2e, 0x22, 0xc3, 0x22, 0xd8, 0x91, 0xdf, 0xa9, 0x60, 0x4c, 0x84, 0xdc, 0x9a,
0x90, 0x7b, 0x3c, 0x1a, 0x1a, 0x87, 0xa9, 0xe0, 0x62, 0x8c, 0x49, 0xa2, 0x34, 0x43, 0x81, 0x0d,
0xa0, 0x5f, 0x85, 0x3e, 0xf3, 0xdd, 0x60, 0xf2, 0x89, 0x89, 0xdc, 0xba, 0x90, 0x7b, 0x36, 0x1a,
0x1a, 0xa6, 0x94, 0xf3, 0x25, 0xd2, 0x9a, 0x46, 0xa9, 0x28, 0xdf, 0xab, 0x03, 0x0b, 0x60, 0x27,
0xf1, 0x15, 0x83, 0x1e, 0x65, 0x98, 0xe8, 0x1b, 0x22, 0xf7, 0x47, 0xa3, 0xa1, 0xf1, 0x30, 0xad,
0xec, 0x49, 0x80, 0x89, 0x66, 0x18, 0x3c, 0x81, 0x69, 0x4b, 0x95, 0xb9, 0x0c, 0xeb, 0x39, 0x21,
0xa4, 0x24, 0x70, 0x46, 0xc8, 0xa2, 0x1c, 0x66, 0xa2, 0x45, 0xe4, 0x79, 0xcd, 0x5a, 0x74, 0x8b,
0x43, 0x1d, 0xfc, 0x3f, 0x4d, 0xc6, 0x61, 0x73, 0x9a, 0x82, 0x0c, 0x5f, 0x82, 0xed, 0x6a, 0xe8,
0xc6, 0xb4, 0x13, 0xb1, 0x62, 0xd4, 0x0b, 0x99, 0xbe, 0x79, 0x92, 0x39, 0x5d, 0x29, 0x3c, 0x1a,
0x0d, 0x8d, 0x03, 0xa9, 0x46, 0x13, 0xb7, 0xe5, 0x71, 0xbf, 0x89, 0xd2, 0x78, 0xe8, 0x80, 0x07,
0x6f, 0x7a, 0x11, 0x73, 0x0b, 0xae, 0x77, 0x8b, 0xc3, 0x66, 0x61, 0xc0, 0x30, 0xd5, 0xb7, 0x84,
0xc8, 0xf1, 0x68, 0x68, 0x1c, 0x49, 0x91, 0x0f, 0x1c, 0x62, 0x35, 0x24, 0xc6, 0x6a, 0x70, 0x90,
0x89, 0xe6, 0x89, 0xbc, 0x3a, 0x2a, 0x04, 0xbf, 0x8d, 0x18, 0xd6, 0xb7, 0x4f, 0x32, 0xa7, 0x1b,
0x6a, 0x75, 0xc4, 0x04, 0x5b, 0x77, 0x11, 0xcf, 0xce, 0x18, 0xa3, 0x66, 0x24, 0x22, 0xa4, 0x17,
0xb3, 0x62, 0x07, 0x7b, 0xb7, 0xfa, 0x8e, 0xa0, 0x2e, 0xca, 0x88, 0x44, 0x59, 0x1e, 0x87, 0x29,
0x19, 0x51, 0xc8, 0xe6, 0xef, 0xb3, 0x60, 0xed, 0x35, 0xee, 0x36, 0x30, 0x81, 0xbf, 0x00, 0x5b,
0xbc, 0xb0, 0xed, 0x3e, 0xf6, 0x2a, 0x2e, 0xeb, 0x24, 0x85, 0xad, 0xe4, 0x06, 0x33, 0xaf, 0x69,
0xe1, 0x3e, 0xf6, 0xac, 0xd8, 0x65, 0x1d, 0x13, 0xa5, 0xe0, 0xf0, 0x4b, 0x90, 0xcb, 0xb7, 0x71,
0xc8, 0xf2, 0xcd, 0x26, 0x11, 0x79, 0xcd, 0x15, 0x0e, 0x46, 0x43, 0xe3, 0x41, 0x52, 0x3a, 0xdc,
0x65, 0xb9, 0xcd, 0x26, 0x31, 0xd1, 0x14, 0xc7, 0xf3, 0xf9, 0xca, 0xf5, 0x83, 0x38, 0xf2, 0x43,
0x76, 0x59, 0xab, 0x55, 0x04, 0x79, 0x4b, 0x90, 0x95, 0x7c, 0xb6, 0xc6, 0x10, 0xab, 0xc3, 0x58,
0x9c, 0xa8, 0xcc, 0x13, 0x79, 0x3e, 0x0b, 0x2e, 0xc5, 0xbc, 0x7f, 0xe0, 0xd9, 0x6e, 0xd3, 0x70,
0x29, 0x4e, 0xba, 0x4d, 0x82, 0x81, 0x5f, 0x81, 0x4d, 0xbe, 0x03, 0x27, 0x6a, 0x8b, 0xfd, 0xb6,
0x04, 0x45, 0x1f, 0x0d, 0x8d, 0x7d, 0x65, 0xbf, 0x41, 0xd4, 0x4e, 0xb6, 0xab, 0x82, 0x61, 0x1e,
0x6c, 0xf3, 0xa5, 0x2c, 0xf8, 0x9a, 0x53, 0xd5, 0xff, 0x9c, 0x11, 0xc7, 0xa0, 0x54, 0x8d, 0xa0,
0x27, 0x8d, 0x82, 0xf1, 0x1a, 0x4c, 0x33, 0xe0, 0x37, 0x60, 0x77, 0x6a, 0xa8, 0x90, 0xa8, 0x3f,
0xd0, 0xff, 0x22, 0x45, 0x9e, 0x8c, 0x86, 0x86, 0x3e, 0x2f, 0x12, 0x73, 0x8c, 0x89, 0x66, 0x59,
0xe3, 0x58, 0x78, 0x45, 0x4b, 0x99, 0xbf, 0x2e, 0x8e, 0x45, 0xb4, 0x83, 0x44, 0x24, 0xcd, 0x80,
0x15, 0x00, 0xa7, 0xaa, 0x76, 0xd8, 0x14, 0x79, 0xd5, 0xbf, 0x97, 0x57, 0xc0, 0x18, 0x0d, 0x8d,
0xc7, 0xf3, 0xe1, 0xe0, 0x04, 0x66, 0xa2, 0x05, 0x5c, 0xf8, 0x13, 0x39, 0x26, 0xf4, 0x3f, 0xf1,
0xbe, 0xbf, 0xf9, 0x62, 0xf3, 0x5c, 0x4c, 0x9b, 0x73, 0x6e, 0x53, 0x87, 0x05, 0x17, 0x34, 0x91,
0x80, 0x9a, 0xff, 0xd9, 0x04, 0x6b, 0x35, 0x2c, 0x1a, 0xca, 0x4b, 0xb0, 0x2d, 0x7f, 0x95, 0x30,
0xfb, 0x36, 0x22, 0xb7, 0xf3, 0x97, 0x91, 0x09, 0xb7, 0x15, 0x4a, 0xbf, 0x89, 0xd2, 0x78, 0xf8,
0x33, 0x00, 0xa4, 0x41, 0xdc, 0x28, 0x39, 0x7b, 0x1e, 0x8e, 0x86, 0x06, 0x4c, 0xb1, 0xe5, 0x4d,
0x52, 0x90, 0xbc, 0x6d, 0x5f, 0xe0, 0xc0, 0x1d, 0x38, 0x2e, 0xc3, 0xa1, 0x37, 0x78, 0x4d, 0xc5,
0x55, 0xde, 0x56, 0xdb, 0x76, 0x93, 0xfb, 0xad, 0x40, 0x02, 0xac, 0x2e, 0x6f, 0xdb, 0x69, 0x0a,
0xfc, 0x15, 0xd0, 0xd2, 0x16, 0x74, 0x27, 0x2e, 0xf5, 0xb6, 0x7a, 0xa9, 0x67, 0x65, 0x2c, 0x72,
0x67, 0xa2, 0x39, 0x1e, 0xdf, 0x08, 0x8a, 0x7a, 0x61, 0xd3, 0xf1, 0xbb, 0x3e, 0xd3, 0x0f, 0x4e,
0x32, 0xa7, 0x59, 0x75, 0x23, 0x84, 0xfb, 0xac, 0x80, 0x3b, 0x4d, 0xa4, 0x20, 0xe1, 0x2f, 0xc1,
0xb6, 0xdd, 0xf7, 0x59, 0x39, 0xe4, 0x65, 0xd2, 0x23, 0x58, 0x7f, 0x38, 0x77, 0x27, 0xfa, 0x3e,
0xb3, 0xa2, 0xd0, 0x6a, 0x49, 0x00, 0xbf, 0x13, 0x2a, 0x01, 0x5e, 0x02, 0xad, 0x18, 0x85, 0x54,
0x4c, 0x24, 0x6f, 0x20, 0x7b, 0xcd, 0xe1, 0xec, 0xfd, 0xf4, 0xa6, 0x88, 0x71, 0x9f, 0x99, 0x63,
0xc1, 0x9f, 0x83, 0x4d, 0x3b, 0x74, 0x1b, 0x01, 0xae, 0xc4, 0x24, 0x6a, 0xe9, 0xba, 0x10, 0x39,
0x1c, 0x0d, 0x8d, 0xbd, 0x24, 0x12, 0xe1, 0xb4, 0x62, 0xee, 0xe5, 0x75, 0x36, 0xc5, 0xc2, 0xaf,
0xc1, 0x56, 0x12, 0x4f, 0xd1, 0xa5, 0x98, 0xea, 0x86, 0x98, 0x7a, 0x4a, 0x91, 0x26, 0xd1, 0x5b,
0x1e, 0x77, 0x9b, 0x28, 0x85, 0xe6, 0xa7, 0x99, 0xac, 0x45, 0x5e, 0x5f, 0x53, 0xfd, 0x64, 0xf6,
0x34, 0xc7, 0x7c, 0x79, 0x1c, 0xe2, 0x34, 0xd3, 0x14, 0x3e, 0x20, 0x13, 0x4b, 0xb5, 0xd3, 0x6b,
0xb5, 0x02, 0xac, 0x7f, 0x36, 0x9b, 0xca, 0xb1, 0x08, 0x95, 0x80, 0xa9, 0x46, 0xc2, 0x80, 0xd7,
0x4a, 0x9f, 0x2b, 0x46, 0xdd, 0xae, 0x1b, 0x36, 0xa9, 0x6e, 0xce, 0xbe, 0x56, 0xa6, 0x7d, 0xce,
0x4b, 0x30, 0x6a, 0x9b, 0x1b, 0xf3, 0xf8, 0xae, 0x50, 0x2f, 0x0c, 0x31, 0x99, 0xb4, 0xea, 0x1f,
0x89, 0xfb, 0xad, 0xec, 0x8a, 0x08, 0xbf, 0xda, 0xac, 0x67, 0x28, 0xfc, 0xf9, 0x64, 0xf7, 0x19,
0x26, 0xa1, 0x1b, 0x4c, 0x64, 0xce, 0x84, 0x8c, 0x12, 0x10, 0x4e, 0x10, 0xaa, 0xd0, 0x1c, 0x8d,
0x1f, 0x6f, 0x95, 0x11, 0x4c, 0x69, 0x6d, 0x10, 0x63, 0xaa, 0x63, 0xb1, 0x2d, 0xe5, 0x78, 0xa9,
0x70, 0x5a, 0x8c, 0x7b, 0x4d, 0xa4, 0x62, 0xf9, 0x2d, 0x95, 0xcb, 0x6b, 0x3c, 0xa8, 0xfa, 0xdf,
0x61, 0xd1, 0x84, 0xb3, 0x6a, 0x6a, 0x13, 0xf2, 0x2d, 0x1e, 0x58, 0xd4, 0xff, 0x8e, 0xdf, 0xd2,
0x14, 0x81, 0x77, 0xae, 0x94, 0xc1, 0x71, 0x49, 0x1b, 0xeb, 0x6d, 0x21, 0xa3, 0xcc, 0xc4, 0x19,
0x19, 0x2b, 0xe0, 0x30, 0x13, 0x2d, 0xe0, 0xc2, 0xb7, 0x60, 0x7f, 0x6a, 0xed, 0xb5, 0x5a, 0x7e,
0x1f, 0xb9, 0x61, 0x1b, 0xeb, 0x1d, 0xa1, 0x69, 0x8e, 0x86, 0xc6, 0xf1, 0xbc, 0xa6, 0xc0, 0x59,
0x84, 0x03, 0x4d, 0xb4, 0x90, 0x0f, 0x7f, 0x03, 0x0e, 0x17, 0xd9, 0x6b, 0xfd, 0x50, 0xf7, 0x85,
0xb4, 0xf2, 0x96, 0xbb, 0x47, 0xda, 0x62, 0xfd, 0xd0, 0x44, 0xf7, 0xc9, 0xf0, 0x89, 0x32, 0x71,
0xd5, 0xfa, 0x61, 0x39, 0xa6, 0xfa, 0x6f, 0x85, 0xb2, 0x72, 0xa4, 0x8a, 0x32, 0xeb, 0x87, 0x56,
0x14, 0x53, 0x13, 0xcd, 0xb2, 0xa6, 0xc7, 0x22, 0x9b, 0x3a, 0x95, 0xd3, 0x2d, 0x9b, 0x7a, 0x28,
0x49, 0x1d, 0x39, 0x0b, 0xe8, 0xe4, 0x58, 0x12, 0x02, 0xfc, 0x29, 0xc8, 0x49, 0xc3, 0x9b, 0x4a,
0x55, 0x8e, 0xb5, 0xac, 0xfa, 0x1c, 0x48, 0xd8, 0x1f, 0xf8, 0xd7, 0xa7, 0x40, 0xf3, 0x77, 0x19,
0xb0, 0x8e, 0xf0, 0x87, 0x1e, 0xa6, 0x0c, 0x9e, 0x83, 0x5c, 0x39, 0xc6, 0xc4, 0x65, 0x7e, 0x14,
0x8a, 0xf6, 0xbf, 0xf3, 0x42, 0x4b, 0x86, 0xc8, 0xc4, 0x8e, 0xa6, 0x10, 0xf8, 0xc5, 0xf8, 0x21,
0xa3, 0xcb, 0x89, 0xb3, 0x9d, 0x80, 0xa5, 0x11, 0x8d, 0x5f, 0x39, 0x5f, 0x8c, 0x67, 0x8c, 0xf8,
0x13, 0x63, 0x0a, 0x93, 0x46, 0x94, 0x38, 0xcd, 0xaf, 0xc1, 0x06, 0xc2, 0x34, 0x8e, 0x42, 0x8a,
0xa1, 0x0e, 0xd6, 0xab, 0x3d, 0xcf, 0xc3, 0x94, 0x8a, 0x38, 0x36, 0xd0, 0x78, 0x09, 0x1f, 0x82,
0x35, 0xfe, 0x58, 0xed, 0x51, 0x39, 0x61, 0x50, 0xb2, 0x3a, 0xfb, 0x47, 0x46, 0x09, 0x1e, 0xee,
0x00, 0x50, 0x8a, 0x58, 0x95, 0xb9, 0x84, 0xe1, 0xa6, 0xb6, 0x04, 0xf7, 0x81, 0x96, 0x3c, 0xc5,
0x84, 0x8d, 0xcf, 0x3e, 0x2d, 0x03, 0x77, 0xc1, 0x26, 0xc2, 0x74, 0x62, 0x58, 0x86, 0x5b, 0x60,
0xe3, 0xda, 0x0f, 0x02, 0xb1, 0x5a, 0xe1, 0x6e, 0xde, 0x09, 0xf2, 0xc4, 0xeb, 0xf8, 0x77, 0x58,
0x5b, 0xe5, 0x2a, 0x17, 0x98, 0x32, 0x12, 0x0d, 0x38, 0x42, 0x3c, 0xa9, 0xb4, 0x2c, 0x7c, 0x04,
0x0e, 0x0a, 0x81, 0xeb, 0xdd, 0x76, 0xa2, 0x40, 0x3c, 0xf1, 0x2b, 0x11, 0x61, 0xb5, 0x3e, 0xea,
0x6b, 0x4d, 0xf8, 0x18, 0x1c, 0xde, 0x84, 0x8d, 0x85, 0x4e, 0x0c, 0x0f, 0xc0, 0x03, 0xd1, 0xef,
0x52, 0xe6, 0x16, 0x3c, 0x04, 0x7b, 0x37, 0x61, 0x73, 0xce, 0xd1, 0x3e, 0xfb, 0xdb, 0x8a, 0x8c,
0x27, 0x69, 0xb5, 0x9c, 0x7f, 0x7d, 0xe5, 0x38, 0xf5, 0x72, 0xc9, 0xae, 0xbf, 0x2a, 0x3b, 0x4e,
0xf9, 0x9d, 0x8d, 0xb4, 0x25, 0x1e, 0xb5, 0x30, 0x3b, 0x76, 0xfe, 0xc2, 0x46, 0x5a, 0x06, 0x1e,
0x83, 0xa3, 0x39, 0x5c, 0xfd, 0x55, 0x19, 0xd5, 0x9d, 0x72, 0xe9, 0x1b, 0x6d, 0x19, 0xea, 0x60,
0x5f, 0x21, 0x4c, 0x3d, 0x2b, 0x13, 0xa9, 0x37, 0x37, 0x65, 0x74, 0xf3, 0x5a, 0x5b, 0x15, 0xf9,
0xe1, 0x86, 0xbc, 0xe3, 0x68, 0x59, 0x78, 0x06, 0x9e, 0x15, 0x9c, 0x7c, 0xf1, 0xfa, 0xb2, 0xec,
0xd8, 0xf5, 0x8a, 0x6d, 0xa3, 0x7a, 0xa5, 0x8c, 0x6a, 0xf5, 0xda, 0xfb, 0x3a, 0x7a, 0x9f, 0x8e,
0x6a, 0x0d, 0xfe, 0x00, 0x9c, 0xdc, 0x8f, 0x4d, 0x42, 0x5d, 0x87, 0x9f, 0x81, 0xa7, 0xf7, 0xa3,
0xf8, 0x47, 0x37, 0xe0, 0x33, 0x60, 0x5e, 0xd8, 0x4e, 0xfe, 0xd7, 0xff, 0xfb, 0x83, 0x39, 0x78,
0x02, 0x9e, 0x2c, 0xc6, 0x25, 0x1f, 0x03, 0xf0, 0x29, 0x78, 0xb4, 0x18, 0xc1, 0x3f, 0xb4, 0xc9,
0xcf, 0xa1, 0x54, 0xae, 0xbf, 0xca, 0x5f, 0x39, 0xf5, 0x77, 0x57, 0xb5, 0xcb, 0x7a, 0xb5, 0x86,
0xec, 0x6a, 0x55, 0x6b, 0xc2, 0x1f, 0x82, 0xcf, 0x53, 0x8e, 0x52, 0x39, 0xf1, 0xc9, 0xd4, 0x5d,
0xbd, 0xb5, 0x4b, 0x1c, 0x88, 0xe1, 0x2e, 0x00, 0x1c, 0x55, 0x29, 0x5f, 0x95, 0x6a, 0x55, 0xed,
0x63, 0x06, 0x6e, 0x83, 0x0d, 0xfb, 0x7d, 0xcd, 0x46, 0xa5, 0xbc, 0xa3, 0xfd, 0x71, 0xf9, 0x2c,
0x02, 0x60, 0xda, 0x98, 0xe1, 0x1a, 0x58, 0xbe, 0x7e, 0xab, 0x2d, 0xc1, 0x1c, 0xc8, 0x3a, 0x76,
0xbe, 0x6a, 0x6b, 0x19, 0xb8, 0x07, 0x76, 0x6d, 0xc7, 0x2e, 0xd6, 0xae, 0xca, 0xa5, 0x3a, 0xba,
0x29, 0x95, 0x6c, 0xa4, 0x2d, 0x43, 0x0d, 0x6c, 0xbd, 0xcb, 0xd7, 0x8a, 0x97, 0x63, 0xcb, 0x0a,
0xbf, 0x08, 0x4e, 0xb9, 0x78, 0x5d, 0x47, 0xf9, 0xa2, 0x8d, 0xc6, 0xe6, 0x55, 0x0e, 0x14, 0x42,
0x63, 0x4b, 0xf6, 0xc5, 0x4b, 0x90, 0xab, 0x11, 0x37, 0xa4, 0x71, 0x44, 0x18, 0x7c, 0xa1, 0x2e,
0x76, 0x92, 0x92, 0x4c, 0x3a, 0xc1, 0xd1, 0xee, 0x64, 0x2d, 0x0b, 0xd2, 0x5c, 0x3a, 0xcd, 0xfc,
0x38, 0x53, 0xd8, 0xff, 0xf8, 0xaf, 0xe3, 0xa5, 0x8f, 0x9f, 0x8e, 0x33, 0x7f, 0xff, 0x74, 0x9c,
0xf9, 0xe7, 0xa7, 0xe3, 0xcc, 0x1f, 0xfe, 0x7d, 0xbc, 0xd4, 0x58, 0x13, 0xff, 0xa6, 0xf8, 0xf2,
0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x33, 0x62, 0x54, 0x24, 0xef, 0x10, 0x00, 0x00,
// 2099 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x58, 0x5b, 0x77, 0xdb, 0x58,
0x15, 0x8e, 0x92, 0x26, 0x4d, 0x4e, 0x6e, 0xea, 0x49, 0xd3, 0xa8, 0x97, 0x89, 0x52, 0x95, 0x96,
0x34, 0x8c, 0x52, 0x68, 0x59, 0xc0, 0x94, 0x19, 0x3a, 0x8e, 0xa3, 0x36, 0x26, 0x8a, 0xed, 0x1e,
0x2b, 0x6d, 0xe7, 0xc9, 0xc8, 0xd2, 0xb1, 0x2d, 0xa2, 0x48, 0xaa, 0x74, 0x9c, 0x71, 0xe6, 0x0f,
0xb0, 0x78, 0xe3, 0x91, 0x1f, 0xc1, 0xfc, 0x8f, 0xce, 0x05, 0x18, 0xe0, 0x8d, 0x07, 0x03, 0x65,
0xb1, 0x16, 0xcf, 0x5e, 0xfc, 0x00, 0xd6, 0xb9, 0xd8, 0x96, 0x7c, 0x49, 0xfb, 0x96, 0xb3, 0xf7,
0xf7, 0x7d, 0xda, 0x67, 0x9f, 0x7d, 0xf6, 0x3e, 0x0e, 0x58, 0x8d, 0x23, 0x27, 0xaa, 0x3d, 0x88,
0x23, 0x67, 0x37, 0x8a, 0x43, 0x12, 0xc2, 0x59, 0x66, 0xb8, 0xa1, 0x37, 0x3c, 0xd2, 0x6c, 0xd5,
0x76, 0x9d, 0xf0, 0xf4, 0x41, 0x23, 0x6c, 0x84, 0x0f, 0x98, 0xb7, 0xd6, 0xaa, 0xb3, 0x15, 0x5b,
0xb0, 0xbf, 0x38, 0x4b, 0xfb, 0xef, 0x3c, 0xb8, 0x64, 0x10, 0xc7, 0x85, 0x77, 0xc0, 0xa5, 0xa2,
0x7d, 0x8a, 0x15, 0x69, 0x4b, 0xda, 0x5e, 0xd8, 0x5b, 0xed, 0x76, 0xd4, 0xc5, 0x73, 0xfb, 0xd4,
0x7f, 0xac, 0x05, 0xf6, 0x29, 0xd6, 0x10, 0x73, 0x42, 0x1d, 0x5c, 0xde, 0xb7, 0x89, 0xbd, 0xef,
0xc5, 0xca, 0x34, 0xc3, 0xad, 0x75, 0x3b, 0xea, 0x2a, 0xc7, 0xb9, 0x36, 0xb1, 0x75, 0xd7, 0x8b,
0x35, 0xd4, 0xc3, 0xc0, 0x1d, 0x30, 0xf7, 0x32, 0x67, 0x52, 0xf4, 0x0c, 0x43, 0xc3, 0x6e, 0x47,
0x5d, 0xe1, 0xe8, 0xcf, 0x6d, 0x9f, 0x83, 0x05, 0x02, 0x96, 0xc0, 0xda, 0x01, 0xb6, 0x63, 0x52,
0xc3, 0x36, 0x29, 0x04, 0x04, 0xc7, 0x67, 0xb6, 0x7f, 0x94, 0x28, 0x8b, 0x5b, 0xd2, 0xf6, 0xcc,
0xde, 0x07, 0xdd, 0x8e, 0x7a, 0x9d, 0x13, 0x9b, 0x3d, 0x90, 0xee, 0x09, 0x94, 0x86, 0xc6, 0x31,
0x61, 0x01, 0x5c, 0x31, 0x7c, 0xec, 0x10, 0x2f, 0x0c, 0x2c, 0xef, 0x14, 0x87, 0x2d, 0x72, 0x94,
0x28, 0x4b, 0x4c, 0xee, 0x66, 0xb7, 0xa3, 0x6e, 0x70, 0x39, 0x2c, 0x20, 0x3a, 0xe1, 0x18, 0x0d,
0x8d, 0xb2, 0x60, 0x01, 0xc8, 0xa6, 0x97, 0x10, 0x1c, 0xe4, 0x7d, 0x0f, 0x07, 0xe4, 0x18, 0x99,
0x89, 0xb2, 0xbe, 0x35, 0xb3, 0xbd, 0x90, 0x0e, 0xcc, 0x67, 0x08, 0xdd, 0x61, 0x10, 0xbd, 0x15,
0xfb, 0x89, 0x86, 0x46, 0x68, 0x10, 0x81, 0xb5, 0x9c, 0x7b, 0x86, 0x63, 0xe2, 0x25, 0x38, 0xa5,
0x76, 0x8d, 0xa9, 0x6d, 0x75, 0x3b, 0xea, 0x2d, 0xae, 0x66, 0xf7, 0x40, 0x59, 0xc1, 0x71, 0x64,
0x98, 0x07, 0x2b, 0xfc, 0x3b, 0x65, 0x8c, 0x63, 0x26, 0xb7, 0xc1, 0xe4, 0x52, 0xdb, 0x14, 0xc1,
0x45, 0x18, 0xc7, 0x42, 0x69, 0x88, 0x02, 0x6b, 0x40, 0x29, 0x04, 0x1e, 0xf1, 0x6c, 0xbf, 0xff,
0x89, 0xbe, 0x9c, 0xc2, 0xe4, 0xee, 0x75, 0x3b, 0xaa, 0xc6, 0xe5, 0x3c, 0x8e, 0xd4, 0x07, 0x51,
0xa6, 0x94, 0x27, 0xea, 0xc0, 0x3d, 0xb0, 0x22, 0x7c, 0x79, 0xbf, 0x95, 0x10, 0x1c, 0x2b, 0x2a,
0xab, 0x8b, 0x1b, 0xdd, 0x8e, 0x7a, 0x2d, 0xab, 0xec, 0x70, 0x80, 0x86, 0x86, 0x18, 0x34, 0x81,
0x59, 0x4b, 0x85, 0xd8, 0x04, 0x2b, 0x5b, 0x4c, 0x28, 0x95, 0xc0, 0x21, 0x21, 0x3d, 0xa1, 0x30,
0x0d, 0x8d, 0x23, 0x8f, 0x6a, 0x5a, 0xe1, 0x09, 0x0e, 0x94, 0xdb, 0xef, 0xd2, 0x24, 0x14, 0x36,
0xa2, 0xc9, 0xc8, 0xf0, 0x09, 0x58, 0xae, 0x04, 0x76, 0x94, 0x34, 0x43, 0x92, 0x0f, 0x5b, 0x01,
0x51, 0xee, 0xb3, 0xd2, 0xbb, 0xde, 0xed, 0xa8, 0xeb, 0x5c, 0x2d, 0x11, 0x6e, 0xdd, 0xa1, 0x7e,
0x0d, 0x65, 0xf1, 0xd0, 0x04, 0x57, 0x9e, 0xb7, 0x42, 0x62, 0xef, 0xd9, 0xce, 0x09, 0x0e, 0xdc,
0xbd, 0x73, 0x82, 0x13, 0x65, 0x87, 0x89, 0x6c, 0x76, 0x3b, 0xea, 0x0d, 0x2e, 0xf2, 0x9a, 0x42,
0xf4, 0x1a, 0xc7, 0xe8, 0x35, 0x0a, 0xd2, 0xd0, 0x28, 0x91, 0xde, 0xdc, 0x72, 0x8c, 0x5f, 0x84,
0x04, 0x2b, 0x3f, 0xd8, 0x92, 0xb6, 0xe7, 0xd3, 0x37, 0x37, 0x8a, 0xb1, 0x7e, 0x16, 0xd2, 0xec,
0xf4, 0x30, 0xe9, 0x8c, 0x84, 0x71, 0xdc, 0x8a, 0x48, 0xbe, 0x89, 0x9d, 0x13, 0xe5, 0x43, 0x46,
0x1d, 0x97, 0x11, 0x8e, 0xd2, 0x1d, 0x0a, 0x4b, 0x65, 0x24, 0x45, 0xd6, 0x7e, 0x3b, 0x0b, 0xe6,
0x8e, 0xf0, 0x69, 0x0d, 0xc7, 0xf0, 0x13, 0xb0, 0x44, 0x9b, 0x8e, 0xd1, 0xc6, 0x4e, 0xd9, 0x26,
0x4d, 0xd1, 0x74, 0x52, 0xb9, 0xc1, 0xc4, 0x71, 0x75, 0xdc, 0xc6, 0x8e, 0x1e, 0xd9, 0xa4, 0xa9,
0xa1, 0x0c, 0x1c, 0x3e, 0x02, 0x0b, 0xb9, 0x06, 0x0e, 0x48, 0xce, 0x75, 0x63, 0xd6, 0x21, 0x16,
0xf6, 0xd6, 0xbb, 0x1d, 0xf5, 0x8a, 0xb8, 0x3a, 0xd4, 0xa5, 0xdb, 0xae, 0x1b, 0x6b, 0x68, 0x80,
0xa3, 0xf9, 0x7c, 0x6a, 0x7b, 0x7e, 0x14, 0x7a, 0x01, 0x39, 0xb0, 0xac, 0x32, 0x23, 0x2f, 0x31,
0x72, 0x2a, 0x9f, 0xf5, 0x1e, 0x44, 0x6f, 0x12, 0x12, 0x09, 0x95, 0x51, 0x22, 0xcd, 0xe7, 0x9e,
0x9d, 0x60, 0xda, 0xdb, 0xf0, 0x70, 0x27, 0xac, 0xd9, 0x09, 0x16, 0x9d, 0x50, 0x60, 0xe0, 0x63,
0xb0, 0x48, 0x77, 0x60, 0x86, 0x0d, 0xb6, 0xdf, 0x3a, 0xa3, 0x28, 0xdd, 0x8e, 0x7a, 0x35, 0xb5,
0x5f, 0x3f, 0x6c, 0x88, 0xed, 0xa6, 0xc1, 0x30, 0x07, 0x96, 0xe9, 0x92, 0x5f, 0x78, 0xcb, 0xac,
0x28, 0x5f, 0x49, 0xec, 0x18, 0x52, 0xb7, 0x86, 0xd1, 0x45, 0xa3, 0x20, 0xf4, 0x0e, 0x66, 0x19,
0xf0, 0x19, 0x58, 0x1d, 0x18, 0xca, 0x71, 0xd8, 0x3e, 0x57, 0xbe, 0xe6, 0x22, 0xb7, 0xba, 0x1d,
0x55, 0x19, 0x15, 0x89, 0x28, 0x46, 0x43, 0xc3, 0xac, 0x5e, 0x2c, 0xf4, 0x46, 0x73, 0x99, 0x6f,
0xc6, 0xc7, 0xc2, 0xda, 0x81, 0x10, 0xc9, 0x32, 0x60, 0x19, 0xc0, 0x81, 0xaa, 0x11, 0xb8, 0x2c,
0xaf, 0xca, 0xb7, 0xbc, 0x04, 0xd4, 0x6e, 0x47, 0xbd, 0x39, 0x1a, 0x0e, 0x16, 0x30, 0x0d, 0x8d,
0xe1, 0xc2, 0x1f, 0xf1, 0x11, 0xa6, 0x7c, 0x49, 0x67, 0xd2, 0xe2, 0xc3, 0xc5, 0x5d, 0x36, 0x09,
0x77, 0xa9, 0x2d, 0x3d, 0xc8, 0xa8, 0xa0, 0x86, 0x18, 0x54, 0xfb, 0xfb, 0x12, 0x98, 0xb3, 0x30,
0x6b, 0x28, 0x4f, 0xc0, 0x32, 0xff, 0xab, 0x88, 0xc9, 0xe7, 0x61, 0x7c, 0x32, 0x5a, 0x8c, 0x84,
0xb9, 0xf5, 0x80, 0xfb, 0x35, 0x94, 0xc5, 0xc3, 0x9f, 0x00, 0xc0, 0x0d, 0xac, 0xa2, 0xf8, 0x5c,
0xbc, 0xd6, 0xed, 0xa8, 0x30, 0xc3, 0xe6, 0x95, 0x94, 0x42, 0xd2, 0xb6, 0xbd, 0x8f, 0x7d, 0xfb,
0xdc, 0xb4, 0x09, 0x0e, 0x9c, 0x73, 0x31, 0xec, 0x96, 0xd3, 0x6d, 0xdb, 0xa5, 0x7e, 0xdd, 0xe7,
0x00, 0xfd, 0x94, 0xb6, 0xed, 0x2c, 0x05, 0xfe, 0x12, 0xc8, 0x59, 0x0b, 0x3a, 0x63, 0x45, 0xbd,
0x9c, 0x2e, 0xea, 0x61, 0x19, 0x3d, 0x3e, 0xd3, 0xd0, 0x08, 0x0f, 0x7e, 0x06, 0xd6, 0x8f, 0x23,
0xd7, 0x26, 0xd8, 0x1d, 0x8a, 0x6b, 0x99, 0x09, 0xde, 0xe9, 0x76, 0x54, 0x95, 0x0b, 0xb6, 0x38,
0x4c, 0x1f, 0x8d, 0x6f, 0xbc, 0x02, 0xcd, 0x11, 0x0a, 0x5b, 0x81, 0x6b, 0x7a, 0xa7, 0x1e, 0x51,
0xd6, 0xb7, 0xa4, 0xed, 0xd9, 0x74, 0x8e, 0x62, 0xea, 0xd3, 0x7d, 0xea, 0xd4, 0x50, 0x0a, 0x09,
0x3f, 0x05, 0xcb, 0x46, 0xdb, 0x23, 0xa5, 0x80, 0xde, 0xc0, 0x56, 0x8c, 0x95, 0x6b, 0x23, 0xe5,
0xd6, 0xf6, 0x88, 0x1e, 0x06, 0x7a, 0x9d, 0x03, 0x68, 0xb9, 0xa5, 0x09, 0xf0, 0x00, 0xc8, 0xf9,
0x30, 0x48, 0xd8, 0xb0, 0x73, 0xce, 0x79, 0x1b, 0xdb, 0x18, 0x2e, 0x7d, 0x67, 0x80, 0xe8, 0xb5,
0xb0, 0x11, 0x16, 0xfc, 0x08, 0x2c, 0x1a, 0x81, 0x5d, 0xf3, 0x71, 0x39, 0x8a, 0xc3, 0xba, 0xa2,
0x30, 0x91, 0x8d, 0x6e, 0x47, 0x5d, 0x13, 0x91, 0x30, 0xa7, 0x1e, 0x51, 0x2f, 0xbd, 0xc2, 0x03,
0x2c, 0xfc, 0x18, 0x2c, 0x89, 0x78, 0xf2, 0x76, 0x82, 0x13, 0x45, 0x65, 0x03, 0x35, 0x75, 0xff,
0x45, 0xf4, 0xba, 0x43, 0xdd, 0x1a, 0xca, 0xa0, 0x69, 0xa1, 0x88, 0x35, 0xcb, 0xea, 0x51, 0xc2,
0xa6, 0x5d, 0xa6, 0x50, 0x7a, 0x7c, 0x7e, 0x20, 0xac, 0x50, 0xb2, 0x14, 0x3a, 0x7b, 0x85, 0xa5,
0xd2, 0x6c, 0xd5, 0xeb, 0x3e, 0x66, 0xe3, 0x2d, 0x93, 0xca, 0x9e, 0x48, 0xc2, 0x01, 0x03, 0x0d,
0xc1, 0x80, 0x87, 0xa9, 0x16, 0x9a, 0x0f, 0x4f, 0x4f, 0xed, 0xc0, 0x4d, 0x14, 0x6d, 0xf8, 0x21,
0x34, 0x68, 0xa1, 0x8e, 0xc0, 0xa4, 0x3b, 0x68, 0x8f, 0x47, 0x77, 0x85, 0x5a, 0x41, 0x80, 0xe3,
0xfe, 0x14, 0xb8, 0xcf, 0xae, 0x4e, 0x6a, 0x57, 0x31, 0xf3, 0xa7, 0xe7, 0xc0, 0x10, 0x85, 0xbe,
0xcc, 0x8c, 0x36, 0xc1, 0x71, 0x60, 0xfb, 0x7d, 0x99, 0x1d, 0x26, 0x93, 0x0a, 0x08, 0x0b, 0x44,
0x5a, 0x68, 0x84, 0x46, 0x8f, 0xb7, 0x42, 0x62, 0x9c, 0x24, 0xd6, 0x79, 0x84, 0x13, 0x05, 0xb3,
0x6d, 0xa5, 0x8e, 0x37, 0x61, 0x4e, 0x9d, 0x50, 0xaf, 0x86, 0xd2, 0x58, 0x5a, 0xa5, 0x7c, 0x79,
0x88, 0xcf, 0x2b, 0xde, 0x17, 0x98, 0xf5, 0xf7, 0xd9, 0x74, 0x6a, 0x05, 0xf9, 0x04, 0x9f, 0xeb,
0x89, 0xf7, 0x05, 0xad, 0xd2, 0x0c, 0x81, 0x36, 0xc5, 0x8c, 0xc1, 0xb4, 0xe3, 0x06, 0x56, 0x1a,
0x4c, 0x26, 0x35, 0x6e, 0x87, 0x64, 0x74, 0x9f, 0xc2, 0x34, 0x34, 0x86, 0x0b, 0x5f, 0x80, 0xab,
0x03, 0x6b, 0xab, 0x5e, 0xf7, 0xda, 0xc8, 0x0e, 0x1a, 0x58, 0x69, 0x32, 0x4d, 0xad, 0xdb, 0x51,
0x37, 0x47, 0x35, 0x19, 0x4e, 0x8f, 0x29, 0x50, 0x43, 0x63, 0xf9, 0xf0, 0x57, 0x60, 0x63, 0x9c,
0xdd, 0x6a, 0x07, 0x8a, 0xc7, 0xa4, 0x53, 0xcf, 0xc4, 0x09, 0xd2, 0x3a, 0x69, 0x07, 0x1a, 0x9a,
0x24, 0x43, 0x87, 0x55, 0xdf, 0x65, 0xb5, 0x83, 0x52, 0x94, 0x28, 0xbf, 0x66, 0xca, 0xa9, 0x23,
0x4d, 0x29, 0x93, 0x76, 0xa0, 0x87, 0x51, 0xa2, 0xa1, 0x61, 0xd6, 0xe0, 0x58, 0xf8, 0xbc, 0x48,
0xf8, 0xe0, 0x9c, 0xcd, 0xbc, 0xc1, 0xb8, 0x0e, 0x1f, 0x33, 0x49, 0xff, 0x58, 0x04, 0x01, 0xfe,
0x18, 0x2c, 0x70, 0xc3, 0xf3, 0x72, 0x85, 0x4f, 0xcc, 0xd9, 0xf4, 0x4b, 0x43, 0xb0, 0x5f, 0xd3,
0xaf, 0x0f, 0x80, 0xda, 0x6f, 0x24, 0x70, 0x19, 0xe1, 0xd7, 0x2d, 0x9c, 0x10, 0xb8, 0x0b, 0x16,
0x4a, 0x11, 0x8e, 0x6d, 0xfa, 0x83, 0x82, 0x4d, 0x96, 0x95, 0x87, 0xb2, 0x98, 0x4f, 0x7d, 0x3b,
0x1a, 0x40, 0xe0, 0xdd, 0xde, 0x1b, 0x49, 0xe1, 0xc3, 0x6c, 0x59, 0x80, 0xb9, 0x11, 0xf5, 0x1e,
0x50, 0x77, 0x7b, 0xe3, 0x8b, 0xfd, 0xb2, 0x1a, 0xc0, 0xb8, 0x11, 0x09, 0xa7, 0xf6, 0x31, 0x98,
0x47, 0x38, 0x89, 0xc2, 0x20, 0xc1, 0x50, 0x01, 0x97, 0x2b, 0x2d, 0xc7, 0xc1, 0x49, 0xc2, 0xe2,
0x98, 0x47, 0xbd, 0x25, 0xbc, 0x06, 0xe6, 0xe8, 0x3b, 0xb8, 0x95, 0xf0, 0xe1, 0x85, 0xc4, 0x6a,
0xe7, 0x1f, 0x52, 0x2a, 0x78, 0xb8, 0x02, 0x40, 0x31, 0x24, 0x15, 0x62, 0xc7, 0x04, 0xbb, 0xf2,
0x14, 0xbc, 0x0a, 0x64, 0xf1, 0xca, 0x63, 0x36, 0x3a, 0x56, 0x65, 0x09, 0xae, 0x82, 0x45, 0x84,
0x93, 0xbe, 0x61, 0x1a, 0x2e, 0x81, 0xf9, 0x43, 0xcf, 0xf7, 0xd9, 0x6a, 0x86, 0xba, 0x69, 0x27,
0xc8, 0xc5, 0x4e, 0xd3, 0x3b, 0xc3, 0xf2, 0x25, 0xaa, 0xb2, 0x8f, 0x13, 0x12, 0x87, 0xe7, 0x14,
0xc1, 0x5e, 0x6b, 0xf2, 0x2c, 0xbc, 0x0e, 0xd6, 0xf7, 0x7c, 0xdb, 0x39, 0x69, 0x86, 0x3e, 0xfb,
0xf5, 0x50, 0x0e, 0x63, 0x62, 0xb5, 0x51, 0x5b, 0x76, 0xe1, 0x4d, 0xb0, 0x71, 0x1c, 0xd4, 0xc6,
0x3a, 0x31, 0x5c, 0x07, 0x57, 0x58, 0xbf, 0xcb, 0x98, 0xeb, 0x70, 0x03, 0xac, 0x1d, 0x07, 0xee,
0x88, 0xa3, 0xb1, 0xf3, 0x9f, 0x79, 0x1e, 0x8f, 0x68, 0xb5, 0x94, 0x7f, 0x58, 0x30, 0xcd, 0x6a,
0xa9, 0x68, 0x54, 0x9f, 0x96, 0x4c, 0xb3, 0xf4, 0xd2, 0x40, 0xf2, 0x14, 0xfc, 0x10, 0x6c, 0x8f,
0x98, 0xab, 0xc7, 0x45, 0xab, 0x60, 0x56, 0x2d, 0x54, 0x78, 0xf6, 0xcc, 0x40, 0xd5, 0x4a, 0x31,
0x57, 0xae, 0x1c, 0x94, 0x2c, 0x9e, 0x02, 0x86, 0x36, 0x8d, 0xdc, 0xbe, 0x81, 0xe4, 0x69, 0x78,
0x0f, 0x68, 0x29, 0xc3, 0x24, 0xe2, 0x4c, 0x9f, 0xf8, 0xfc, 0xb8, 0x84, 0x8e, 0x8f, 0xe4, 0x4b,
0x2c, 0x77, 0xd4, 0x90, 0x33, 0x4d, 0x79, 0x16, 0xee, 0x80, 0x7b, 0x7b, 0x66, 0x2e, 0x7f, 0x78,
0x50, 0x32, 0x8d, 0x6a, 0xd9, 0x30, 0x50, 0xb5, 0x5c, 0x42, 0x56, 0xd5, 0x7a, 0x55, 0x45, 0xaf,
0xb2, 0x11, 0xbb, 0x30, 0x07, 0x3e, 0x79, 0x3f, 0xec, 0xa4, 0x68, 0x30, 0xfc, 0x1e, 0xd8, 0x9a,
0x2c, 0x21, 0xf6, 0x56, 0x87, 0x3f, 0x07, 0x3f, 0x7d, 0x17, 0x6a, 0xd2, 0x27, 0x1a, 0x17, 0x7f,
0x42, 0x64, 0xa1, 0x09, 0x6f, 0x83, 0x0f, 0x26, 0xa3, 0x68, 0x6a, 0x3c, 0xf8, 0x7d, 0xa0, 0xed,
0x1b, 0x66, 0xee, 0xb3, 0x8b, 0xd3, 0xf2, 0x46, 0x82, 0xbb, 0xe0, 0x3e, 0xca, 0x15, 0xf7, 0x4b,
0x47, 0xd5, 0xf7, 0xc0, 0x7f, 0x25, 0xc1, 0x5f, 0x80, 0x8f, 0xde, 0x0d, 0x9c, 0xb4, 0xc1, 0xaf,
0x25, 0x68, 0x80, 0x4f, 0xdf, 0xfb, 0x7b, 0x93, 0x64, 0xbe, 0x91, 0xe0, 0x6d, 0x70, 0x6b, 0x3c,
0x5f, 0x9c, 0xc3, 0xb7, 0x12, 0xdc, 0x06, 0x77, 0x2e, 0xfc, 0x92, 0x40, 0xfe, 0x51, 0x82, 0x3f,
0x03, 0x8f, 0x2e, 0x82, 0x4c, 0x0a, 0xe3, 0x4f, 0x12, 0x7c, 0x02, 0x1e, 0xbf, 0xc7, 0x37, 0x26,
0x09, 0xfc, 0xf9, 0x82, 0x7d, 0x88, 0xc3, 0xfe, 0xee, 0xdd, 0xfb, 0x10, 0xc8, 0xbf, 0x48, 0x70,
0x13, 0x5c, 0x1f, 0x0f, 0xa1, 0x35, 0xf1, 0x57, 0x09, 0xde, 0x05, 0x5b, 0x17, 0x2a, 0x51, 0xd8,
0xdf, 0x24, 0xa8, 0x80, 0xb5, 0x62, 0xa9, 0xfa, 0x34, 0x57, 0x30, 0xab, 0x2f, 0x0b, 0xd6, 0x41,
0xb5, 0x62, 0x21, 0xa3, 0x52, 0x91, 0xff, 0x30, 0x4d, 0x43, 0xc9, 0x78, 0x8a, 0x25, 0xe1, 0xac,
0x3e, 0x2d, 0xa1, 0xaa, 0x59, 0x78, 0x61, 0x14, 0x29, 0xf2, 0xcb, 0x69, 0xb8, 0x0a, 0x00, 0x85,
0x95, 0x4b, 0x85, 0xa2, 0x55, 0x91, 0x7f, 0x37, 0x03, 0x97, 0xc1, 0xbc, 0xf1, 0xca, 0x32, 0x50,
0x31, 0x67, 0xca, 0xff, 0x9b, 0xd9, 0x09, 0x01, 0x18, 0xbc, 0x17, 0xe0, 0x1c, 0x98, 0x3e, 0x7c,
0x21, 0x4f, 0xc1, 0x05, 0x30, 0x6b, 0x1a, 0xb9, 0x8a, 0x21, 0x4b, 0x70, 0x0d, 0xac, 0x1a, 0xa6,
0x91, 0xb7, 0x0a, 0xa5, 0x62, 0x15, 0x1d, 0x17, 0x8b, 0xac, 0x6f, 0xc8, 0x60, 0xe9, 0x65, 0xce,
0xca, 0x1f, 0xf4, 0x2c, 0x33, 0xb4, 0x3f, 0x99, 0xa5, 0xfc, 0x61, 0x15, 0xe5, 0xf2, 0x06, 0xea,
0x99, 0x2f, 0x51, 0x20, 0x13, 0xea, 0x59, 0x66, 0x1f, 0x3e, 0x01, 0x0b, 0x56, 0x6c, 0x07, 0x49,
0x14, 0xc6, 0x04, 0x3e, 0x4c, 0x2f, 0x56, 0xc4, 0xa4, 0x10, 0x03, 0xea, 0xc6, 0x6a, 0x7f, 0xcd,
0xe7, 0x84, 0x36, 0xb5, 0x2d, 0xfd, 0x50, 0xda, 0xbb, 0xfa, 0xe6, 0x5f, 0x9b, 0x53, 0x6f, 0xde,
0x6e, 0x4a, 0xdf, 0xbd, 0xdd, 0x94, 0xfe, 0xf9, 0x76, 0x53, 0xfa, 0xfd, 0xbf, 0x37, 0xa7, 0x6a,
0x73, 0xec, 0x9f, 0x86, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x31, 0xfa, 0xc2, 0x7d,
0x14, 0x00, 0x00,
}


@ -38,17 +38,28 @@ message Etcd {
string Name = 1 [(gogoproto.moretags) = "yaml:\"name\""];
string DataDir = 2 [(gogoproto.moretags) = "yaml:\"data-dir\""];
string WALDir = 3 [(gogoproto.moretags) = "yaml:\"wal-dir\""];
repeated string ListenClientURLs = 4 [(gogoproto.moretags) = "yaml:\"listen-client-urls\""];
repeated string AdvertiseClientURLs = 5 [(gogoproto.moretags) = "yaml:\"advertise-client-urls\""];
repeated string ListenPeerURLs = 6 [(gogoproto.moretags) = "yaml:\"listen-peer-urls\""];
repeated string InitialAdvertisePeerURLs = 7 [(gogoproto.moretags) = "yaml:\"initial-advertise-peer-urls\""];
string InitialCluster = 8 [(gogoproto.moretags) = "yaml:\"initial-cluster\""];
string InitialClusterState = 9 [(gogoproto.moretags) = "yaml:\"initial-cluster-state\""];
string InitialClusterToken = 10 [(gogoproto.moretags) = "yaml:\"initial-cluster-token\""];
int64 SnapshotCount = 11 [(gogoproto.moretags) = "yaml:\"snapshot-count\""];
int64 QuotaBackendBytes = 12 [(gogoproto.moretags) = "yaml:\"quota-backend-bytes\""];
bool PreVote = 13 [(gogoproto.moretags) = "yaml:\"pre-vote\""];
bool InitialCorruptCheck = 14 [(gogoproto.moretags) = "yaml:\"initial-corrupt-check\""];
// HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
// Default value is 100, which is 100ms.
int64 HeartbeatIntervalMs = 11 [(gogoproto.moretags) = "yaml:\"heartbeat-interval\""];
// ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
// Default value is 1000, which is 1s.
int64 ElectionTimeoutMs = 12 [(gogoproto.moretags) = "yaml:\"election-timeout\""];
repeated string ListenClientURLs = 21 [(gogoproto.moretags) = "yaml:\"listen-client-urls\""];
repeated string AdvertiseClientURLs = 22 [(gogoproto.moretags) = "yaml:\"advertise-client-urls\""];
repeated string ListenPeerURLs = 23 [(gogoproto.moretags) = "yaml:\"listen-peer-urls\""];
repeated string InitialAdvertisePeerURLs = 24 [(gogoproto.moretags) = "yaml:\"initial-advertise-peer-urls\""];
string InitialCluster = 31 [(gogoproto.moretags) = "yaml:\"initial-cluster\""];
string InitialClusterState = 32 [(gogoproto.moretags) = "yaml:\"initial-cluster-state\""];
string InitialClusterToken = 33 [(gogoproto.moretags) = "yaml:\"initial-cluster-token\""];
int64 SnapshotCount = 41 [(gogoproto.moretags) = "yaml:\"snapshot-count\""];
int64 QuotaBackendBytes = 42 [(gogoproto.moretags) = "yaml:\"quota-backend-bytes\""];
bool PreVote = 43 [(gogoproto.moretags) = "yaml:\"pre-vote\""];
bool InitialCorruptCheck = 44 [(gogoproto.moretags) = "yaml:\"initial-corrupt-check\""];
// TODO: support TLS
}
@ -86,29 +97,41 @@ message Member {
enum FailureCase {
KILL_ONE_FOLLOWER = 0;
KILL_LEADER = 1;
KILL_ONE_FOLLOWER_FOR_LONG = 2;
KILL_LEADER_FOR_LONG = 3;
KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 1;
KILL_LEADER = 2;
KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT = 3;
KILL_QUORUM = 4;
KILL_ALL = 5;
BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER = 6;
BLACKHOLE_PEER_PORT_TX_RX_LEADER = 7;
BLACKHOLE_PEER_PORT_TX_RX_ALL = 8;
BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER = 100;
BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 101;
BLACKHOLE_PEER_PORT_TX_RX_LEADER = 102;
BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 103;
BLACKHOLE_PEER_PORT_TX_RX_QUORUM = 104;
BLACKHOLE_PEER_PORT_TX_RX_ALL = 105;
DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 9;
DELAY_PEER_PORT_TX_RX_LEADER = 10;
DELAY_PEER_PORT_TX_RX_ALL = 11;
DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 200;
RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 201;
DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 202;
RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 203;
DELAY_PEER_PORT_TX_RX_LEADER = 204;
RANDOM_DELAY_PEER_PORT_TX_RX_LEADER = 205;
DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 206;
RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 207;
DELAY_PEER_PORT_TX_RX_QUORUM = 208;
RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM = 209;
DELAY_PEER_PORT_TX_RX_ALL = 210;
RANDOM_DELAY_PEER_PORT_TX_RX_ALL = 211;
// NO_FAIL_WITH_STRESS runs no-op failure injection for a specified period
// while stressers are still sending requests.
NO_FAIL_WITH_STRESS = 100;
NO_FAIL_WITH_STRESS = 300;
// NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS runs no-op failure injection
// with all stressers stopped.
NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS = 101;
NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS = 301;
FAILPOINTS = 200;
EXTERNAL = 300;
FAILPOINTS = 400;
EXTERNAL = 500;
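// Note: values are grouped by fault family: KILL_* (0-5), BLACKHOLE_PEER_PORT_* (100s),
// DELAY_PEER_PORT_* (200s), NO_FAIL_* (300s), FAILPOINTS (400), EXTERNAL (500),
// presumably leaving room for new cases within each group.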
}
enum StressType {
@ -129,6 +152,10 @@ message Tester {
uint32 DelayLatencyMs = 11 [(gogoproto.moretags) = "yaml:\"delay-latency-ms\""];
// DelayLatencyMsRv is the delay latency random variable in milliseconds.
uint32 DelayLatencyMsRv = 12 [(gogoproto.moretags) = "yaml:\"delay-latency-ms-rv\""];
// UpdatedDelayLatencyMs is the updated delay latency, in milliseconds,
// to inject into the simulated slow network. It is the final latency to apply,
// in case the latency numbers are randomly generated from the given delay latency field.
uint32 UpdatedDelayLatencyMs = 13 [(gogoproto.moretags) = "yaml:\"updated-delay-latency-ms\""];
// RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
int32 RoundLimit = 21 [(gogoproto.moretags) = "yaml:\"round-limit\""];


@ -104,6 +104,16 @@ type leaseChecker struct {
}
func (lc *leaseChecker) Check() error {
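// Nothing to verify when the lease stresser did not run, or when it has
// not recorded any revoked, alive, or short-lived leases yet.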
if lc.ls == nil {
return nil
}
if lc.ls != nil &&
(lc.ls.revokedLeases == nil ||
lc.ls.aliveLeases == nil ||
lc.ls.shortLivedLeases == nil) {
return nil
}
cli, err := lc.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(time.Second))
if err != nil {
return fmt.Errorf("%v (%q)", err, lc.m.EtcdClientEndpoint)
@ -114,6 +124,7 @@ func (lc *leaseChecker) Check() error {
}
}()
lc.cli = cli
if err := lc.check(true, lc.ls.revokedLeases.leases); err != nil {
return err
}


@ -61,7 +61,6 @@ type Cluster struct {
}
func newCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
lg.Info("reading configuration file", zap.String("path", fpath))
bts, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
@ -97,6 +96,16 @@ func newCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
clus.Members[i].Etcd.WALDir = filepath.Join(clus.Members[i].Etcd.DataDir, "member", "wal")
}
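// Sanity-check the member and tester timing configuration: heartbeat
// interval and election timeout must be set explicitly, and the injected
// delay latency must exceed the election timeout (presumably so the fixed
// DELAY_* cases are long enough to be disruptive).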
if clus.Members[i].Etcd.HeartbeatIntervalMs == 0 {
return nil, fmt.Errorf("'--heartbeat-interval' cannot be 0 (got %+v)", clus.Members[i].Etcd)
}
if clus.Members[i].Etcd.ElectionTimeoutMs == 0 {
return nil, fmt.Errorf("'--election-timeout' cannot be 0 (got %+v)", clus.Members[i].Etcd)
}
if int64(clus.Tester.DelayLatencyMs) <= clus.Members[i].Etcd.ElectionTimeoutMs {
return nil, fmt.Errorf("delay latency %d ms must be greater than election timeout %d ms", clus.Tester.DelayLatencyMs, clus.Members[i].Etcd.ElectionTimeoutMs)
}
port := ""
listenClientPorts := make([]string, len(clus.Members))
for i, u := range clus.Members[i].Etcd.ListenClientURLs {
@ -161,6 +170,13 @@ func newCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
}
}
if clus.Tester.DelayLatencyMs <= clus.Tester.DelayLatencyMsRv*5 {
return nil, fmt.Errorf("delay latency %d ms must be greater than 5x of delay latency random variable %d ms", clus.Tester.DelayLatencyMs, clus.Tester.DelayLatencyMsRv)
}
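// When the configuration leaves UpdatedDelayLatencyMs unset, start from the
// configured DelayLatencyMs; UpdateDelayLatencyMs below can later replace it
// with a randomized value.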
if clus.Tester.UpdatedDelayLatencyMs == 0 {
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
}
for _, v := range clus.Tester.FailureCases {
if _, ok := rpcpb.FailureCase_value[v]; !ok {
return nil, fmt.Errorf("%q is not defined in 'rpcpb.FailureCase_value'", v)
@ -204,7 +220,6 @@ func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
clus.failures = make([]Failure, 0)
for i, ap := range clus.Members {
clus.lg.Info("connecting", zap.String("agent-address", ap.AgentAddr))
var err error
clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
if err != nil {
@ -213,7 +228,6 @@ func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
clus.lg.Info("connected", zap.String("agent-address", ap.AgentAddr))
clus.lg.Info("creating stream", zap.String("agent-address", ap.AgentAddr))
clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
if err != nil {
return nil, err
@ -240,7 +254,9 @@ func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
rate.Limit(int(clus.Tester.StressQPS)),
int(clus.Tester.StressQPS),
)
clus.updateStresserChecker()
return clus, nil
}
@ -265,32 +281,60 @@ func (clus *Cluster) updateFailures() {
switch cs {
case "KILL_ONE_FOLLOWER":
clus.failures = append(clus.failures, newFailureKillOneFollower())
case "KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureKillOneFollowerUntilTriggerSnapshot())
case "KILL_LEADER":
clus.failures = append(clus.failures, newFailureKillLeader())
case "KILL_ONE_FOLLOWER_FOR_LONG":
clus.failures = append(clus.failures, newFailureKillOneFollowerForLongTime())
case "KILL_LEADER_FOR_LONG":
clus.failures = append(clus.failures, newFailureKillLeaderForLongTime())
case "KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureKillLeaderUntilTriggerSnapshot())
case "KILL_QUORUM":
clus.failures = append(clus.failures, newFailureKillQuorum())
case "KILL_ALL":
clus.failures = append(clus.failures, newFailureKillAll())
case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOneFollower(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOneFollowerUntilTriggerSnapshot())
case "BLACKHOLE_PEER_PORT_TX_RX_LEADER":
clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxLeader(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxLeaderUntilTriggerSnapshot())
case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM":
clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxQuorum(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxAll(clus))
case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollower(clus))
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollower(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollower(clus, true))
case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollowerUntilTriggerSnapshot(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollowerUntilTriggerSnapshot(clus, true))
case "DELAY_PEER_PORT_TX_RX_LEADER":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeader(clus))
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeader(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeader(clus, true))
case "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeaderUntilTriggerSnapshot(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeaderUntilTriggerSnapshot(clus, true))
case "DELAY_PEER_PORT_TX_RX_QUORUM":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxQuorum(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxQuorum(clus, true))
case "DELAY_PEER_PORT_TX_RX_ALL":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxAll(clus))
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxAll(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_ALL":
clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxAll(clus, true))
case "NO_FAIL_WITH_STRESS":
clus.failures = append(clus.failures, newFailureNoFailWithStress(clus))
case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":
clus.failures = append(clus.failures, newFailureNoFailWithNoStressForLiveness(clus))
case "EXTERNAL":
clus.failures = append(clus.failures, newFailureExternal(clus.Tester.ExternalExecPath))
case "FAILPOINTS":
@ -311,13 +355,24 @@ func (clus *Cluster) failureStrings() (fs []string) {
return fs
}
// UpdateDelayLatencyMs updates the delay latency with a random value
// within the election timeout.
func (clus *Cluster) UpdateDelayLatencyMs() {
rand.Seed(time.Now().UnixNano())
clus.Tester.UpdatedDelayLatencyMs = uint32(rand.Int63n(clus.Members[0].Etcd.ElectionTimeoutMs))
minLatRv := clus.Tester.DelayLatencyMsRv + clus.Tester.DelayLatencyMsRv/5
if clus.Tester.UpdatedDelayLatencyMs <= minLatRv {
clus.Tester.UpdatedDelayLatencyMs += minLatRv
}
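// Worked example, using the sample values from the tester config test
// (election-timeout 1000ms, delay-latency-ms-rv 500ms): the draw is in
// [0, 1000), minLatRv is 500+500/5 = 600, so a draw of 137 becomes
// 137+600 = 737ms while a draw of 900 stays at 900ms; the final latency
// therefore always exceeds minLatRv.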
}
func (clus *Cluster) shuffleFailures() {
rand.Seed(time.Now().UnixNano())
offset := rand.Intn(1000)
n := len(clus.failures)
cp := coprime(n)
clus.lg.Info("shuffling test failure cases", zap.Int("total", n))
fs := make([]Failure, n)
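// Walk the failure list with stride cp (assumed coprime to n) from a random
// offset; since gcd(cp, n) == 1, (cp*i+offset)%n visits every index exactly
// once. For example, n=4, cp=3, offset=5 yields the order 1, 0, 3, 2.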
for i := 0; i < n; i++ {
fs[i] = clus.failures[(cp*i+offset)%n]
@ -355,12 +410,6 @@ func gcd(x, y int) int {
}
func (clus *Cluster) updateStresserChecker() {
clus.lg.Info(
"updating stressers",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
)
cs := &compositeStresser{}
for _, m := range clus.Members {
cs.stressers = append(cs.stressers, newStresser(clus, m))
@ -397,21 +446,17 @@ func (clus *Cluster) checkConsistency() (err error) {
}
}()
clus.lg.Info(
"checking consistency and invariant of cluster",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", clus.failures[clus.cs].Desc()),
)
if err = clus.checker.Check(); err != nil {
clus.lg.Warn(
"checker.Check failed",
"consistency check FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),
)
return err
}
clus.lg.Info(
"checked consistency and invariant of cluster",
"consistency check ALL PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", clus.failures[clus.cs].Desc()),
@ -468,11 +513,6 @@ func (clus *Cluster) sendOperation(idx int, op rpcpb.Operation) error {
clus.agentRequests[idx].Operation = op
}
clus.lg.Info(
"sending request",
zap.String("operation", op.String()),
zap.String("to", clus.Members[idx].EtcdClientEndpoint),
)
err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
clus.lg.Info(
"sent request",
@ -484,11 +524,6 @@ func (clus *Cluster) sendOperation(idx int, op rpcpb.Operation) error {
return err
}
clus.lg.Info(
"receiving response",
zap.String("operation", op.String()),
zap.String("from", clus.Members[idx].EtcdClientEndpoint),
)
resp, err := clus.agentStreams[idx].Recv()
if resp != nil {
clus.lg.Info(
@ -519,22 +554,19 @@ func (clus *Cluster) sendOperation(idx int, op rpcpb.Operation) error {
// DestroyEtcdAgents terminates all tester connections to agents and etcd servers.
func (clus *Cluster) DestroyEtcdAgents() {
clus.lg.Info("destroying etcd servers and agents")
err := clus.broadcastOperation(rpcpb.Operation_DestroyEtcdAgent)
if err != nil {
clus.lg.Warn("failed to destroy etcd servers and agents", zap.Error(err))
clus.lg.Warn("destroying etcd/agents FAIL", zap.Error(err))
} else {
clus.lg.Info("destroyed etcd servers and agents")
clus.lg.Info("destroying etcd/agents PASS")
}
for i, conn := range clus.agentConns {
clus.lg.Info("closing connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr))
err := conn.Close()
clus.lg.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
}
if clus.testerHTTPServer != nil {
clus.lg.Info("closing tester HTTP server", zap.String("tester-address", clus.Tester.TesterAddr))
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
err := clus.testerHTTPServer.Shutdown(ctx)
cancel()
@ -552,14 +584,9 @@ func (clus *Cluster) WaitHealth() error {
// reasonable workload (https://github.com/coreos/etcd/issues/2698)
for i := 0; i < 60; i++ {
for _, m := range clus.Members {
clus.lg.Info(
"writing health key",
zap.Int("retries", i),
zap.String("endpoint", m.EtcdClientEndpoint),
)
if err = m.WriteHealthKey(); err != nil {
clus.lg.Warn(
"writing health key failed",
"health check FAIL",
zap.Int("retries", i),
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Error(err),
@ -567,15 +594,16 @@ func (clus *Cluster) WaitHealth() error {
break
}
clus.lg.Info(
"wrote health key",
"health check PASS",
zap.Int("retries", i),
zap.String("endpoint", m.EtcdClientEndpoint),
)
}
if err == nil {
clus.lg.Info(
"writing health key success on all members",
zap.Int("retries", i),
"health check ALL PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
)
return nil
}
@ -639,7 +667,7 @@ func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
for i, m := range clus.Members {
clus.lg.Info(
"compacting",
"compact START",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.Duration("timeout", timeout),
@ -657,7 +685,7 @@ func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
)
} else {
clus.lg.Warn(
"compact failed",
"compact FAIL",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.Error(cerr),
@ -669,7 +697,7 @@ func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
if succeed {
clus.lg.Info(
"compacted",
"compact PASS",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.Duration("timeout", timeout),
@ -693,24 +721,22 @@ func (clus *Cluster) checkCompact(rev int64) error {
}
func (clus *Cluster) defrag() error {
clus.lg.Info(
"defragmenting",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
)
for _, m := range clus.Members {
if err := m.Defrag(); err != nil {
clus.lg.Warn(
"defrag failed",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
"defrag FAIL",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Error(err),
)
return err
}
clus.lg.Info(
"defrag PASS",
zap.String("endpoint", m.EtcdClientEndpoint),
)
}
clus.lg.Info(
"defragmented",
"defrag ALL PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
)


@ -41,6 +41,8 @@ func Test_newCluster(t *testing.T) {
Name: "s1",
DataDir: "/tmp/etcd-agent-data-1/etcd.data",
WALDir: "/tmp/etcd-agent-data-1/etcd.data/member/wal",
HeartbeatIntervalMs: 100,
ElectionTimeoutMs: 1000,
ListenClientURLs: []string{"http://127.0.0.1:1379"},
AdvertiseClientURLs: []string{"http://127.0.0.1:1379"},
ListenPeerURLs: []string{"http://127.0.0.1:1380"},
@ -68,6 +70,8 @@ func Test_newCluster(t *testing.T) {
Name: "s2",
DataDir: "/tmp/etcd-agent-data-2/etcd.data",
WALDir: "/tmp/etcd-agent-data-2/etcd.data/member/wal",
HeartbeatIntervalMs: 100,
ElectionTimeoutMs: 1000,
ListenClientURLs: []string{"http://127.0.0.1:2379"},
AdvertiseClientURLs: []string{"http://127.0.0.1:2379"},
ListenPeerURLs: []string{"http://127.0.0.1:2380"},
@ -95,6 +99,8 @@ func Test_newCluster(t *testing.T) {
Name: "s3",
DataDir: "/tmp/etcd-agent-data-3/etcd.data",
WALDir: "/tmp/etcd-agent-data-3/etcd.data/member/wal",
HeartbeatIntervalMs: 100,
ElectionTimeoutMs: 1000,
ListenClientURLs: []string{"http://127.0.0.1:3379"},
AdvertiseClientURLs: []string{"http://127.0.0.1:3379"},
ListenPeerURLs: []string{"http://127.0.0.1:3380"},
@ -110,27 +116,40 @@ func Test_newCluster(t *testing.T) {
},
},
Tester: &rpcpb.Tester{
TesterNetwork: "tcp",
TesterAddr: "127.0.0.1:9028",
DelayLatencyMs: 500,
DelayLatencyMsRv: 50,
RoundLimit: 1,
ExitOnFailure: true,
ConsistencyCheck: true,
EnablePprof: true,
TesterNetwork: "tcp",
TesterAddr: "127.0.0.1:9028",
DelayLatencyMs: 5000,
DelayLatencyMsRv: 500,
UpdatedDelayLatencyMs: 5000,
RoundLimit: 1,
ExitOnFailure: true,
ConsistencyCheck: true,
EnablePprof: true,
FailureCases: []string{
"KILL_ONE_FOLLOWER",
"KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
"KILL_LEADER",
"KILL_ONE_FOLLOWER_FOR_LONG",
"KILL_LEADER_FOR_LONG",
"KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT",
"KILL_QUORUM",
"KILL_ALL",
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
"BLACKHOLE_PEER_PORT_TX_RX_LEADER",
"BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
"BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
"BLACKHOLE_PEER_PORT_TX_RX_ALL",
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
"DELAY_PEER_PORT_TX_RX_LEADER",
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
"DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
"DELAY_PEER_PORT_TX_RX_QUORUM",
"RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
"DELAY_PEER_PORT_TX_RX_ALL",
"RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
"NO_FAIL_WITH_STRESS",
"NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
},
@ -146,7 +165,7 @@ func Test_newCluster(t *testing.T) {
StressKeySuffixRangeTxn: 100,
StressKeyTxnOps: 10,
StressClients: 100,
StressQPS: 1000,
StressQPS: 2000,
},
}


@ -39,7 +39,7 @@ func (clus *Cluster) StartTester() {
if err := clus.doRound(); err != nil {
clus.lg.Warn(
"doRound failed; returning",
"round FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),
@ -62,21 +62,21 @@ func (clus *Cluster) StartTester() {
timeout := 10 * time.Second
timeout += time.Duration(modifiedKey/compactQPS) * time.Second
clus.lg.Info(
"compacting",
"compact START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Duration("timeout", timeout),
)
if err := clus.compact(revToCompact, timeout); err != nil {
clus.lg.Warn(
"compact failed",
"compact FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),
)
if err = clus.cleanup(); err != nil {
clus.lg.Warn(
"cleanup failed",
"cleanup FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),
@ -88,12 +88,6 @@ func (clus *Cluster) StartTester() {
}
if round > 0 && round%500 == 0 { // every 500 rounds
if err := clus.defrag(); err != nil {
clus.lg.Warn(
"defrag failed; returning",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),
)
clus.failed()
return
}
@ -101,7 +95,7 @@ func (clus *Cluster) StartTester() {
}
clus.lg.Info(
"functional-tester passed",
"functional-tester PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
)
@ -112,18 +106,21 @@ func (clus *Cluster) doRound() error {
clus.shuffleFailures()
}
roundNow := time.Now()
clus.lg.Info(
"starting round",
"round START",
zap.Int("round", clus.rd),
zap.Strings("failures", clus.failureStrings()),
zap.Int("total-failures", len(clus.failures)),
)
for i, fa := range clus.failures {
clus.cs = i
caseTotalCounter.WithLabelValues(fa.Desc()).Inc()
caseNow := time.Now()
clus.lg.Info(
"failure case START",
"case START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", fa.Desc()),
@ -138,7 +135,7 @@ func (clus *Cluster) doRound() error {
fcase := fa.FailureCase()
if fcase != rpcpb.FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS {
clus.lg.Info(
"starting stressers before injecting failures",
"stresser START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", fa.Desc()),
@ -150,7 +147,7 @@ func (clus *Cluster) doRound() error {
}
clus.lg.Info(
"injecting",
"inject START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", fa.Desc()),
@ -163,7 +160,7 @@ func (clus *Cluster) doRound() error {
// with stressing client ports
// TODO: use unix for local tests
clus.lg.Info(
"recovering",
"recover START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", fa.Desc()),
@ -173,13 +170,13 @@ func (clus *Cluster) doRound() error {
}
if stressStarted {
clus.lg.Info("pausing stresser after failure recovery, before wait health")
clus.lg.Info("stresser PAUSE")
ems := clus.stresser.Pause()
if fcase == rpcpb.FailureCase_NO_FAIL_WITH_STRESS && len(ems) > 0 {
ess := make([]string, 0, len(ems))
cnt := 0
for k, v := range ems {
ess = append(ess, fmt.Sprintf("%s (count %d)", k, v))
ess = append(ess, fmt.Sprintf("%s (count: %d)", k, v))
cnt += v
}
clus.lg.Warn(
@ -187,34 +184,40 @@ func (clus *Cluster) doRound() error {
zap.String("desc", fa.Desc()),
zap.Strings("errors", ess),
)
return fmt.Errorf("expected no error in %q, got %q", fcase.String(), ess)
// With network delay, some ongoing requests may fail.
// Only return an error if more than 10% of QPS requests fail.
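// For example, with the sample StressQPS of 2000 in the tester config,
// up to 200 failed requests are tolerated here.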
if cnt > int(clus.Tester.StressQPS)/10 {
return fmt.Errorf("expected no error in %q, got %q", fcase.String(), ess)
}
}
}
clus.lg.Info("wait health after recover")
clus.lg.Info("health check START")
if err := clus.WaitHealth(); err != nil {
return fmt.Errorf("wait full health error: %v", err)
}
clus.lg.Info("check consistency after recover")
clus.lg.Info("consistency check START")
if err := clus.checkConsistency(); err != nil {
return fmt.Errorf("tt.checkConsistency error (%v)", err)
return fmt.Errorf("consistency check error (%v)", err)
}
clus.lg.Info(
"failure case PASS",
"case PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", fa.Desc()),
zap.Duration("took", time.Since(caseNow)),
)
}
clus.lg.Info(
"finished round",
"round ALL PASS",
zap.Int("round", clus.rd),
zap.Strings("failures", clus.failureStrings()),
zap.Duration("took", time.Since(roundNow)),
)
return nil
}
@ -233,28 +236,9 @@ func (clus *Cluster) updateRevision() error {
}
func (clus *Cluster) compact(rev int64, timeout time.Duration) (err error) {
clus.lg.Info(
"compacting storage",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
)
if err = clus.compactKV(rev, timeout); err != nil {
return err
}
clus.lg.Info(
"compacted storage",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
)
clus.lg.Info(
"checking compaction",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
)
if err = clus.checkCompact(rev); err != nil {
clus.lg.Warn(
"checkCompact failed",
"compact FAIL",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
zap.Error(err),
@ -262,7 +246,22 @@ func (clus *Cluster) compact(rev int64, timeout time.Duration) (err error) {
return err
}
clus.lg.Info(
"confirmed compaction",
"compact DONE",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
)
if err = clus.checkCompact(rev); err != nil {
clus.lg.Warn(
"check compact FAIL",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
zap.Error(err),
)
return err
}
clus.lg.Info(
"check compact DONE",
zap.Int64("current-revision", clus.currentRevision),
zap.Int64("compact-revision", rev),
)
@ -276,7 +275,7 @@ func (clus *Cluster) failed() {
}
clus.lg.Info(
"exiting on failure",
"functional-tester FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
)
@ -303,7 +302,7 @@ func (clus *Cluster) cleanup() error {
if err := clus.FailArchive(); err != nil {
clus.lg.Warn(
"cleanup failed",
"cleanup FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),
@ -312,7 +311,7 @@ func (clus *Cluster) cleanup() error {
}
if err := clus.Restart(); err != nil {
clus.lg.Warn(
"restart failed",
"restart FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Error(err),


@ -20,6 +20,8 @@ import (
"time"
"github.com/coreos/etcd/tools/functional-tester/rpcpb"
"go.uber.org/zap"
)
// Failure defines failure injection interface.
@ -43,15 +45,15 @@ type injectMemberFunc func(*Cluster, int) error
type recoverMemberFunc func(*Cluster, int) error
type failureByFunc struct {
desc
desc string
failureCase rpcpb.FailureCase
injectMember injectMemberFunc
recoverMember recoverMemberFunc
}
func (f *failureByFunc) Desc() string {
if string(f.desc) != "" {
return string(f.desc)
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
}
@ -100,8 +102,8 @@ func (f *failureFollower) Recover(clus *Cluster) error {
}
func (f *failureFollower) Desc() string {
if string(f.desc) != "" {
return string(f.desc)
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
}
@ -162,8 +164,8 @@ func (f *failureQuorum) Recover(clus *Cluster) error {
}
func (f *failureQuorum) Desc() string {
if string(f.desc) != "" {
return string(f.desc)
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
}
@ -172,6 +174,18 @@ func (f *failureQuorum) FailureCase() rpcpb.FailureCase {
return f.failureCase
}
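// killMap picks a random majority (quorum) of member indexes; the selection
// is deterministic for a given seed.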
func killMap(size int, seed int) map[int]bool {
m := make(map[int]bool)
r := rand.New(rand.NewSource(int64(seed)))
majority := size/2 + 1
for {
m[r.Intn(size)] = true
if len(m) >= majority {
return m
}
}
}
type failureAll failureByFunc
func (f *failureAll) Inject(clus *Cluster) error {
@ -193,8 +207,8 @@ func (f *failureAll) Recover(clus *Cluster) error {
}
func (f *failureAll) Desc() string {
if string(f.desc) != "" {
return string(f.desc)
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
}
@ -205,13 +219,24 @@ func (f *failureAll) FailureCase() rpcpb.FailureCase {
// failureUntilSnapshot injects a failure and waits for a snapshot event
type failureUntilSnapshot struct {
desc desc
desc string
failureCase rpcpb.FailureCase
Failure
}
const snapshotCount = 10000
// all delay failure cases except the ones failing with latency
// greater than the election timeout (those trigger leader election
// and the cluster keeps operating anyway)
var slowCases = map[rpcpb.FailureCase]bool{
rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER: true,
rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT: true,
rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT: true,
rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER: true,
rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT: true,
rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT: true,
rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM: true,
rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ALL: true,
}
func (f *failureUntilSnapshot) Inject(clus *Cluster) error {
if err := f.Failure.Inject(clus); err != nil {
@ -220,6 +245,18 @@ func (f *failureUntilSnapshot) Inject(clus *Cluster) error {
if len(clus.Members) < 3 {
return nil
}
snapshotCount := clus.Members[0].Etcd.SnapshotCount
now := time.Now()
clus.lg.Info(
"trigger snapshot START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", f.Desc()),
zap.Int64("etcd-snapshot-count", snapshotCount),
)
// maxRev may fail since the failure was just injected; retry if it fails.
startRev, err := clus.maxRev()
for i := 0; i < 10 && startRev == 0; i++ {
@ -229,44 +266,59 @@ func (f *failureUntilSnapshot) Inject(clus *Cluster) error {
return err
}
lastRev := startRev
// Normal healthy cluster could accept 1000req/s at least.
// Give it 3-times time to create a new snapshot.
retry := snapshotCount / 1000 * 3
for j := 0; j < retry; j++ {
// a healthy cluster can accept at least 1000 req/sec;
// allow 3x that time to trigger a snapshot.
retries := int(snapshotCount) / 1000 * 3
if v, ok := slowCases[f.FailureCase()]; v && ok {
// slow network takes more retries
retries *= 5
}
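// For example, with SnapshotCount set to 10000 this allows 10000/1000*3 = 30
// one-second retries below, or 150 retries for the slow delay cases.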
for i := 0; i < retries; i++ {
lastRev, _ = clus.maxRev()
// If the number of proposals committed is bigger than snapshot count,
// a new snapshot should have been created.
if lastRev-startRev > snapshotCount {
diff := lastRev - startRev
if diff > snapshotCount {
clus.lg.Info(
"trigger snapshot PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("retries", i),
zap.String("desc", f.Desc()),
zap.Int64("committed-entries", diff),
zap.Int64("etcd-snapshot-count", snapshotCount),
zap.Int64("last-revision", lastRev),
zap.Duration("took", time.Since(now)),
)
return nil
}
clus.lg.Info(
"trigger snapshot PROGRESS",
zap.Int("retries", i),
zap.Int64("committed-entries", diff),
zap.Int64("etcd-snapshot-count", snapshotCount),
zap.Int64("last-revision", lastRev),
zap.Duration("took", time.Since(now)),
)
time.Sleep(time.Second)
}
return fmt.Errorf("cluster too slow: only commit %d requests in %ds", lastRev-startRev, retry)
return fmt.Errorf("cluster too slow: only %d commits in %d retries", lastRev-startRev, retries)
}
func (f *failureUntilSnapshot) Desc() string {
if f.desc.Desc() != "" {
return f.desc.Desc()
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
if f.failureCase.String() != "" {
return f.failureCase.String()
}
return f.Failure.Desc()
}
func (f *failureUntilSnapshot) FailureCase() rpcpb.FailureCase {
return f.failureCase
}
func killMap(size int, seed int) map[int]bool {
m := make(map[int]bool)
r := rand.New(rand.NewSource(int64(seed)))
majority := size/2 + 1
for {
m[r.Intn(size)] = true
if len(m) >= majority {
return m
}
}
}
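
killMap above selects a random majority of member indexes to kill. The sketch below copies it verbatim so it runs standalone; the driver values (a 5-member cluster and a fixed seed) are illustrative only.

```go
package main

import (
	"fmt"
	"math/rand"
)

// copied from the diff above so the sketch is self-contained
func killMap(size int, seed int) map[int]bool {
	m := make(map[int]bool)
	r := rand.New(rand.NewSource(int64(seed)))
	majority := size/2 + 1
	for {
		m[r.Intn(size)] = true
		if len(m) >= majority {
			return m
		}
	}
}

func main() {
	// for a 5-member cluster, a majority is 3; the fixed seed makes the pick reproducible
	fmt.Println(killMap(5, 0)) // prints a map of 3 distinct member indexes
}
```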
type desc string
func (d desc) Desc() string { return string(d) }

View File

@ -51,7 +51,7 @@ func failpointFailures(clus *Cluster) (ret []Failure, err error) {
if strings.Contains(fp, "Snap") {
// hack to trigger snapshot failpoints
fpFails[i] = &failureUntilSnapshot{
desc: desc(fpf.Desc()),
desc: fpf.Desc(),
failureCase: rpcpb.FailureCase_FAILPOINTS,
Failure: fpf,
}
@ -95,7 +95,7 @@ func failuresFromFailpoint(fp string, failpointCommands []string) (fs []Failure)
fs = append(fs, []Failure{
&failureFollower{
failureByFunc: failureByFunc{
desc: desc(fmt.Sprintf("failpoint %q (one: %q)", fp, fcmd)),
desc: fmt.Sprintf("failpoint %q (one: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
@ -105,7 +105,7 @@ func failuresFromFailpoint(fp string, failpointCommands []string) (fs []Failure)
},
&failureLeader{
failureByFunc: failureByFunc{
desc: desc(fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd)),
desc: fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
@ -114,13 +114,13 @@ func failuresFromFailpoint(fp string, failpointCommands []string) (fs []Failure)
lead: -1,
},
&failureQuorum{
desc: desc(fmt.Sprintf("failpoint %q (quorum: %q)", fp, fcmd)),
desc: fmt.Sprintf("failpoint %q (quorum: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
},
&failureAll{
desc: desc(fmt.Sprintf("failpoint %q (all: %q)", fp, fcmd)),
desc: fmt.Sprintf("failpoint %q (all: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
injectMember: inject,
recoverMember: recov,

View File

@ -58,16 +58,16 @@ func newFailureKillAll() Failure {
}
}
func newFailureKillOneFollowerForLongTime() Failure {
func newFailureKillOneFollowerUntilTriggerSnapshot() Failure {
return &failureUntilSnapshot{
failureCase: rpcpb.FailureCase_KILL_ONE_FOLLOWER_FOR_LONG,
failureCase: rpcpb.FailureCase_KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
Failure: newFailureKillOneFollower(),
}
}
func newFailureKillLeaderForLongTime() Failure {
func newFailureKillLeaderUntilTriggerSnapshot() Failure {
return &failureUntilSnapshot{
failureCase: rpcpb.FailureCase_KILL_LEADER_FOR_LONG,
failureCase: rpcpb.FailureCase_KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT,
Failure: newFailureKillLeader(),
}
}

View File

@ -14,9 +14,7 @@
package tester
import (
"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)
import "github.com/coreos/etcd/tools/functional-tester/rpcpb"
func injectBlackholePeerPortTxRx(clus *Cluster, idx int) error {
return clus.sendOperation(idx, rpcpb.Operation_BlackholePeerPortTxRx)
@ -39,6 +37,19 @@ func newFailureBlackholePeerPortTxRxOneFollower(clus *Cluster) Failure {
}
}
func newFailureBlackholePeerPortTxRxOneFollowerUntilTriggerSnapshot() Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
injectMember: injectBlackholePeerPortTxRx,
recoverMember: recoverBlackholePeerPortTxRx,
}
f := &failureFollower{ff, -1, -1}
return &failureUntilSnapshot{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
Failure: f,
}
}
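
The constructors in this file all follow the same wrapping pattern: a base fault (here, blackholing one follower's peer port) is handed to failureUntilSnapshot, so recovery only happens once the cluster has committed enough entries to trigger a snapshot. The sketch below illustrates that decorator idea with simplified stand-in types; it is not the tester's real Failure interface.

```go
package main

import "fmt"

// simplified stand-in for the tester's Failure interface
type failure interface {
	Inject() error
	Recover() error
	Desc() string
}

// base fault: blackhole one follower's peer traffic
type blackholeFollower struct{}

func (blackholeFollower) Inject() error  { fmt.Println("blackhole follower peer port"); return nil }
func (blackholeFollower) Recover() error { fmt.Println("unblackhole follower peer port"); return nil }
func (blackholeFollower) Desc() string   { return "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER" }

// untilSnapshot wraps a base fault and, after injecting it, keeps the fault in
// place until enough entries are committed to trigger a snapshot (simulated here).
type untilSnapshot struct{ failure }

func (u untilSnapshot) Inject() error {
	if err := u.failure.Inject(); err != nil {
		return err
	}
	fmt.Println("write until committed entries exceed the snapshot count")
	return nil
}

func main() {
	var f failure = untilSnapshot{blackholeFollower{}}
	fmt.Println(f.Desc())
	_ = f.Inject()
	_ = f.Recover()
}
```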
func newFailureBlackholePeerPortTxRxLeader(clus *Cluster) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER,
@ -52,6 +63,31 @@ func newFailureBlackholePeerPortTxRxLeader(clus *Cluster) Failure {
}
}
func newFailureBlackholePeerPortTxRxLeaderUntilTriggerSnapshot() Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
injectMember: injectBlackholePeerPortTxRx,
recoverMember: recoverBlackholePeerPortTxRx,
}
f := &failureLeader{ff, -1, -1}
return &failureUntilSnapshot{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
Failure: f,
}
}
func newFailureBlackholePeerPortTxRxQuorum(clus *Cluster) Failure {
f := &failureQuorum{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM,
injectMember: injectBlackholePeerPortTxRx,
recoverMember: recoverBlackholePeerPortTxRx,
}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
func newFailureBlackholePeerPortTxRxAll(clus *Cluster) Failure {
f := &failureAll{
failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL,

View File

@ -0,0 +1,165 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"time"
"github.com/coreos/etcd/tools/functional-tester/rpcpb"
"go.uber.org/zap"
)
const (
// Wait more when recovering from a slow network, because the network layer
// needs extra time to propagate the traffic control (tc command) change.
// Otherwise, hash checks can observe values that differ from the previous revision.
// For more detail, see https://github.com/coreos/etcd/issues/5121.
waitRecover = 5 * time.Second
)
func injectDelayPeerPortTxRx(clus *Cluster, idx int) error {
clus.lg.Info(
"injecting delay latency",
zap.Duration("latency", time.Duration(clus.Tester.UpdatedDelayLatencyMs)*time.Millisecond),
zap.Duration("latency-rv", time.Duration(clus.Tester.DelayLatencyMsRv)*time.Millisecond),
zap.String("endpoint", clus.Members[idx].EtcdClientEndpoint),
)
return clus.sendOperation(idx, rpcpb.Operation_DelayPeerPortTxRx)
}
func recoverDelayPeerPortTxRx(clus *Cluster, idx int) error {
err := clus.sendOperation(idx, rpcpb.Operation_UndelayPeerPortTxRx)
time.Sleep(waitRecover)
return err
}
func newFailureDelayPeerPortTxRxOneFollower(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
}
f := &failureFollower{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
func newFailureDelayPeerPortTxRxOneFollowerUntilTriggerSnapshot(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
}
f := &failureFollower{ff, -1, -1}
return &failureUntilSnapshot{
failureCase: ff.failureCase,
Failure: f,
}
}
func newFailureDelayPeerPortTxRxLeader(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER
}
f := &failureLeader{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
func newFailureDelayPeerPortTxRxLeaderUntilTriggerSnapshot(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
}
f := &failureLeader{ff, -1, -1}
return &failureUntilSnapshot{
failureCase: ff.failureCase,
Failure: f,
}
}
func newFailureDelayPeerPortTxRxQuorum(clus *Cluster, random bool) Failure {
f := &failureQuorum{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
f.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM
}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
func newFailureDelayPeerPortTxRxAll(clus *Cluster, random bool) Failure {
f := &failureAll{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ALL,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
f.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ALL
}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
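
The RANDOM_DELAY_* variants call clus.UpdateDelayLatencyMs before wiring up the failure. Its exact formula is not part of this hunk; the sketch below only illustrates one plausible way a randomized latency could be derived from the configured base (delay-latency-ms) and spread (delay-latency-ms-rv). Treat the helper and its math as hypothetical, not as the tester's actual implementation.

```go
package main

import (
	"fmt"
	"math/rand"
)

// hypothetical helper: derive a randomized latency from the configured base
// (delay-latency-ms) and spread (delay-latency-ms-rv); this is NOT the real
// UpdateDelayLatencyMs implementation, only an illustration of the idea.
func randomizedLatencyMs(baseMs, rvMs int, r *rand.Rand) int {
	if rvMs <= 0 {
		return baseMs
	}
	// pick a value in [baseMs-rvMs, baseMs+rvMs]
	v := baseMs - rvMs + r.Intn(2*rvMs+1)
	if v < 0 {
		v = 0
	}
	return v
}

func main() {
	r := rand.New(rand.NewSource(42))
	// values from the local functional-test config: 5000ms base, 500ms spread
	fmt.Println(randomizedLatencyMs(5000, 500, r))
}
```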

View File

@ -1,77 +0,0 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"time"
"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)
const (
// Wait more when it recovers from slow network, because network layer
// needs extra time to propagate traffic control (tc command) change.
// Otherwise, we get different hash values from the previous revision.
// For more detail, please see https://github.com/coreos/etcd/issues/5121.
waitRecover = 5 * time.Second
)
func injectDelayPeerPortTxRx(clus *Cluster, idx int) error {
return clus.sendOperation(idx, rpcpb.Operation_DelayPeerPortTxRx)
}
func recoverDelayPeerPortTxRx(clus *Cluster, idx int) error {
err := clus.sendOperation(idx, rpcpb.Operation_UndelayPeerPortTxRx)
time.Sleep(waitRecover)
return err
}
func newFailureDelayPeerPortTxRxOneFollower(clus *Cluster) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
f := &failureFollower{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
func newFailureDelayPeerPortTxRxLeader(clus *Cluster) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
f := &failureLeader{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}
func newFailureDelayPeerPortTxRxAll(clus *Cluster) Failure {
f := &failureAll{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ALL,
injectMember: injectDelayPeerPortTxRx,
recoverMember: recoverDelayPeerPortTxRx,
}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
}
}

View File

@ -33,8 +33,8 @@ func (f *failureNoFailWithStress) Recover(clus *Cluster) error {
}
func (f *failureNoFailWithStress) Desc() string {
if f.desc.Desc() != "" {
return f.desc.Desc()
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
}
@ -78,8 +78,8 @@ func (f *failureNoFailWithNoStressForLiveness) Recover(clus *Cluster) error {
}
func (f *failureNoFailWithNoStressForLiveness) Desc() string {
if f.desc.Desc() != "" {
return f.desc.Desc()
if f.desc != "" {
return f.desc
}
return f.failureCase.String()
}

View File

@ -12,6 +12,8 @@ agent-configs:
name: s1
data-dir: /tmp/etcd-agent-data-1/etcd.data
wal-dir: /tmp/etcd-agent-data-1/etcd.data/member/wal
heartbeat-interval: 100
election-timeout: 1000
listen-client-urls: ["http://127.0.0.1:1379"]
advertise-client-urls: ["http://127.0.0.1:1379"]
listen-peer-urls: ["http://127.0.0.1:1380"]
@ -36,6 +38,8 @@ agent-configs:
name: s2
data-dir: /tmp/etcd-agent-data-2/etcd.data
wal-dir: /tmp/etcd-agent-data-2/etcd.data/member/wal
heartbeat-interval: 100
election-timeout: 1000
listen-client-urls: ["http://127.0.0.1:2379"]
advertise-client-urls: ["http://127.0.0.1:2379"]
listen-peer-urls: ["http://127.0.0.1:2380"]
@ -60,6 +64,8 @@ agent-configs:
name: s3
data-dir: /tmp/etcd-agent-data-3/etcd.data
wal-dir: /tmp/etcd-agent-data-3/etcd.data/member/wal
heartbeat-interval: 100
election-timeout: 1000
listen-client-urls: ["http://127.0.0.1:3379"]
advertise-client-urls: ["http://127.0.0.1:3379"]
listen-peer-urls: ["http://127.0.0.1:3380"]
@ -76,8 +82,9 @@ tester-config:
tester-network: tcp
tester-addr: 127.0.0.1:9028
delay-latency-ms: 500
delay-latency-ms-rv: 50
# slower than the 1000ms election timeout, so slow enough to trigger leader election
delay-latency-ms: 5000
delay-latency-ms-rv: 500
round-limit: 1
exit-on-failure: true
@ -86,17 +93,29 @@ tester-config:
failure-cases:
- KILL_ONE_FOLLOWER
- KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- KILL_LEADER
- KILL_ONE_FOLLOWER_FOR_LONG
- KILL_LEADER_FOR_LONG
- KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT
- KILL_QUORUM
- KILL_ALL
- BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER
- BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- BLACKHOLE_PEER_PORT_TX_RX_LEADER
- BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- BLACKHOLE_PEER_PORT_TX_RX_QUORUM
- BLACKHOLE_PEER_PORT_TX_RX_ALL
- DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
- RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
- DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- DELAY_PEER_PORT_TX_RX_LEADER
- RANDOM_DELAY_PEER_PORT_TX_RX_LEADER
- DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- DELAY_PEER_PORT_TX_RX_QUORUM
- RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM
- DELAY_PEER_PORT_TX_RX_ALL
- RANDOM_DELAY_PEER_PORT_TX_RX_ALL
- NO_FAIL_WITH_STRESS
- NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS
@ -125,4 +144,4 @@ tester-config:
stress-key-txn-ops: 10
stress-clients: 100
stress-qps: 1000
stress-qps: 2000
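
The timing knobs added to this config fit together as follows: each member now declares heartbeat-interval 100ms and election-timeout 1000ms, and the tester injects a 5000ms peer-link delay, which is what the election-trigger comment above relies on. A minimal sanity-check sketch using only the values shown in this file:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// values from the local functional-test config above
	heartbeat := 100 * time.Millisecond
	electionTimeout := 1000 * time.Millisecond
	injectedDelay := 5000 * time.Millisecond

	// etcd convention: election timeout is kept well above the heartbeat interval
	fmt.Println(int(electionTimeout / heartbeat)) // 10

	// the injected peer-link delay exceeds the election timeout, so the
	// non-random DELAY_PEER_PORT_TX_RX_* cases are expected to force a leader election
	fmt.Println(injectedDelay > electionTimeout) // true
}
```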

View File

@ -41,7 +41,11 @@ type Stresser interface {
func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
stressers := make([]Stresser, len(clus.Tester.StressTypes))
for i, stype := range clus.Tester.StressTypes {
clus.lg.Info("creating stresser", zap.String("type", stype))
clus.lg.Info(
"creating stresser",
zap.String("type", stype),
zap.String("endpoint", m.EtcdClientEndpoint),
)
switch stype {
case "KV":

View File

@ -102,7 +102,7 @@ func (s *keyStresser) Stress() error {
}
s.lg.Info(
"key stresser started in background",
"key stresser START",
zap.String("endpoint", s.m.EtcdClientEndpoint),
)
return nil
@ -181,16 +181,16 @@ func (s *keyStresser) Close() map[string]int {
s.cli.Close()
s.wg.Wait()
s.lg.Info(
"key stresser is closed",
zap.String("endpoint", s.m.EtcdClientEndpoint),
)
s.emu.Lock()
s.paused = true
ess := s.ems
s.ems = make(map[string]int, 100)
s.emu.Unlock()
s.lg.Info(
"key stresser STOP",
zap.String("endpoint", s.m.EtcdClientEndpoint),
)
return ess
}

View File

@ -121,7 +121,7 @@ func (ls *leaseStresser) setupOnce() error {
func (ls *leaseStresser) Stress() error {
ls.lg.Info(
"lease stresser is started",
"lease stresser START",
zap.String("endpoint", ls.m.EtcdClientEndpoint),
)
@ -452,16 +452,12 @@ func (ls *leaseStresser) Pause() map[string]int {
}
func (ls *leaseStresser) Close() map[string]int {
ls.lg.Info(
"lease stresser is closing",
zap.String("endpoint", ls.m.EtcdClientEndpoint),
)
ls.cancel()
ls.runWg.Wait()
ls.aliveWg.Wait()
ls.cli.Close()
ls.lg.Info(
"lease stresser is closed",
"lease stresser STOP",
zap.String("endpoint", ls.m.EtcdClientEndpoint),
)
return nil