functional: rename "FailureCase" to "Case"

Later to add benchmark marks

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
This commit is contained in:
Gyuho Lee 2018-04-11 15:12:09 -07:00
parent 251e783d3f
commit 3815d6edb7
23 changed files with 1195 additions and 1197 deletions

View File

@ -160,12 +160,12 @@ tester-config:
consistency-check: true
enable-pprof: true
failure-delay-ms: 7000
failure-shuffle: true
case-delay-ms: 7000
case-shuffle: true
# For full descriptions,
# https://godoc.org/github.com/coreos/etcd/functional/rpcpb#FailureCase
failure-cases:
# https://godoc.org/github.com/coreos/etcd/functional/rpcpb#Case
cases:
- SIGTERM_ONE_FOLLOWER
- SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- SIGTERM_LEADER

View File

@ -2,7 +2,7 @@
[`functional`](https://godoc.org/github.com/coreos/etcd/functional) verifies the correct behavior of etcd under various system and network malfunctions. It sets up an etcd cluster under high pressure loads and continuously injects failures into the cluster. Then it expects the etcd cluster to recover within a few seconds. This has been extremely helpful to find critical bugs.
See [`rpcpb.FailureCase`](https://godoc.org/github.com/coreos/etcd/functional/rpcpb#FailureCase) for all failure cases.
See [`rpcpb.Case`](https://godoc.org/github.com/coreos/etcd/functional/rpcpb#Case) for all failure cases.
See [functional.yaml](https://github.com/coreos/etcd/blob/master/functional.yaml) for an example configuration.

View File

@ -108,18 +108,18 @@ func (x Operation) String() string {
}
func (Operation) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
// FailureCase defines various system faults in distributed systems,
// Case defines various system faults or test case in distributed systems,
// in order to verify correct behavior of etcd servers and clients.
type FailureCase int32
type Case int32
const (
// SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
// but does not delete its data directories on disk for next restart.
// It waits "failure-delay-ms" before recovering this failure.
// It waits "delay-ms" before recovering this failure.
// The expected behavior is that the follower comes back online
// and rejoins the cluster, and then each member continues to process
// client requests ('Put' request that requires Raft consensus).
FailureCase_SIGTERM_ONE_FOLLOWER FailureCase = 0
Case_SIGTERM_ONE_FOLLOWER Case = 0
// SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen
// follower but does not delete its data directories on disk for next
// restart. And waits until most up-to-date node (leader) applies the
@ -129,16 +129,15 @@ const (
// to the follower to force it to follow the leader's log.
// As always, after recovery, each member must be able to process
// client requests.
FailureCase_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 1
Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 1
// SIGTERM_LEADER stops the active leader node but does not delete its
// data directories on disk for next restart. Then it waits
// "failure-delay-ms" before recovering this failure, in order to
// trigger election timeouts.
// data directories on disk for next restart. Then it waits "delay-ms"
// before recovering this failure, in order to trigger election timeouts.
// The expected behavior is that a new leader gets elected, and the
// old leader comes back online and rejoins the cluster as a follower.
// As always, after recovery, each member must be able to process
// client requests.
FailureCase_SIGTERM_LEADER FailureCase = 2
Case_SIGTERM_LEADER Case = 2
// SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node
// but does not delete its data directories on disk for next restart.
// And waits until most up-to-date node ("new" leader) applies the
@ -148,32 +147,31 @@ const (
// And it receives the snapshot from the new leader to overwrite its
// store. As always, after recovery, each member must be able to
// process client requests.
FailureCase_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 3
Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 3
// SIGTERM_QUORUM stops majority number of nodes to make the whole cluster
// inoperable but does not delete data directories on stopped nodes
// for next restart. And it waits "failure-delay-ms" before recovering
// for next restart. And it waits "delay-ms" before recovering failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
Case_SIGTERM_QUORUM Case = 4
// SIGTERM_ALL stops the whole cluster but does not delete data directories
// on disk for next restart. And it waits "delay-ms" before recovering
// this failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
FailureCase_SIGTERM_QUORUM FailureCase = 4
// SIGTERM_ALL stops the whole cluster but does not delete data directories
// on disk for next restart. And it waits "failure-delay-ms" before
// recovering this failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
FailureCase_SIGTERM_ALL FailureCase = 5
Case_SIGTERM_ALL Case = 5
// SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower
// (non-leader), deletes its data directories on disk, and removes
// this member from cluster (membership reconfiguration). On recovery,
// tester adds a new member, and this member joins the existing cluster
// with fresh data. It waits "failure-delay-ms" before recovering this
// with fresh data. It waits "delay-ms" before recovering this
// failure. This simulates destroying one follower machine, where operator
// needs to add a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
// and then each member continues to process client requests.
FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER FailureCase = 10
Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER Case = 10
// SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly
// chosen follower, deletes its data directories on disk, and removes
// this member from cluster (membership reconfiguration). On recovery,
@ -185,16 +183,16 @@ const (
// The expected behavior is that a new member joins the existing cluster,
// and receives a snapshot from the active leader. As always, after
// recovery, each member must be able to process client requests.
FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 11
Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 11
// SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
// data directories on disk, and removes this member from cluster.
// On recovery, tester adds a new member, and this member joins the
// existing cluster with fresh data. It waits "failure-delay-ms" before
// existing cluster with fresh data. It waits "delay-ms" before
// recovering this failure. This simulates destroying a leader machine,
// where operator needs to add a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
// and then each member continues to process client requests.
FailureCase_SIGQUIT_AND_REMOVE_LEADER FailureCase = 12
Case_SIGQUIT_AND_REMOVE_LEADER Case = 12
// SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader,
// deletes its data directories on disk, and removes this member from
// cluster (membership reconfiguration). On recovery, tester adds a new
@ -207,7 +205,7 @@ const (
// leader, and a new member joins the existing cluster and receives a
// snapshot from the newly elected leader. As always, after recovery, each
// member must be able to process client requests.
FailureCase_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 13
Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 13
// SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first
// stops majority number of nodes, deletes data directories on those quorum
// nodes, to make the whole cluster inoperable. Now that quorum and their
@ -232,13 +230,13 @@ const (
// new members joins the existing cluster, and previous data from snapshot
// are still preserved after recovery process. As always, after recovery,
// each member must be able to process client requests.
FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH FailureCase = 14
Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH Case = 14
// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
// packets from/to the peer port on a randomly chosen follower
// (non-leader), and waits for "failure-delay-ms" until recovery.
// (non-leader), and waits for "delay-ms" until recovery.
// The expected behavior is that once dropping operation is undone,
// each member must be able to process client requests.
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 100
Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 100
// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops
// all outgoing/incoming packets from/to the peer port on a randomly
// chosen follower (non-leader), and waits for most up-to-date node
@ -248,15 +246,15 @@ const (
// the slow follower tries to catch up, possibly receiving the snapshot
// from the active leader. As always, after recovery, each member must
// be able to process client requests.
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 101
Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 101
// BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
// from/to the peer port on the active leader (isolated), and waits for
// "failure-delay-ms" until recovery, in order to trigger election timeout.
// "delay-ms" until recovery, in order to trigger election timeout.
// The expected behavior is that after election timeout, a new leader gets
// elected, and once dropping operation is undone, the old leader comes
// back and rejoins the cluster as a follower. As always, after recovery,
// each member must be able to process client requests.
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER FailureCase = 102
Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER Case = 102
// BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all
// outgoing/incoming packets from/to the peer port on the active leader,
// and waits for most up-to-date node (leader) applies the snapshot
@ -266,37 +264,37 @@ const (
// the cluster as a follower. The slow follower tries to catch up, likely
// receiving the snapshot from the new active leader. As always, after
// recovery, each member must be able to process client requests.
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 103
Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 103
// BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
// from/to the peer ports on majority nodes of cluster, thus losing its
// leader and cluster being inoperable. And it waits for "failure-delay-ms"
// leader and cluster being inoperable. And it waits for "delay-ms"
// until recovery.
// The expected behavior is that once packet drop operation is undone,
// nodes come back online, thus cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM FailureCase = 104
Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM Case = 104
// BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
// from/to the peer ports on all nodes, thus making cluster totally
// inoperable. It waits for "failure-delay-ms" until recovery.
// inoperable. It waits for "delay-ms" until recovery.
// The expected behavior is that once packet drop operation is undone,
// nodes come back online, thus cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL FailureCase = 105
Case_BLACKHOLE_PEER_PORT_TX_RX_ALL Case = 105
// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
// from/to the peer port on a randomly chosen follower (non-leader).
// It waits for "failure-delay-ms" until recovery.
// It waits for "delay-ms" until recovery.
// The expected behavior is that once packet delay operation is undone,
// the follower comes back and tries to catch up with latest changes from
// cluster. And as always, after recovery, each member must be able to
// process client requests.
FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 200
Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 200
// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
// packets from/to the peer port on a randomly chosen follower
// (non-leader) with a randomized time duration (thus isolated). It waits
// for "failure-delay-ms" until recovery.
// (non-leader) with a randomized time duration (thus isolated). It
// waits for "delay-ms" until recovery.
// The expected behavior is that once packet delay operation is undone,
// each member must be able to process client requests.
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 201
Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 201
// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on a randomly chosen
// follower (non-leader), and waits for most up-to-date node (leader)
@ -306,7 +304,7 @@ const (
// the slow follower comes back and catches up possibly receiving snapshot
// from the active leader. As always, after recovery, each member must be
// able to process client requests.
FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 202
Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 202
// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on a randomly chosen
// follower (non-leader) with a randomized time duration, and waits for
@ -317,23 +315,23 @@ const (
// the slow follower comes back and catches up, possibly receiving a
// snapshot from the active leader. As always, after recovery, each member
// must be able to process client requests.
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 203
Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 203
// DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
// the peer port on the active leader. And waits for "failure-delay-ms"
// until recovery.
// the peer port on the active leader. And waits for "delay-ms" until
// recovery.
// The expected behavior is that cluster may elect a new leader, and
// once packet delay operation is undone, the (old) leader comes back
// and tries to catch up with latest changes from cluster. As always,
// after recovery, each member must be able to process client requests.
FailureCase_DELAY_PEER_PORT_TX_RX_LEADER FailureCase = 204
Case_DELAY_PEER_PORT_TX_RX_LEADER Case = 204
// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
// from/to the peer port on the active leader with a randomized time
// duration. And waits for "failure-delay-ms" until recovery.
// duration. And waits for "delay-ms" until recovery.
// The expected behavior is that cluster may elect a new leader, and
// once packet delay operation is undone, the (old) leader comes back
// and tries to catch up with latest changes from cluster. As always,
// after recovery, each member must be able to process client requests.
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER FailureCase = 205
Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER Case = 205
// DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on the active leader,
// and waits for most up-to-date node (current or new leader) applies the
@ -344,7 +342,7 @@ const (
// and catches up, likely receiving a snapshot from the active leader.
// As always, after recovery, each member must be able to process client
// requests.
FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 206
Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 206
// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on the active leader,
// with a randomized time duration. And it waits for most up-to-date node
@ -356,55 +354,55 @@ const (
// and catches up, likely receiving a snapshot from the active leader.
// As always, after recovery, each member must be able to process client
// requests.
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT FailureCase = 207
Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 207
// DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
// the peer ports on majority nodes of cluster. And it waits for
// "failure-delay-ms" until recovery, likely to trigger election timeouts.
// "delay-ms" until recovery, likely to trigger election timeouts.
// The expected behavior is that cluster may elect a new leader, while
// quorum of nodes struggle with slow networks, and once delay operation
// is undone, nodes come back and cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM FailureCase = 208
Case_DELAY_PEER_PORT_TX_RX_QUORUM Case = 208
// RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
// from/to the peer ports on majority nodes of cluster, with randomized
// time durations. And it waits for "failure-delay-ms" until recovery,
// likely to trigger election timeouts.
// time durations. And it waits for "delay-ms" until recovery, likely
// to trigger election timeouts.
// The expected behavior is that cluster may elect a new leader, while
// quorum of nodes struggle with slow networks, and once delay operation
// is undone, nodes come back and cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM FailureCase = 209
Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM Case = 209
// DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
// peer ports on all nodes. And it waits for "failure-delay-ms" until
// recovery, likely to trigger election timeouts.
// peer ports on all nodes. And it waits for "delay-ms" until recovery,
// likely to trigger election timeouts.
// The expected behavior is that cluster may become totally inoperable,
// struggling with slow networks across the whole cluster. Once delay
// operation is undone, nodes come back and cluster comes back operative.
// As always, after recovery, each member must be able to process client
// requests.
FailureCase_DELAY_PEER_PORT_TX_RX_ALL FailureCase = 210
Case_DELAY_PEER_PORT_TX_RX_ALL Case = 210
// RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
// from/to the peer ports on all nodes, with randomized time durations.
// And it waits for "failure-delay-ms" until recovery, likely to trigger
// And it waits for "delay-ms" until recovery, likely to trigger
// election timeouts.
// The expected behavior is that cluster may become totally inoperable,
// struggling with slow networks across the whole cluster. Once delay
// operation is undone, nodes come back and cluster comes back operative.
// As always, after recovery, each member must be able to process client
// requests.
FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ALL FailureCase = 211
Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL Case = 211
// NO_FAIL_WITH_STRESS stops injecting failures while testing the
// consistency and correctness under pressure loads, for the duration of
// "failure-delay-ms". Goal is to ensure cluster be still making progress
// "delay-ms". Goal is to ensure cluster be still making progress
// on recovery, and verify system does not deadlock following a sequence
// of failure injections.
// The expected behavior is that cluster remains fully operative in healthy
// condition. As always, after recovery, each member must be able to process
// client requests.
FailureCase_NO_FAIL_WITH_STRESS FailureCase = 300
Case_NO_FAIL_WITH_STRESS Case = 300
// NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
// sends stressing client requests to the cluster, for the duration of
// "failure-delay-ms". Goal is to ensure cluster be still making progress
// "delay-ms". Goal is to ensure cluster be still making progress
// on recovery, and verify system does not deadlock following a sequence
// of failure injections.
// The expected behavior is that cluster remains fully operative in healthy
@ -412,15 +410,15 @@ const (
// errors.
// Note: this is how Google Chubby does failure injection testing
// https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf.
FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS FailureCase = 301
Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS Case = 301
// FAILPOINTS injects failpoints to etcd server runtime, triggering panics
// in critical code paths.
FailureCase_FAILPOINTS FailureCase = 400
Case_FAILPOINTS Case = 400
// EXTERNAL runs external failure injection scripts.
FailureCase_EXTERNAL FailureCase = 500
Case_EXTERNAL Case = 500
)
var FailureCase_name = map[int32]string{
var Case_name = map[int32]string{
0: "SIGTERM_ONE_FOLLOWER",
1: "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
2: "SIGTERM_LEADER",
@ -455,7 +453,7 @@ var FailureCase_name = map[int32]string{
400: "FAILPOINTS",
500: "EXTERNAL",
}
var FailureCase_value = map[string]int32{
var Case_value = map[string]int32{
"SIGTERM_ONE_FOLLOWER": 0,
"SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 1,
"SIGTERM_LEADER": 2,
@ -491,10 +489,10 @@ var FailureCase_value = map[string]int32{
"EXTERNAL": 500,
}
func (x FailureCase) String() string {
return proto.EnumName(FailureCase_name, int32(x))
func (x Case) String() string {
return proto.EnumName(Case_name, int32(x))
}
func (FailureCase) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
func (Case) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
type StressType int32
@ -642,21 +640,22 @@ type Tester struct {
UpdatedDelayLatencyMs uint32 `protobuf:"varint,13,opt,name=UpdatedDelayLatencyMs,proto3" json:"UpdatedDelayLatencyMs,omitempty" yaml:"updated-delay-latency-ms"`
// RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
// ExitOnFailure is true, then exit tester on first failure.
ExitOnFailure bool `protobuf:"varint,22,opt,name=ExitOnFailure,proto3" json:"ExitOnFailure,omitempty" yaml:"exit-on-failure"`
// ExitOnCaseFail is true, then exit tester on first failure.
ExitOnCaseFail bool `protobuf:"varint,22,opt,name=ExitOnCaseFail,proto3" json:"ExitOnCaseFail,omitempty" yaml:"exit-on-failure"`
// ConsistencyCheck is true to check consistency (revision, hash).
ConsistencyCheck bool `protobuf:"varint,23,opt,name=ConsistencyCheck,proto3" json:"ConsistencyCheck,omitempty" yaml:"consistency-check"`
// EnablePprof is true to enable profiler.
EnablePprof bool `protobuf:"varint,24,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"`
// FailureDelayMs is the delay duration after failure is injected.
// CaseDelayMs is the delay duration after failure is injected.
// Useful when triggering snapshot or no-op failure cases.
FailureDelayMs uint32 `protobuf:"varint,31,opt,name=FailureDelayMs,proto3" json:"FailureDelayMs,omitempty" yaml:"failure-delay-ms"`
// FailureShuffle is true to randomize failure injecting order.
FailureShuffle bool `protobuf:"varint,32,opt,name=FailureShuffle,proto3" json:"FailureShuffle,omitempty" yaml:"failure-shuffle"`
// FailureCases is the selected test cases to schedule.
CaseDelayMs uint32 `protobuf:"varint,31,opt,name=CaseDelayMs,proto3" json:"CaseDelayMs,omitempty" yaml:"case-delay-ms"`
// CaseShuffle is true to randomize failure injecting order.
CaseShuffle bool `protobuf:"varint,32,opt,name=CaseShuffle,proto3" json:"CaseShuffle,omitempty" yaml:"case-shuffle"`
// Cases is the selected test cases to schedule.
// If empty, run all failure cases.
FailureCases []string `protobuf:"bytes,33,rep,name=FailureCases" json:"FailureCases,omitempty" yaml:"failure-cases"`
// FailpointCommands is the list of "gofail" commands (e.g. panic("etcd-tester"),1*sleep(1000)
Cases []string `protobuf:"bytes,33,rep,name=Cases" json:"Cases,omitempty" yaml:"cases"`
// FailpointCommands is the list of "gofail" commands
// (e.g. panic("etcd-tester"),1*sleep(1000).
FailpointCommands []string `protobuf:"bytes,34,rep,name=FailpointCommands" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"`
// RunnerExecPath is a path of etcd-runner binary.
RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"`
@ -735,7 +734,7 @@ func init() {
proto.RegisterType((*Tester)(nil), "rpcpb.Tester")
proto.RegisterType((*Etcd)(nil), "rpcpb.Etcd")
proto.RegisterEnum("rpcpb.Operation", Operation_name, Operation_value)
proto.RegisterEnum("rpcpb.FailureCase", FailureCase_name, FailureCase_value)
proto.RegisterEnum("rpcpb.Case", Case_name, Case_value)
proto.RegisterEnum("rpcpb.StressType", StressType_name, StressType_value)
}
@ -1297,12 +1296,12 @@ func (m *Tester) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintRpc(dAtA, i, uint64(m.RoundLimit))
}
if m.ExitOnFailure {
if m.ExitOnCaseFail {
dAtA[i] = 0xb0
i++
dAtA[i] = 0x1
i++
if m.ExitOnFailure {
if m.ExitOnCaseFail {
dAtA[i] = 1
} else {
dAtA[i] = 0
@ -1333,27 +1332,27 @@ func (m *Tester) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
if m.FailureDelayMs != 0 {
if m.CaseDelayMs != 0 {
dAtA[i] = 0xf8
i++
dAtA[i] = 0x1
i++
i = encodeVarintRpc(dAtA, i, uint64(m.FailureDelayMs))
i = encodeVarintRpc(dAtA, i, uint64(m.CaseDelayMs))
}
if m.FailureShuffle {
if m.CaseShuffle {
dAtA[i] = 0x80
i++
dAtA[i] = 0x2
i++
if m.FailureShuffle {
if m.CaseShuffle {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if len(m.FailureCases) > 0 {
for _, s := range m.FailureCases {
if len(m.Cases) > 0 {
for _, s := range m.Cases {
dAtA[i] = 0x8a
i++
dAtA[i] = 0x2
@ -1958,7 +1957,7 @@ func (m *Tester) Size() (n int) {
if m.RoundLimit != 0 {
n += 2 + sovRpc(uint64(m.RoundLimit))
}
if m.ExitOnFailure {
if m.ExitOnCaseFail {
n += 3
}
if m.ConsistencyCheck {
@ -1967,14 +1966,14 @@ func (m *Tester) Size() (n int) {
if m.EnablePprof {
n += 3
}
if m.FailureDelayMs != 0 {
n += 2 + sovRpc(uint64(m.FailureDelayMs))
if m.CaseDelayMs != 0 {
n += 2 + sovRpc(uint64(m.CaseDelayMs))
}
if m.FailureShuffle {
if m.CaseShuffle {
n += 3
}
if len(m.FailureCases) > 0 {
for _, s := range m.FailureCases {
if len(m.Cases) > 0 {
for _, s := range m.Cases {
l = len(s)
n += 2 + l + sovRpc(uint64(l))
}
@ -3659,7 +3658,7 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
}
case 22:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExitOnFailure", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field ExitOnCaseFail", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
@ -3676,7 +3675,7 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
break
}
}
m.ExitOnFailure = bool(v != 0)
m.ExitOnCaseFail = bool(v != 0)
case 23:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ConsistencyCheck", wireType)
@ -3719,9 +3718,9 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
m.EnablePprof = bool(v != 0)
case 31:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FailureDelayMs", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field CaseDelayMs", wireType)
}
m.FailureDelayMs = 0
m.CaseDelayMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
@ -3731,14 +3730,14 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.FailureDelayMs |= (uint32(b) & 0x7F) << shift
m.CaseDelayMs |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 32:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FailureShuffle", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field CaseShuffle", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
@ -3755,10 +3754,10 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
break
}
}
m.FailureShuffle = bool(v != 0)
m.CaseShuffle = bool(v != 0)
case 33:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field FailureCases", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field Cases", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -3783,7 +3782,7 @@ func (m *Tester) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.FailureCases = append(m.FailureCases, string(dAtA[iNdEx:postIndex]))
m.Cases = append(m.Cases, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 34:
if wireType != 2 {
@ -4873,177 +4872,177 @@ var (
func init() { proto.RegisterFile("rpcpb/rpc.proto", fileDescriptorRpc) }
var fileDescriptorRpc = []byte{
// 2743 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x59, 0x5b, 0x77, 0xdb, 0xc6,
0xb5, 0x36, 0x44, 0xc9, 0xb6, 0x46, 0x37, 0x6a, 0x64, 0xd9, 0xf0, 0x4d, 0x90, 0xe1, 0x38, 0x47,
0x56, 0x02, 0x3b, 0xc7, 0xce, 0xca, 0xc5, 0xb9, 0x38, 0x20, 0x05, 0x59, 0x3c, 0x82, 0x48, 0x7a,
0x08, 0xd9, 0xc9, 0x13, 0x0f, 0x44, 0x0e, 0x25, 0x1c, 0x53, 0x00, 0x03, 0x0c, 0x1d, 0x29, 0x7f,
0xe0, 0xbc, 0xf6, 0xbe, 0xfa, 0xd0, 0x9f, 0xd0, 0xb4, 0x7f, 0xc3, 0x49, 0x6f, 0x69, 0xfb, 0xd4,
0xcb, 0xe2, 0x6a, 0x93, 0x97, 0x3e, 0x73, 0x35, 0x6d, 0xd3, 0xa7, 0xae, 0xb9, 0x40, 0x1c, 0x00,
0xa4, 0xa4, 0x27, 0x09, 0x7b, 0x7f, 0xdf, 0x87, 0x3d, 0x7b, 0x0f, 0x66, 0xef, 0x91, 0xc0, 0x5c,
0xd8, 0x69, 0x74, 0x76, 0xee, 0x86, 0x9d, 0xc6, 0x9d, 0x4e, 0x18, 0x90, 0x00, 0x4e, 0x30, 0xc3,
0x15, 0x63, 0xd7, 0x23, 0x7b, 0xdd, 0x9d, 0x3b, 0x8d, 0x60, 0xff, 0xee, 0x6e, 0xb0, 0x1b, 0xdc,
0x65, 0xde, 0x9d, 0x6e, 0x8b, 0x3d, 0xb1, 0x07, 0xf6, 0x1b, 0x67, 0xe9, 0xff, 0xaf, 0x80, 0x73,
0x08, 0x7f, 0xdc, 0xc5, 0x11, 0x81, 0x77, 0xc0, 0x64, 0xa5, 0x83, 0x43, 0x97, 0x78, 0x81, 0xaf,
0x2a, 0xcb, 0xca, 0xca, 0xec, 0xbd, 0xfc, 0x1d, 0xa6, 0x7a, 0xe7, 0xc8, 0x8e, 0x06, 0x10, 0x78,
0x0b, 0x9c, 0xdd, 0xc2, 0xfb, 0x3b, 0x38, 0x54, 0xc7, 0x96, 0x95, 0x95, 0xa9, 0x7b, 0x33, 0x02,
0xcc, 0x8d, 0x48, 0x38, 0x29, 0xcc, 0xc1, 0x11, 0xc1, 0xa1, 0x9a, 0x4b, 0xc0, 0xb8, 0x11, 0x09,
0xa7, 0xfe, 0xb7, 0x31, 0x30, 0x5d, 0xf3, 0xdd, 0x4e, 0xb4, 0x17, 0x90, 0x92, 0xdf, 0x0a, 0xe0,
0x12, 0x00, 0x5c, 0xa1, 0xec, 0xee, 0x63, 0x16, 0xcf, 0x24, 0x92, 0x2c, 0x70, 0x15, 0xe4, 0xf9,
0x53, 0xb1, 0xed, 0x61, 0x9f, 0x6c, 0x23, 0x3b, 0x52, 0xc7, 0x96, 0x73, 0x2b, 0x93, 0x28, 0x63,
0x87, 0xfa, 0x40, 0xbb, 0xea, 0x92, 0x3d, 0x16, 0xc9, 0x24, 0x4a, 0xd8, 0xa8, 0x5e, 0xfc, 0xbc,
0xee, 0xb5, 0x71, 0xcd, 0xfb, 0x14, 0xab, 0xe3, 0x0c, 0x97, 0xb1, 0xc3, 0x57, 0xc1, 0x7c, 0x6c,
0x73, 0x02, 0xe2, 0xb6, 0x19, 0x78, 0x82, 0x81, 0xb3, 0x0e, 0x59, 0x99, 0x19, 0x37, 0xf1, 0xa1,
0x7a, 0x76, 0x59, 0x59, 0xc9, 0xa1, 0x8c, 0x5d, 0x8e, 0x74, 0xc3, 0x8d, 0xf6, 0xd4, 0x73, 0x0c,
0x97, 0xb0, 0xc9, 0x7a, 0x08, 0x3f, 0xf7, 0x22, 0x5a, 0xaf, 0xf3, 0x49, 0xbd, 0xd8, 0x0e, 0x21,
0x18, 0x77, 0x82, 0xe0, 0x99, 0x3a, 0xc9, 0x82, 0x63, 0xbf, 0xeb, 0x3f, 0x51, 0xc0, 0x79, 0x84,
0xa3, 0x4e, 0xe0, 0x47, 0x18, 0xaa, 0xe0, 0x5c, 0xad, 0xdb, 0x68, 0xe0, 0x28, 0x62, 0x39, 0x3e,
0x8f, 0xe2, 0x47, 0x78, 0x11, 0x9c, 0xad, 0x11, 0x97, 0x74, 0x23, 0x56, 0xdf, 0x49, 0x24, 0x9e,
0xa4, 0xba, 0xe7, 0x8e, 0xab, 0xfb, 0x9b, 0xc9, 0x7a, 0xb2, 0x5c, 0x4e, 0xdd, 0x5b, 0x10, 0x60,
0xd9, 0x85, 0x12, 0x40, 0xfd, 0x4f, 0xd3, 0xf1, 0x0b, 0xe0, 0x7b, 0x60, 0xda, 0x22, 0x8d, 0xa6,
0x75, 0x80, 0x1b, 0xac, 0x6e, 0x6c, 0x17, 0x14, 0x2e, 0xf7, 0x7b, 0xda, 0xe2, 0xa1, 0xbb, 0xdf,
0x7e, 0xa0, 0x63, 0xd2, 0x68, 0x1a, 0xf8, 0x00, 0x37, 0x8c, 0x8e, 0x4b, 0xf6, 0x74, 0x94, 0x80,
0xc3, 0xfb, 0x60, 0xd2, 0xdc, 0xc5, 0x3e, 0x31, 0x9b, 0xcd, 0x50, 0x9d, 0x62, 0xdc, 0xc5, 0x7e,
0x4f, 0x9b, 0xe7, 0x5c, 0x97, 0xba, 0x0c, 0xb7, 0xd9, 0x0c, 0x75, 0x34, 0xc0, 0x41, 0x1b, 0xcc,
0xaf, 0xbb, 0x5e, 0xbb, 0x13, 0x78, 0x3e, 0xd9, 0x70, 0x9c, 0x2a, 0x23, 0x4f, 0x33, 0xf2, 0x52,
0xbf, 0xa7, 0x5d, 0xe1, 0xe4, 0x56, 0x0c, 0x31, 0xf6, 0x08, 0xe9, 0x08, 0x95, 0x2c, 0x11, 0x1a,
0xe0, 0x5c, 0xc1, 0x8d, 0xf0, 0x9a, 0x17, 0xaa, 0x98, 0x69, 0x2c, 0xf4, 0x7b, 0xda, 0x1c, 0xd7,
0xd8, 0x71, 0x23, 0x6c, 0x34, 0xbd, 0x50, 0x47, 0x31, 0x06, 0x3e, 0x00, 0x53, 0x74, 0x05, 0x76,
0xb0, 0xcb, 0xd6, 0xdb, 0x62, 0x14, 0xb5, 0xdf, 0xd3, 0x2e, 0x48, 0xeb, 0x6d, 0x07, 0xbb, 0x62,
0xb9, 0x32, 0x18, 0x3e, 0x02, 0x73, 0xf4, 0x91, 0x6f, 0xfb, 0x6a, 0x18, 0x1c, 0x1c, 0xaa, 0x9f,
0xb3, 0x92, 0x16, 0xae, 0xf5, 0x7b, 0x9a, 0x2a, 0x09, 0x34, 0x18, 0xc4, 0xe8, 0x50, 0x8c, 0x8e,
0xd2, 0x2c, 0x68, 0x82, 0x19, 0x6a, 0xaa, 0x62, 0x1c, 0x72, 0x99, 0x2f, 0xb8, 0xcc, 0x95, 0x7e,
0x4f, 0xbb, 0x28, 0xc9, 0x74, 0x30, 0x0e, 0x63, 0x91, 0x24, 0x03, 0x56, 0x01, 0x1c, 0xa8, 0x5a,
0x7e, 0x93, 0x25, 0x45, 0xfd, 0x8c, 0x6d, 0xa4, 0x82, 0xd6, 0xef, 0x69, 0x57, 0xb3, 0xe1, 0x60,
0x01, 0xd3, 0xd1, 0x10, 0x2e, 0xfc, 0x6f, 0x30, 0x4e, 0xad, 0xea, 0xcf, 0xf8, 0x61, 0x33, 0x25,
0xf6, 0x11, 0xb5, 0x15, 0xe6, 0xfa, 0x3d, 0x6d, 0x6a, 0x20, 0xa8, 0x23, 0x06, 0x85, 0x05, 0xb0,
0x48, 0x7f, 0x56, 0xfc, 0xc1, 0x57, 0x11, 0x91, 0x20, 0xc4, 0xea, 0xcf, 0xb3, 0x1a, 0x68, 0x38,
0x14, 0xae, 0x81, 0x59, 0x1e, 0x48, 0x11, 0x87, 0x64, 0xcd, 0x25, 0xae, 0xfa, 0x5d, 0x76, 0x78,
0x14, 0xae, 0xf6, 0x7b, 0xda, 0x25, 0xfe, 0x4e, 0x11, 0x7f, 0x03, 0x87, 0xc4, 0x68, 0xba, 0xc4,
0xd5, 0x51, 0x8a, 0x93, 0x54, 0x61, 0x95, 0xfd, 0xde, 0xb1, 0x2a, 0xbc, 0xba, 0x29, 0x0e, 0xad,
0x0b, 0xb7, 0x6c, 0xe2, 0x43, 0x16, 0xca, 0xf7, 0xb9, 0x88, 0x54, 0x17, 0x21, 0xf2, 0x0c, 0x1f,
0x8a, 0x48, 0x92, 0x8c, 0x84, 0x04, 0x8b, 0xe3, 0x07, 0xc7, 0x49, 0xf0, 0x30, 0x92, 0x0c, 0xe8,
0x80, 0x05, 0x6e, 0x70, 0xc2, 0x6e, 0x44, 0x70, 0xb3, 0x68, 0xb2, 0x58, 0x7e, 0xc8, 0x85, 0x6e,
0xf4, 0x7b, 0xda, 0xf5, 0x84, 0x10, 0xe1, 0x30, 0xa3, 0xe1, 0x8a, 0x90, 0x86, 0xd1, 0x87, 0xa8,
0xb2, 0xf0, 0x7e, 0x74, 0x0a, 0x55, 0x1e, 0xe5, 0x30, 0x3a, 0x7c, 0x1f, 0x4c, 0xd3, 0x3d, 0x79,
0x54, 0xbb, 0x6f, 0x72, 0xe9, 0x03, 0x84, 0xed, 0x61, 0xa9, 0x72, 0x09, 0xbc, 0xcc, 0x67, 0xe1,
0xfc, 0xe3, 0x18, 0xbe, 0x38, 0x80, 0x64, 0x3c, 0x7c, 0x07, 0x4c, 0xd1, 0xe7, 0xb8, 0x5e, 0xff,
0xcc, 0xa5, 0xbf, 0x67, 0x46, 0x1f, 0x54, 0x4b, 0x46, 0x4b, 0x64, 0xf6, 0xee, 0x7f, 0x8d, 0x26,
0x8b, 0xc3, 0x40, 0x42, 0xc3, 0x32, 0x98, 0xa7, 0x8f, 0xc9, 0x1a, 0x7d, 0x9b, 0x4b, 0x7f, 0x7f,
0x4c, 0x22, 0x53, 0xa1, 0x2c, 0x35, 0xa3, 0xc7, 0x42, 0xfa, 0xf7, 0x89, 0x7a, 0x3c, 0xb2, 0x2c,
0x95, 0x9e, 0xec, 0x89, 0x8e, 0xfc, 0x87, 0xf1, 0xf4, 0xea, 0x22, 0xe1, 0x8e, 0x13, 0x9b, 0x68,
0xd6, 0x6f, 0xa5, 0x9a, 0xcb, 0x1f, 0x4f, 0xdd, 0x5d, 0xbe, 0x9e, 0x8e, 0xe7, 0x11, 0x7a, 0x36,
0xd3, 0xb5, 0xd1, 0xb3, 0x59, 0x49, 0x9f, 0xcd, 0x34, 0x11, 0xe2, 0x6c, 0x16, 0x18, 0xf8, 0x2a,
0x38, 0x57, 0xc6, 0xe4, 0x93, 0x20, 0x7c, 0xc6, 0x1b, 0x62, 0x01, 0xf6, 0x7b, 0xda, 0x2c, 0x87,
0xfb, 0xdc, 0xa1, 0xa3, 0x18, 0x02, 0x6f, 0x82, 0x71, 0xd6, 0x39, 0x78, 0x8a, 0xa4, 0x13, 0x8a,
0xb7, 0x0a, 0xe6, 0x84, 0x45, 0x30, 0xbb, 0x86, 0xdb, 0xee, 0xa1, 0xed, 0x12, 0xec, 0x37, 0x0e,
0xb7, 0x22, 0xd6, 0xa5, 0x66, 0xe4, 0x63, 0xa1, 0x49, 0xfd, 0x46, 0x9b, 0x03, 0x8c, 0xfd, 0x48,
0x47, 0x29, 0x0a, 0xfc, 0x1f, 0x90, 0x4f, 0x5a, 0xd0, 0x73, 0xd6, 0xaf, 0x66, 0xe4, 0x7e, 0x95,
0x96, 0x31, 0xc2, 0xe7, 0x3a, 0xca, 0xf0, 0xe0, 0x47, 0x60, 0x71, 0xbb, 0xd3, 0x74, 0x09, 0x6e,
0xa6, 0xe2, 0x9a, 0x61, 0x82, 0x37, 0xfb, 0x3d, 0x4d, 0xe3, 0x82, 0x5d, 0x0e, 0x33, 0xb2, 0xf1,
0x0d, 0x57, 0x80, 0x6f, 0x00, 0x80, 0x82, 0xae, 0xdf, 0xb4, 0xbd, 0x7d, 0x8f, 0xa8, 0x8b, 0xcb,
0xca, 0xca, 0x44, 0xe1, 0x62, 0xbf, 0xa7, 0x41, 0xae, 0x17, 0x52, 0x9f, 0xd1, 0xa6, 0x4e, 0x1d,
0x49, 0x48, 0xf8, 0x01, 0x98, 0xb1, 0x0e, 0x3c, 0x52, 0xf1, 0x69, 0x73, 0xed, 0x86, 0x58, 0xbd,
0x98, 0x69, 0x46, 0x07, 0x1e, 0x31, 0x02, 0xdf, 0x68, 0x71, 0x00, 0x6d, 0x46, 0x32, 0x01, 0x6e,
0x80, 0x7c, 0x31, 0xf0, 0x23, 0x2f, 0x62, 0xa1, 0x14, 0xf7, 0x70, 0xe3, 0x99, 0x7a, 0x29, 0xdd,
0x18, 0x1b, 0x03, 0x84, 0xd1, 0xa0, 0x10, 0x1d, 0x65, 0x58, 0xf0, 0x6d, 0x30, 0x65, 0xf9, 0xee,
0x4e, 0x1b, 0x57, 0x3b, 0x61, 0xd0, 0x52, 0x55, 0x26, 0x72, 0xa9, 0xdf, 0xd3, 0x16, 0x44, 0x24,
0xcc, 0x69, 0x74, 0xa8, 0x97, 0x76, 0xe7, 0x01, 0x96, 0x96, 0x5a, 0xc4, 0xc3, 0xf2, 0xb2, 0x15,
0xa9, 0x5a, 0xba, 0xd4, 0x22, 0x7e, 0x91, 0x52, 0x56, 0xea, 0x24, 0x05, 0x16, 0x8e, 0x44, 0x6a,
0x7b, 0xdd, 0x56, 0xab, 0x8d, 0xd5, 0xe5, 0x74, 0x32, 0x62, 0x91, 0x88, 0x03, 0x06, 0x1a, 0x82,
0x01, 0xdf, 0x05, 0xd3, 0xc2, 0x52, 0x74, 0x23, 0x1c, 0xa9, 0x37, 0xe8, 0xcc, 0x2c, 0x7f, 0x78,
0xb1, 0x42, 0x83, 0xba, 0x75, 0x94, 0x40, 0xc3, 0x4d, 0x69, 0x3a, 0x2a, 0x06, 0xfb, 0xfb, 0xae,
0xdf, 0x8c, 0x54, 0x9d, 0x49, 0x5c, 0xef, 0xf7, 0xb4, 0xcb, 0xe9, 0xe9, 0xa8, 0x21, 0x30, 0xf2,
0x70, 0x14, 0xf3, 0x68, 0x4e, 0x50, 0xd7, 0xf7, 0x71, 0x78, 0x34, 0xe0, 0xdd, 0x4e, 0x77, 0xc5,
0x90, 0xf9, 0xe5, 0x11, 0x2f, 0x45, 0x81, 0x25, 0x90, 0xb7, 0x0e, 0x08, 0x0e, 0x7d, 0xb7, 0x7d,
0x24, 0xb3, 0xca, 0x64, 0xa4, 0x80, 0xb0, 0x40, 0xc8, 0x42, 0x19, 0x1a, 0x2d, 0x6f, 0x8d, 0x84,
0x38, 0x8a, 0x9c, 0xc3, 0x0e, 0x8e, 0x54, 0xcc, 0x96, 0x25, 0x95, 0x37, 0x62, 0x4e, 0x83, 0x50,
0xaf, 0x8e, 0x64, 0x2c, 0xdd, 0xa5, 0xfc, 0x71, 0x13, 0x1f, 0xb2, 0xdb, 0x40, 0x8b, 0x6d, 0x70,
0xa9, 0x30, 0x82, 0x4c, 0xcf, 0xeb, 0xc8, 0xfb, 0x94, 0xee, 0xd2, 0x04, 0x81, 0x8e, 0x4c, 0x09,
0x83, 0xed, 0x86, 0xbb, 0x58, 0xdd, 0x65, 0x32, 0xcb, 0xfd, 0x9e, 0x76, 0x6d, 0xa8, 0x8c, 0xd1,
0xa6, 0x30, 0x1d, 0x0d, 0xe1, 0xc2, 0x27, 0xe0, 0xc2, 0xc0, 0xda, 0x6d, 0xb5, 0xbc, 0x03, 0xe4,
0xfa, 0xbb, 0x58, 0xdd, 0x63, 0x9a, 0x7a, 0xbf, 0xa7, 0x2d, 0x65, 0x35, 0x19, 0xce, 0x08, 0x29,
0x50, 0x47, 0x43, 0xf9, 0xf0, 0x7f, 0xc1, 0xa5, 0x61, 0x76, 0xe7, 0xc0, 0x57, 0x3d, 0x26, 0xfd,
0x72, 0xbf, 0xa7, 0xe9, 0xc7, 0x4a, 0x1b, 0xe4, 0xc0, 0xd7, 0xd1, 0x28, 0x19, 0x3a, 0xca, 0x1e,
0xb9, 0x9c, 0x03, 0xbf, 0xd2, 0x89, 0xd4, 0xff, 0x63, 0xca, 0x52, 0x49, 0x25, 0x65, 0x72, 0xe0,
0x1b, 0x41, 0x27, 0xd2, 0x51, 0x9a, 0x35, 0x28, 0x0b, 0x9f, 0x0e, 0x22, 0x3e, 0x11, 0x4f, 0xc8,
0x1d, 0x5c, 0xe8, 0xf0, 0xb9, 0x22, 0x3a, 0x2a, 0x8b, 0x20, 0xc0, 0xd7, 0xc1, 0x24, 0x37, 0x3c,
0xae, 0xd6, 0xf8, 0x20, 0x3c, 0x21, 0x5f, 0x22, 0x04, 0xfb, 0x63, 0xfa, 0xf6, 0x01, 0x50, 0xff,
0x76, 0x86, 0x8f, 0xab, 0xb4, 0x0d, 0x0c, 0xee, 0xaf, 0x72, 0x1b, 0xf0, 0xdd, 0x7d, 0xac, 0x23,
0xe6, 0x94, 0x1b, 0xd1, 0xd8, 0x29, 0x1a, 0xd1, 0x2a, 0x38, 0xfb, 0xd4, 0xb4, 0x29, 0x3a, 0x97,
0xee, 0x43, 0x9f, 0xb8, 0x6d, 0x0e, 0x16, 0x08, 0x58, 0x01, 0x0b, 0x1b, 0xd8, 0x0d, 0xc9, 0x0e,
0x76, 0x49, 0xc9, 0x27, 0x38, 0x7c, 0xee, 0xb6, 0x45, 0x9b, 0xc9, 0xc9, 0xd9, 0xdc, 0x8b, 0x41,
0x86, 0x27, 0x50, 0x3a, 0x1a, 0xc6, 0x84, 0x25, 0x30, 0x6f, 0xb5, 0x71, 0x83, 0x78, 0x81, 0xef,
0x78, 0xfb, 0x38, 0xe8, 0x92, 0xad, 0x88, 0xb5, 0x9b, 0x9c, 0xfc, 0xd9, 0x62, 0x01, 0x31, 0x08,
0xc7, 0xe8, 0x28, 0xcb, 0xa2, 0x5f, 0xae, 0xcd, 0x8e, 0x57, 0xe9, 0x06, 0xbf, 0x98, 0x3e, 0x4a,
0xda, 0x0c, 0x11, 0xdf, 0x11, 0xba, 0x61, 0x3b, 0xd2, 0x51, 0x86, 0x06, 0x11, 0x58, 0x30, 0x9b,
0xcf, 0x71, 0x48, 0xbc, 0x08, 0x4b, 0x6a, 0x17, 0x99, 0x9a, 0xf4, 0xf5, 0xb8, 0x31, 0x28, 0x29,
0x38, 0x8c, 0x0c, 0xdf, 0x8e, 0x67, 0x65, 0xb3, 0x4b, 0x02, 0xc7, 0xae, 0x89, 0x9e, 0x21, 0xd5,
0xc6, 0xed, 0x92, 0xc0, 0x20, 0x54, 0x20, 0x89, 0xa4, 0x07, 0xdb, 0x60, 0x76, 0x37, 0xbb, 0x64,
0x4f, 0xb4, 0x8a, 0x11, 0xe3, 0xbe, 0xdb, 0x4d, 0x8d, 0xfb, 0x94, 0x02, 0xdf, 0x95, 0x45, 0xd6,
0xbd, 0x36, 0x56, 0x2f, 0xb3, 0x72, 0x5f, 0xe8, 0xf7, 0xb4, 0xbc, 0x10, 0xa1, 0xec, 0x96, 0xc7,
0x8e, 0xf9, 0x24, 0x76, 0x10, 0xfd, 0x26, 0x3e, 0x64, 0xe4, 0x2b, 0xe9, 0x9d, 0x45, 0xbf, 0x1c,
0xce, 0x4d, 0x22, 0xa1, 0x9d, 0x99, 0xc5, 0x99, 0xc0, 0xd5, 0xf4, 0x4d, 0x41, 0x9a, 0xf3, 0xb8,
0xce, 0x30, 0x1a, 0xcd, 0x05, 0x2f, 0x17, 0x1d, 0x02, 0x59, 0x55, 0x34, 0x56, 0x15, 0x29, 0x17,
0xa2, 0xc6, 0x6c, 0x78, 0xe4, 0x05, 0x49, 0x51, 0xa0, 0x03, 0xe6, 0x8f, 0x4a, 0x74, 0xa4, 0xb3,
0xcc, 0x74, 0xa4, 0xc3, 0xc6, 0xf3, 0x3d, 0xe2, 0xb9, 0x6d, 0x63, 0x50, 0x65, 0x49, 0x32, 0x2b,
0x40, 0x6f, 0xdb, 0xf4, 0xf7, 0xb8, 0xbe, 0x37, 0x58, 0x8d, 0xd2, 0x03, 0xf6, 0xa0, 0xc8, 0x32,
0x98, 0x1e, 0xd7, 0x6c, 0xd4, 0x4f, 0x96, 0x59, 0x67, 0x12, 0xd2, 0x86, 0xe3, 0xf7, 0x83, 0x4c,
0xad, 0x87, 0x70, 0xe9, 0x48, 0x1c, 0x5f, 0x1e, 0x58, 0xbe, 0x6f, 0x8e, 0xbe, 0x6b, 0xf0, 0x74,
0x27, 0xe0, 0xf1, 0x62, 0xe2, 0x72, 0xbf, 0x34, 0xf2, 0xb6, 0xc0, 0xc9, 0x32, 0x18, 0x6e, 0xa5,
0xa6, 0x7b, 0xa6, 0x70, 0xeb, 0xa4, 0xe1, 0x9e, 0x0b, 0x65, 0x99, 0x74, 0x4c, 0x29, 0xf1, 0x52,
0x14, 0xdb, 0x5d, 0xf6, 0xa7, 0xbf, 0xdb, 0xe9, 0xbd, 0x13, 0x97, 0xaa, 0xc1, 0x01, 0x3a, 0x4a,
0x31, 0xe8, 0x17, 0x9d, 0xb4, 0xd4, 0x88, 0x4b, 0xb0, 0xe8, 0xec, 0x52, 0x82, 0x53, 0x42, 0x46,
0x44, 0x61, 0x3a, 0x1a, 0x46, 0xce, 0x6a, 0x3a, 0xc1, 0x33, 0xec, 0xab, 0xaf, 0x9c, 0xa4, 0x49,
0x28, 0x2c, 0xa3, 0xc9, 0xc8, 0xf0, 0x21, 0x98, 0x89, 0xef, 0x17, 0xc5, 0xa0, 0xeb, 0x13, 0xf5,
0x3e, 0x3b, 0x0b, 0xe5, 0x06, 0x13, 0x5f, 0x64, 0x1a, 0xd4, 0x4f, 0x1b, 0x8c, 0x8c, 0x87, 0x36,
0x98, 0x7f, 0xdc, 0x0d, 0x88, 0x5b, 0x70, 0x1b, 0xcf, 0xb0, 0xdf, 0x2c, 0x1c, 0x12, 0x1c, 0xa9,
0xaf, 0x33, 0x11, 0x69, 0x7e, 0xff, 0x98, 0x42, 0x8c, 0x1d, 0x8e, 0x31, 0x76, 0x28, 0x48, 0x47,
0x59, 0x22, 0x6d, 0x25, 0xd5, 0x10, 0x3f, 0x09, 0x08, 0x56, 0x1f, 0xa6, 0x8f, 0xab, 0x4e, 0x88,
0x8d, 0xe7, 0x01, 0xcd, 0x4e, 0x8c, 0x91, 0x33, 0x12, 0x84, 0x61, 0xb7, 0x43, 0xf8, 0x74, 0xfc,
0x41, 0x7a, 0x1b, 0x1f, 0x65, 0x84, 0xa3, 0xe2, 0x09, 0x79, 0x18, 0x79, 0xf5, 0xcf, 0x63, 0xd2,
0x1f, 0x92, 0xe1, 0x1c, 0x98, 0x2a, 0x57, 0x9c, 0x7a, 0xcd, 0x31, 0x91, 0x63, 0xad, 0xe5, 0xcf,
0xc0, 0x8b, 0x00, 0x96, 0xca, 0x25, 0xa7, 0x64, 0xda, 0xdc, 0x58, 0xb7, 0x9c, 0xe2, 0x5a, 0x1e,
0xc0, 0x3c, 0x98, 0x46, 0x96, 0x64, 0x99, 0xa2, 0x96, 0x5a, 0xe9, 0x91, 0x63, 0xa1, 0x2d, 0x6e,
0xb9, 0x00, 0x97, 0xc1, 0xb5, 0x5a, 0xe9, 0xd1, 0xe3, 0xed, 0x12, 0xc7, 0xd4, 0xcd, 0xf2, 0x5a,
0x1d, 0x59, 0x5b, 0x95, 0x27, 0x56, 0x7d, 0xcd, 0x74, 0xcc, 0xfc, 0x22, 0x84, 0x60, 0x76, 0xdd,
0x72, 0x8a, 0x1b, 0xf5, 0x5a, 0xd9, 0xac, 0xd6, 0x36, 0x2a, 0x4e, 0x7e, 0x09, 0xde, 0x00, 0xd7,
0x33, 0x2c, 0x13, 0x15, 0x37, 0x4a, 0x31, 0x6d, 0x05, 0xde, 0x05, 0xaf, 0x1c, 0x27, 0xcc, 0x9e,
0x6b, 0x4e, 0xa5, 0x5a, 0x37, 0x1f, 0x59, 0x65, 0x27, 0x7f, 0x1b, 0x5e, 0x07, 0x97, 0x0b, 0xb6,
0x59, 0xdc, 0xdc, 0xa8, 0xd8, 0x56, 0xbd, 0x6a, 0x59, 0xa8, 0x5e, 0xad, 0x20, 0xa7, 0xee, 0x7c,
0x58, 0x47, 0x1f, 0xe6, 0x9b, 0x50, 0x03, 0x57, 0xb7, 0xcb, 0xa3, 0x01, 0x18, 0x5e, 0x01, 0x8b,
0x6b, 0x96, 0x6d, 0x7e, 0x94, 0x71, 0xbd, 0x50, 0xe0, 0x35, 0x70, 0x69, 0xbb, 0x3c, 0xdc, 0xfb,
0xb9, 0xb2, 0xfa, 0x0d, 0x00, 0x53, 0xd2, 0x48, 0x0e, 0x55, 0x70, 0x21, 0xce, 0x52, 0xa5, 0x6c,
0xd5, 0xd7, 0x2b, 0xb6, 0x5d, 0x79, 0x6a, 0xa1, 0xfc, 0x19, 0xb1, 0xa8, 0x8c, 0xa7, 0xbe, 0x5d,
0x76, 0x4a, 0x76, 0xdd, 0x41, 0xa5, 0x47, 0x8f, 0x2c, 0x34, 0x48, 0x94, 0x42, 0x93, 0x17, 0x13,
0x6c, 0xcb, 0x5c, 0xb3, 0x50, 0x7e, 0x0c, 0xde, 0x06, 0xb7, 0x92, 0xb6, 0x51, 0xf4, 0x9c, 0x4c,
0x7f, 0xbc, 0x5d, 0x41, 0xdb, 0x5b, 0xf9, 0x71, 0x5a, 0xfe, 0xd8, 0x66, 0xda, 0x76, 0x7e, 0x02,
0xde, 0x04, 0x5a, 0x9c, 0x69, 0x29, 0xc9, 0x89, 0xc8, 0x01, 0x7c, 0x00, 0xde, 0x38, 0x01, 0x34,
0x2a, 0x8a, 0x29, 0x5a, 0x99, 0x21, 0x5c, 0xb1, 0x9e, 0x69, 0xf8, 0x3a, 0x78, 0x6d, 0xa4, 0x7b,
0x94, 0xe8, 0x0c, 0x5c, 0x07, 0x85, 0x21, 0x2c, 0xbe, 0x4a, 0x61, 0xa9, 0x39, 0x15, 0x74, 0x24,
0x14, 0x53, 0xeb, 0xeb, 0xa8, 0xb2, 0x55, 0xaf, 0x15, 0x91, 0xe9, 0x14, 0x37, 0xf2, 0xb3, 0x70,
0x15, 0xbc, 0x3c, 0x72, 0x57, 0x24, 0x93, 0xd0, 0x84, 0x26, 0x78, 0xef, 0x74, 0xd8, 0x51, 0x61,
0x63, 0xf8, 0x12, 0x58, 0x1e, 0x2d, 0x21, 0x52, 0xd2, 0x82, 0xef, 0x80, 0x37, 0x4f, 0x42, 0x8d,
0x7a, 0xc5, 0xee, 0xf1, 0xaf, 0x10, 0xdb, 0x60, 0x8f, 0x7e, 0x82, 0xa3, 0x51, 0x74, 0x63, 0x78,
0xf0, 0xbf, 0x80, 0x3e, 0x74, 0xcf, 0x27, 0xd3, 0xf2, 0x42, 0x81, 0x77, 0xc0, 0x6d, 0x64, 0x96,
0xd7, 0x2a, 0x5b, 0xf5, 0x53, 0xe0, 0x3f, 0x57, 0xe0, 0xfb, 0xe0, 0xed, 0x93, 0x81, 0xa3, 0x16,
0xf8, 0x85, 0x02, 0x2d, 0xf0, 0xc1, 0xa9, 0xdf, 0x37, 0x4a, 0xe6, 0x17, 0x0a, 0xbc, 0x01, 0xae,
0x0d, 0xe7, 0x8b, 0x3a, 0xfc, 0x52, 0x81, 0x2b, 0xe0, 0xe6, 0xb1, 0x6f, 0x12, 0xc8, 0x5f, 0x29,
0xf0, 0x2d, 0x70, 0xff, 0x38, 0xc8, 0xa8, 0x30, 0x7e, 0xad, 0xc0, 0x87, 0xe0, 0xc1, 0x29, 0xde,
0x31, 0x4a, 0xe0, 0x37, 0xc7, 0xac, 0x43, 0x14, 0xfb, 0xcb, 0x93, 0xd7, 0x21, 0x90, 0xbf, 0x55,
0xe0, 0x12, 0xb8, 0x3c, 0x1c, 0x42, 0xf7, 0xc4, 0xef, 0x14, 0x78, 0x0b, 0x2c, 0x1f, 0xab, 0x44,
0x61, 0xbf, 0x57, 0xa0, 0x0a, 0x16, 0xca, 0x95, 0xfa, 0xba, 0x59, 0xb2, 0xeb, 0x4f, 0x4b, 0xce,
0x46, 0xbd, 0xe6, 0x20, 0xab, 0x56, 0xcb, 0xff, 0x74, 0x8c, 0x86, 0x92, 0xf0, 0x94, 0x2b, 0xc2,
0x59, 0x5f, 0xaf, 0xa0, 0xba, 0x5d, 0x7a, 0x62, 0x95, 0x29, 0xf2, 0xb3, 0x31, 0x38, 0x07, 0x00,
0x85, 0x55, 0x2b, 0xa5, 0xb2, 0x53, 0xcb, 0x7f, 0x27, 0x07, 0x67, 0xc0, 0x79, 0xeb, 0x43, 0xc7,
0x42, 0x65, 0xd3, 0xce, 0xff, 0x3d, 0xb7, 0x1a, 0x00, 0x30, 0xb8, 0xf0, 0xc3, 0xb3, 0x60, 0x6c,
0xf3, 0x49, 0xfe, 0x0c, 0x9c, 0x04, 0x13, 0xb6, 0x65, 0xd6, 0xac, 0xbc, 0x02, 0x17, 0xc0, 0x9c,
0x65, 0x5b, 0x45, 0xa7, 0x54, 0x29, 0xd7, 0xd1, 0x76, 0xb9, 0xcc, 0x8e, 0xcf, 0x3c, 0x98, 0x7e,
0x4a, 0xbf, 0xfd, 0xd8, 0x92, 0x83, 0x8b, 0x60, 0xde, 0xae, 0x14, 0x37, 0xeb, 0xc8, 0x2c, 0x5a,
0x28, 0x36, 0x8f, 0x53, 0x20, 0x13, 0x8a, 0x2d, 0x13, 0xf7, 0x1e, 0x82, 0x49, 0x27, 0x74, 0xfd,
0xa8, 0x13, 0x84, 0x04, 0xde, 0x93, 0x1f, 0x66, 0xc5, 0x9f, 0x39, 0xc5, 0x7f, 0x6e, 0xaf, 0xcc,
0x1d, 0x3d, 0xf3, 0x7f, 0xea, 0xe9, 0x67, 0x56, 0x94, 0xd7, 0x94, 0xc2, 0x85, 0x17, 0x7f, 0x5d,
0x3a, 0xf3, 0xe2, 0xab, 0x25, 0xe5, 0xcb, 0xaf, 0x96, 0x94, 0xbf, 0x7c, 0xb5, 0xa4, 0xfc, 0xf8,
0xeb, 0xa5, 0x33, 0x3b, 0x67, 0xd9, 0x7f, 0x7e, 0xef, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd9,
0x0f, 0x34, 0x6e, 0x42, 0x1e, 0x00, 0x00,
// 2746 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x59, 0x4b, 0x77, 0xdb, 0xc6,
0x15, 0x36, 0x44, 0xc9, 0xb6, 0x46, 0x2f, 0x68, 0x64, 0xd9, 0xf0, 0x4b, 0x90, 0xe1, 0x38, 0x95,
0x95, 0xc0, 0x4e, 0xed, 0x9c, 0x3c, 0x9c, 0x26, 0x0e, 0x48, 0x41, 0x16, 0x2b, 0x88, 0xa4, 0x87,
0x90, 0x9d, 0xac, 0x50, 0x88, 0x1c, 0x49, 0xa8, 0x29, 0x80, 0x01, 0x86, 0x8a, 0x94, 0x3f, 0xd0,
0x6d, 0xdf, 0xa7, 0x8b, 0xfe, 0x84, 0xa6, 0xfd, 0x05, 0xdd, 0x3b, 0xe9, 0x2b, 0x6d, 0x57, 0x7d,
0x1c, 0x9e, 0xd6, 0xdd, 0x74, 0xd5, 0x05, 0x4f, 0x5f, 0xe9, 0xaa, 0x67, 0x66, 0x00, 0x71, 0x00,
0x90, 0x92, 0x56, 0xd2, 0xdc, 0xfb, 0x7d, 0x1f, 0xee, 0xdc, 0x3b, 0x98, 0x7b, 0x21, 0x81, 0x99,
0xb0, 0xdd, 0x68, 0x6f, 0xdd, 0x0d, 0xdb, 0x8d, 0x3b, 0xed, 0x30, 0x20, 0x01, 0x1c, 0x63, 0x86,
0x2b, 0xfa, 0x8e, 0x47, 0x76, 0x3b, 0x5b, 0x77, 0x1a, 0xc1, 0xde, 0xdd, 0x9d, 0x60, 0x27, 0xb8,
0xcb, 0xbc, 0x5b, 0x9d, 0x6d, 0xb6, 0x62, 0x0b, 0xf6, 0x1b, 0x67, 0x69, 0xdf, 0x92, 0xc0, 0x39,
0x84, 0x3f, 0xea, 0xe0, 0x88, 0xc0, 0x3b, 0x60, 0xbc, 0xda, 0xc6, 0xa1, 0x4b, 0xbc, 0xc0, 0x57,
0xa4, 0x45, 0x69, 0x69, 0xfa, 0x9e, 0x7c, 0x87, 0xa9, 0xde, 0x39, 0xb2, 0xa3, 0x3e, 0x04, 0xde,
0x02, 0x67, 0x37, 0xf0, 0xde, 0x16, 0x0e, 0x95, 0x91, 0x45, 0x69, 0x69, 0xe2, 0xde, 0x54, 0x0c,
0xe6, 0x46, 0x14, 0x3b, 0x29, 0xcc, 0xc6, 0x11, 0xc1, 0xa1, 0x52, 0x48, 0xc1, 0xb8, 0x11, 0xc5,
0x4e, 0xed, 0xef, 0x23, 0x60, 0xb2, 0xee, 0xbb, 0xed, 0x68, 0x37, 0x20, 0x65, 0x7f, 0x3b, 0x80,
0x0b, 0x00, 0x70, 0x85, 0x8a, 0xbb, 0x87, 0x59, 0x3c, 0xe3, 0x48, 0xb0, 0xc0, 0x65, 0x20, 0xf3,
0x55, 0xa9, 0xe5, 0x61, 0x9f, 0x6c, 0x22, 0x2b, 0x52, 0x46, 0x16, 0x0b, 0x4b, 0xe3, 0x28, 0x67,
0x87, 0x5a, 0x5f, 0xbb, 0xe6, 0x92, 0x5d, 0x16, 0xc9, 0x38, 0x4a, 0xd9, 0xa8, 0x5e, 0xb2, 0x5e,
0xf5, 0x5a, 0xb8, 0xee, 0x7d, 0x82, 0x95, 0x51, 0x86, 0xcb, 0xd9, 0xe1, 0xab, 0x60, 0x36, 0xb1,
0xd9, 0x01, 0x71, 0x5b, 0x0c, 0x3c, 0xc6, 0xc0, 0x79, 0x87, 0xa8, 0xcc, 0x8c, 0xeb, 0xf8, 0x50,
0x39, 0xbb, 0x28, 0x2d, 0x15, 0x50, 0xce, 0x2e, 0x46, 0xba, 0xe6, 0x46, 0xbb, 0xca, 0x39, 0x86,
0x4b, 0xd9, 0x44, 0x3d, 0x84, 0xf7, 0xbd, 0x88, 0xd6, 0xeb, 0x7c, 0x5a, 0x2f, 0xb1, 0x43, 0x08,
0x46, 0xed, 0x20, 0x78, 0xa6, 0x8c, 0xb3, 0xe0, 0xd8, 0xef, 0xda, 0x8f, 0x25, 0x70, 0x1e, 0xe1,
0xa8, 0x1d, 0xf8, 0x11, 0x86, 0x0a, 0x38, 0x57, 0xef, 0x34, 0x1a, 0x38, 0x8a, 0x58, 0x8e, 0xcf,
0xa3, 0x64, 0x09, 0x2f, 0x82, 0xb3, 0x75, 0xe2, 0x92, 0x4e, 0xc4, 0xea, 0x3b, 0x8e, 0xe2, 0x95,
0x50, 0xf7, 0xc2, 0x71, 0x75, 0x7f, 0x33, 0x5d, 0x4f, 0x96, 0xcb, 0x89, 0x7b, 0x73, 0x31, 0x58,
0x74, 0xa1, 0x14, 0x50, 0xfb, 0xd3, 0x64, 0xf2, 0x00, 0xf8, 0x2e, 0x98, 0x34, 0x49, 0xa3, 0x69,
0x1e, 0xe0, 0x06, 0xab, 0x1b, 0x3b, 0x05, 0xc5, 0xcb, 0xbd, 0xae, 0x3a, 0x7f, 0xe8, 0xee, 0xb5,
0x1e, 0x68, 0x98, 0x34, 0x9a, 0x3a, 0x3e, 0xc0, 0x0d, 0xbd, 0xed, 0x92, 0x5d, 0x0d, 0xa5, 0xe0,
0xf0, 0x3e, 0x18, 0x37, 0x76, 0xb0, 0x4f, 0x8c, 0x66, 0x33, 0x54, 0x26, 0x18, 0x77, 0xbe, 0xd7,
0x55, 0x67, 0x39, 0xd7, 0xa5, 0x2e, 0xdd, 0x6d, 0x36, 0x43, 0x0d, 0xf5, 0x71, 0xd0, 0x02, 0xb3,
0xab, 0xae, 0xd7, 0x6a, 0x07, 0x9e, 0x4f, 0xd6, 0x6c, 0xbb, 0xc6, 0xc8, 0x93, 0x8c, 0xbc, 0xd0,
0xeb, 0xaa, 0x57, 0x38, 0x79, 0x3b, 0x81, 0xe8, 0xbb, 0x84, 0xb4, 0x63, 0x95, 0x3c, 0x11, 0xea,
0xe0, 0x5c, 0xd1, 0x8d, 0xf0, 0x8a, 0x17, 0x2a, 0x98, 0x69, 0xcc, 0xf5, 0xba, 0xea, 0x0c, 0xd7,
0xd8, 0x72, 0x23, 0xac, 0x37, 0xbd, 0x50, 0x43, 0x09, 0x06, 0x3e, 0x00, 0x13, 0x74, 0x07, 0x56,
0xb0, 0xc3, 0xf6, 0xbb, 0xcd, 0x28, 0x4a, 0xaf, 0xab, 0x5e, 0x10, 0xf6, 0xdb, 0x0a, 0x76, 0xe2,
0xed, 0x8a, 0x60, 0xf8, 0x08, 0xcc, 0xd0, 0x25, 0x3f, 0xf6, 0xb5, 0x30, 0x38, 0x38, 0x54, 0x3e,
0x63, 0x25, 0x2d, 0x5e, 0xeb, 0x75, 0x55, 0x45, 0x10, 0x68, 0x30, 0x88, 0xde, 0xa6, 0x18, 0x0d,
0x65, 0x59, 0xd0, 0x00, 0x53, 0xd4, 0x54, 0xc3, 0x38, 0xe4, 0x32, 0x9f, 0x73, 0x99, 0x2b, 0xbd,
0xae, 0x7a, 0x51, 0x90, 0x69, 0x63, 0x1c, 0x26, 0x22, 0x69, 0x06, 0xac, 0x01, 0xd8, 0x57, 0x35,
0xfd, 0x26, 0x4b, 0x8a, 0xf2, 0x29, 0x3b, 0x48, 0x45, 0xb5, 0xd7, 0x55, 0xaf, 0xe6, 0xc3, 0xc1,
0x31, 0x4c, 0x43, 0x03, 0xb8, 0xf0, 0xab, 0x60, 0x94, 0x5a, 0x95, 0x9f, 0xf2, 0xcb, 0x66, 0x22,
0x3e, 0x47, 0xd4, 0x56, 0x9c, 0xe9, 0x75, 0xd5, 0x89, 0xbe, 0xa0, 0x86, 0x18, 0x14, 0x16, 0xc1,
0x3c, 0xfd, 0x59, 0xf5, 0xfb, 0x6f, 0x45, 0x44, 0x82, 0x10, 0x2b, 0x3f, 0xcb, 0x6b, 0xa0, 0xc1,
0x50, 0xb8, 0x02, 0xa6, 0x79, 0x20, 0x25, 0x1c, 0x92, 0x15, 0x97, 0xb8, 0xca, 0x77, 0xd8, 0xe5,
0x51, 0xbc, 0xda, 0xeb, 0xaa, 0x97, 0xf8, 0x33, 0xe3, 0xf8, 0x1b, 0x38, 0x24, 0x7a, 0xd3, 0x25,
0xae, 0x86, 0x32, 0x9c, 0xb4, 0x0a, 0xab, 0xec, 0x77, 0x8f, 0x55, 0xe1, 0xd5, 0xcd, 0x70, 0x68,
0x5d, 0xb8, 0x65, 0x1d, 0x1f, 0xb2, 0x50, 0xbe, 0xc7, 0x45, 0x84, 0xba, 0xc4, 0x22, 0xcf, 0xf0,
0x61, 0x1c, 0x49, 0x9a, 0x91, 0x92, 0x60, 0x71, 0x7c, 0xff, 0x38, 0x09, 0x1e, 0x46, 0x9a, 0x01,
0x6d, 0x30, 0xc7, 0x0d, 0x76, 0xd8, 0x89, 0x08, 0x6e, 0x96, 0x0c, 0x16, 0xcb, 0x0f, 0xb8, 0xd0,
0x8d, 0x5e, 0x57, 0xbd, 0x9e, 0x12, 0x22, 0x1c, 0xa6, 0x37, 0xdc, 0x38, 0xa4, 0x41, 0xf4, 0x01,
0xaa, 0x2c, 0xbc, 0x1f, 0x9e, 0x42, 0x95, 0x47, 0x39, 0x88, 0x0e, 0xdf, 0x03, 0x93, 0xf4, 0x4c,
0x1e, 0xd5, 0xee, 0x5f, 0x85, 0xec, 0x05, 0xc2, 0xce, 0xb0, 0x50, 0xb9, 0x14, 0x5e, 0xe4, 0xb3,
0x70, 0xfe, 0x7d, 0x0c, 0x3f, 0xbe, 0x80, 0x44, 0x3c, 0x7c, 0x07, 0x4c, 0xd0, 0x75, 0x52, 0xaf,
0xff, 0x14, 0xb2, 0xef, 0x33, 0xa3, 0xf7, 0xab, 0x25, 0xa2, 0x05, 0x32, 0x7b, 0xf6, 0x7f, 0x87,
0x93, 0xe3, 0xcb, 0x40, 0x40, 0xc3, 0x0a, 0x98, 0xa5, 0xcb, 0x74, 0x8d, 0xbe, 0x2c, 0x64, 0xdf,
0x3f, 0x26, 0x91, 0xab, 0x50, 0x9e, 0x9a, 0xd3, 0x63, 0x21, 0xfd, 0xef, 0x44, 0x3d, 0x1e, 0x59,
0x9e, 0x4a, 0x6f, 0xf6, 0x54, 0x47, 0xfe, 0xc3, 0x68, 0x76, 0x77, 0x51, 0xec, 0x4e, 0x12, 0x9b,
0x6a, 0xd6, 0x6f, 0x65, 0x9a, 0xcb, 0x1f, 0x4f, 0xdd, 0x5d, 0x7e, 0x3e, 0x99, 0xcc, 0x23, 0xf4,
0x6e, 0xa6, 0x7b, 0xa3, 0x77, 0xb3, 0x94, 0xbd, 0x9b, 0x69, 0x22, 0xe2, 0xbb, 0x39, 0xc6, 0xc0,
0x57, 0xc1, 0xb9, 0x0a, 0x26, 0x1f, 0x07, 0xe1, 0x33, 0xde, 0x10, 0x8b, 0xb0, 0xd7, 0x55, 0xa7,
0x39, 0xdc, 0xe7, 0x0e, 0x0d, 0x25, 0x10, 0x78, 0x13, 0x8c, 0xb2, 0xce, 0xc1, 0x53, 0x24, 0xdc,
0x50, 0xbc, 0x55, 0x30, 0x27, 0x2c, 0x81, 0xe9, 0x15, 0xdc, 0x72, 0x0f, 0x2d, 0x97, 0x60, 0xbf,
0x71, 0xb8, 0x11, 0xb1, 0x2e, 0x35, 0x25, 0x5e, 0x0b, 0x4d, 0xea, 0xd7, 0x5b, 0x1c, 0xa0, 0xef,
0x45, 0x1a, 0xca, 0x50, 0xe0, 0xd7, 0x81, 0x9c, 0xb6, 0xa0, 0x7d, 0xd6, 0xaf, 0xa6, 0xc4, 0x7e,
0x95, 0x95, 0xd1, 0xc3, 0x7d, 0x0d, 0xe5, 0x78, 0xf0, 0x43, 0x30, 0xbf, 0xd9, 0x6e, 0xba, 0x04,
0x37, 0x33, 0x71, 0x4d, 0x31, 0xc1, 0x9b, 0xbd, 0xae, 0xaa, 0x72, 0xc1, 0x0e, 0x87, 0xe9, 0xf9,
0xf8, 0x06, 0x2b, 0xc0, 0x37, 0x00, 0x40, 0x41, 0xc7, 0x6f, 0x5a, 0xde, 0x9e, 0x47, 0x94, 0xf9,
0x45, 0x69, 0x69, 0xac, 0x78, 0xb1, 0xd7, 0x55, 0x21, 0xd7, 0x0b, 0xa9, 0x4f, 0x6f, 0x51, 0xa7,
0x86, 0x04, 0x24, 0x2c, 0x82, 0x69, 0xf3, 0xc0, 0x23, 0x55, 0xbf, 0xe4, 0x46, 0x98, 0x36, 0x58,
0xe5, 0x62, 0xae, 0x1b, 0x1d, 0x78, 0x44, 0x0f, 0x7c, 0x9d, 0x36, 0xe5, 0x4e, 0x88, 0x35, 0x94,
0x61, 0xc0, 0x35, 0x20, 0x97, 0x02, 0x3f, 0xf2, 0x22, 0x16, 0x4c, 0x69, 0x17, 0x37, 0x9e, 0x29,
0x97, 0xb2, 0xad, 0xb1, 0xd1, 0x47, 0xe8, 0x0d, 0x0a, 0xd1, 0x50, 0x8e, 0x05, 0xdf, 0x06, 0x13,
0xa6, 0xef, 0x6e, 0xb5, 0x70, 0xad, 0x1d, 0x06, 0xdb, 0x8a, 0xc2, 0x44, 0x2e, 0xf5, 0xba, 0xea,
0x5c, 0x1c, 0x0a, 0x73, 0xea, 0x6d, 0xea, 0xa5, 0xfd, 0xb9, 0x8f, 0xa5, 0xbd, 0x9d, 0x06, 0xc4,
0xd2, 0xb2, 0x11, 0x29, 0x2a, 0xcb, 0xa8, 0x70, 0xe0, 0x1b, 0x6c, 0x1c, 0x60, 0xe9, 0xa4, 0x69,
0x14, 0xc1, 0xf4, 0xb1, 0x74, 0x59, 0xdf, 0xed, 0x6c, 0x6f, 0xb7, 0xb0, 0xb2, 0x98, 0x7d, 0x2c,
0xe3, 0x46, 0xdc, 0x1b, 0x53, 0x63, 0x2c, 0x7c, 0x19, 0x8c, 0xd1, 0x65, 0xa4, 0xdc, 0xa0, 0xc3,
0x71, 0x51, 0xee, 0x75, 0xd5, 0xc9, 0x3e, 0x29, 0xd2, 0x10, 0x77, 0xc3, 0x75, 0x61, 0xee, 0x29,
0x05, 0x7b, 0x7b, 0xae, 0xdf, 0x8c, 0x14, 0x8d, 0x71, 0xae, 0xf7, 0xba, 0xea, 0xe5, 0xec, 0xdc,
0xd3, 0x88, 0x31, 0xe2, 0xd8, 0x93, 0xf0, 0xe8, 0xc1, 0x46, 0x1d, 0xdf, 0xc7, 0xe1, 0xd1, 0xe8,
0x76, 0x3b, 0xdb, 0xef, 0x42, 0xe6, 0x17, 0x87, 0xb7, 0x0c, 0x05, 0x96, 0x81, 0x6c, 0x1e, 0x10,
0x1c, 0xfa, 0x6e, 0xeb, 0x48, 0x66, 0x99, 0xc9, 0x08, 0x01, 0xe1, 0x18, 0x21, 0x0a, 0xe5, 0x68,
0x34, 0x7f, 0x75, 0x12, 0xe2, 0x28, 0xb2, 0x0f, 0xdb, 0x38, 0x52, 0x30, 0xdb, 0x96, 0x90, 0xbf,
0x88, 0x39, 0x75, 0x42, 0xbd, 0x1a, 0x12, 0xb1, 0xf0, 0x7d, 0x30, 0xc5, 0x97, 0xeb, 0xf8, 0x90,
0xcd, 0xf9, 0xdb, 0xec, 0xe8, 0x0a, 0xc7, 0x2f, 0x26, 0xd3, 0x9b, 0x38, 0xf2, 0x3e, 0xc1, 0x1a,
0x4a, 0x13, 0xe8, 0x30, 0x94, 0x32, 0x58, 0x6e, 0xb8, 0x83, 0x95, 0x1d, 0x26, 0xb3, 0xd8, 0xeb,
0xaa, 0xd7, 0x06, 0xca, 0xe8, 0x2d, 0x0a, 0xd3, 0xd0, 0x00, 0x2e, 0x7c, 0x02, 0x2e, 0xf4, 0xad,
0x9d, 0xed, 0x6d, 0xef, 0x00, 0xb9, 0xfe, 0x0e, 0x56, 0x76, 0x99, 0xa6, 0xd6, 0xeb, 0xaa, 0x0b,
0x79, 0x4d, 0x86, 0xd3, 0x43, 0x0a, 0xd4, 0xd0, 0x40, 0x3e, 0xfc, 0x06, 0xb8, 0x34, 0xc8, 0x6e,
0x1f, 0xf8, 0x8a, 0xc7, 0xa4, 0x5f, 0xee, 0x75, 0x55, 0xed, 0x58, 0x69, 0x9d, 0x1c, 0xf8, 0x1a,
0x1a, 0x26, 0x43, 0x87, 0xd4, 0x23, 0x97, 0x7d, 0xe0, 0x57, 0xdb, 0x91, 0xf2, 0x4d, 0xa6, 0x2c,
0x94, 0x54, 0x50, 0x26, 0x07, 0xbe, 0x1e, 0xb4, 0x23, 0x0d, 0x65, 0x59, 0xfd, 0xb2, 0xf0, 0xbe,
0x1f, 0xf1, 0x59, 0x77, 0x4c, 0xec, 0xcd, 0xb1, 0x0e, 0x9f, 0x18, 0xa2, 0xa3, 0xb2, 0xc4, 0x04,
0xf8, 0x3a, 0x18, 0xe7, 0x86, 0xc7, 0xb5, 0x3a, 0x1f, 0x71, 0xc7, 0xc4, 0xcf, 0x83, 0x98, 0xfd,
0x11, 0x7d, 0x7a, 0x1f, 0xa8, 0x7d, 0x39, 0xc5, 0x07, 0x51, 0x7a, 0xc1, 0xf7, 0xbf, 0x4c, 0xc5,
0x0b, 0xde, 0x77, 0xf7, 0xb0, 0x86, 0x98, 0x53, 0x6c, 0x31, 0x23, 0xa7, 0x68, 0x31, 0xcb, 0xe0,
0xec, 0x53, 0xc3, 0xa2, 0xe8, 0x42, 0xb6, 0xc3, 0x7c, 0xec, 0xb6, 0x38, 0x38, 0x46, 0xc0, 0x2a,
0x98, 0x5b, 0xc3, 0x6e, 0x48, 0xb6, 0xb0, 0x4b, 0xca, 0x3e, 0xc1, 0xe1, 0xbe, 0xdb, 0x8a, 0x1b,
0x48, 0x41, 0xcc, 0xe6, 0x6e, 0x02, 0xd2, 0xbd, 0x18, 0xa5, 0xa1, 0x41, 0x4c, 0x58, 0x06, 0xb3,
0x66, 0x0b, 0x37, 0xe8, 0xb7, 0xbd, 0xed, 0xed, 0xe1, 0xa0, 0x43, 0x36, 0x22, 0xd6, 0x48, 0x0a,
0xe2, 0x6b, 0x8b, 0x63, 0x88, 0x4e, 0x38, 0x46, 0x43, 0x79, 0x16, 0x7d, 0x73, 0x2d, 0x76, 0x6d,
0x0a, 0xdf, 0xe6, 0xf3, 0xd9, 0xab, 0xa4, 0xc5, 0x10, 0xc9, 0xf4, 0xdf, 0x09, 0x5b, 0x91, 0x86,
0x72, 0x34, 0x88, 0xc0, 0x9c, 0xd1, 0xdc, 0xc7, 0x21, 0xf1, 0x22, 0x2c, 0xa8, 0x5d, 0x64, 0x6a,
0xc2, 0xdb, 0xe3, 0x26, 0xa0, 0xb4, 0xe0, 0x20, 0x32, 0x7c, 0x3b, 0x99, 0x82, 0x8d, 0x0e, 0x09,
0x6c, 0xab, 0x1e, 0xf7, 0x02, 0xa1, 0x36, 0x6e, 0x87, 0x04, 0x3a, 0xa1, 0x02, 0x69, 0x24, 0xbd,
0xd8, 0xfa, 0x53, 0xb9, 0xd1, 0x21, 0xbb, 0x71, 0x0b, 0x18, 0x32, 0xc8, 0xbb, 0x9d, 0xcc, 0x20,
0x4f, 0x29, 0xf0, 0x6b, 0xa2, 0xc8, 0xaa, 0xd7, 0xc2, 0xca, 0x65, 0x56, 0xee, 0x0b, 0xbd, 0xae,
0x2a, 0xc7, 0x22, 0x94, 0xbd, 0xed, 0xd1, 0xdb, 0x3c, 0x83, 0xed, 0x47, 0xbf, 0x8e, 0x0f, 0x19,
0xf9, 0x4a, 0xf6, 0x64, 0xd1, 0x37, 0x87, 0x73, 0xd3, 0x48, 0x68, 0xe5, 0xa6, 0x6c, 0x26, 0x70,
0x35, 0xfb, 0x0d, 0x20, 0x4c, 0x70, 0x5c, 0x67, 0x10, 0x8d, 0xe6, 0x82, 0x97, 0x8b, 0x8e, 0x77,
0xac, 0x2a, 0x2a, 0xab, 0x8a, 0x90, 0x8b, 0xb8, 0xc6, 0x6c, 0x2c, 0xe4, 0x05, 0xc9, 0x50, 0xa0,
0x0d, 0x66, 0x8f, 0x4a, 0x74, 0xa4, 0xb3, 0xc8, 0x74, 0x84, 0xcb, 0xc6, 0xf3, 0x3d, 0xe2, 0xb9,
0x2d, 0xbd, 0x5f, 0x65, 0x41, 0x32, 0x2f, 0x40, 0x7b, 0x2d, 0xfd, 0x3d, 0xa9, 0xef, 0x0d, 0x56,
0xa3, 0xec, 0xe8, 0xdc, 0x2f, 0xb2, 0x08, 0xa6, 0xd7, 0x35, 0x1b, 0xe2, 0xd3, 0x65, 0xd6, 0x98,
0x84, 0x70, 0xe0, 0xf8, 0xe4, 0x9f, 0xab, 0xf5, 0x00, 0x2e, 0x1d, 0x76, 0x93, 0xcf, 0x02, 0x96,
0xef, 0x9b, 0xc3, 0xbf, 0x22, 0x78, 0xba, 0x53, 0xf0, 0x64, 0x33, 0x49, 0xb9, 0x5f, 0x1a, 0xfa,
0x1d, 0xc0, 0xc9, 0x22, 0x18, 0x6e, 0x64, 0xe6, 0x76, 0xa6, 0x70, 0xeb, 0xa4, 0xb1, 0x9d, 0x0b,
0xe5, 0x99, 0x74, 0x18, 0x2b, 0xf3, 0x52, 0x94, 0x5a, 0x1d, 0xf6, 0x47, 0xbd, 0xdb, 0xd9, 0xb3,
0x93, 0x94, 0xaa, 0xc1, 0x01, 0x1a, 0xca, 0x30, 0xe8, 0x1b, 0x9d, 0xb6, 0xd4, 0x89, 0x4b, 0x70,
0xdc, 0xd9, 0x85, 0x04, 0x67, 0x84, 0xf4, 0x88, 0xc2, 0x34, 0x34, 0x88, 0x9c, 0xd7, 0xb4, 0x83,
0x67, 0xd8, 0x57, 0x5e, 0x39, 0x49, 0x93, 0x50, 0x58, 0x4e, 0x93, 0x91, 0xe1, 0x43, 0x30, 0x95,
0x7c, 0x39, 0x94, 0x82, 0x8e, 0x4f, 0x94, 0xfb, 0xec, 0x2e, 0x14, 0x1b, 0x4c, 0xf2, 0x89, 0xd2,
0xa0, 0x7e, 0xda, 0x60, 0x44, 0x3c, 0xb4, 0xc0, 0xec, 0xe3, 0x4e, 0x40, 0xdc, 0xa2, 0xdb, 0x78,
0x86, 0xfd, 0x66, 0xf1, 0x90, 0xe0, 0x48, 0x79, 0x9d, 0x89, 0x08, 0x93, 0xf9, 0x47, 0x14, 0xa2,
0x6f, 0x71, 0x8c, 0xbe, 0x45, 0x41, 0x1a, 0xca, 0x13, 0x69, 0x2b, 0xa9, 0x85, 0xf8, 0x49, 0x40,
0xb0, 0xf2, 0x30, 0x7b, 0x5d, 0xb5, 0x43, 0xac, 0xef, 0x07, 0x34, 0x3b, 0x09, 0x46, 0xcc, 0x48,
0x10, 0x86, 0x9d, 0x36, 0xe1, 0x53, 0xef, 0xfb, 0xd9, 0x63, 0x7c, 0x94, 0x11, 0x8e, 0x4a, 0x26,
0xdf, 0x41, 0xe4, 0xe5, 0x3f, 0x8f, 0x08, 0x7f, 0x22, 0x86, 0x33, 0x60, 0xa2, 0x52, 0xb5, 0x9d,
0xba, 0x6d, 0x20, 0xdb, 0x5c, 0x91, 0xcf, 0xc0, 0x8b, 0x00, 0x96, 0x2b, 0x65, 0xbb, 0x6c, 0x58,
0xdc, 0xe8, 0x98, 0x76, 0x69, 0x45, 0x06, 0x50, 0x06, 0x93, 0xc8, 0x14, 0x2c, 0x13, 0xd4, 0x52,
0x2f, 0x3f, 0xb2, 0x4d, 0xb4, 0xc1, 0x2d, 0x17, 0xe0, 0x22, 0xb8, 0x56, 0x2f, 0x3f, 0x7a, 0xbc,
0x59, 0xe6, 0x18, 0xc7, 0xa8, 0xac, 0x38, 0xc8, 0xdc, 0xa8, 0x3e, 0x31, 0x9d, 0x15, 0xc3, 0x36,
0xe4, 0x79, 0x08, 0xc1, 0xf4, 0xaa, 0x69, 0x97, 0xd6, 0x9c, 0x7a, 0xc5, 0xa8, 0xd5, 0xd7, 0xaa,
0xb6, 0xbc, 0x00, 0x6f, 0x80, 0xeb, 0x39, 0x96, 0x81, 0x4a, 0x6b, 0xe5, 0x84, 0xb6, 0x04, 0xef,
0x82, 0x57, 0x8e, 0x13, 0x66, 0xeb, 0xba, 0x5d, 0xad, 0x39, 0xc6, 0x23, 0xb3, 0x62, 0xcb, 0xb7,
0xe1, 0x75, 0x70, 0xb9, 0x68, 0x19, 0xa5, 0xf5, 0xb5, 0xaa, 0x65, 0x3a, 0x35, 0xd3, 0x44, 0x4e,
0xad, 0x8a, 0x6c, 0xc7, 0xfe, 0xc0, 0x41, 0x1f, 0xc8, 0x4d, 0xa8, 0x82, 0xab, 0x9b, 0x95, 0xe1,
0x00, 0x0c, 0xaf, 0x80, 0xf9, 0x15, 0xd3, 0x32, 0x3e, 0xcc, 0xb9, 0x9e, 0x4b, 0xf0, 0x1a, 0xb8,
0xb4, 0x59, 0x19, 0xec, 0xfd, 0x4c, 0x5a, 0xfe, 0x07, 0x00, 0xa3, 0x74, 0x16, 0x87, 0x0a, 0xb8,
0x90, 0xa4, 0xa7, 0x5a, 0x31, 0x9d, 0xd5, 0xaa, 0x65, 0x55, 0x9f, 0x9a, 0x48, 0x3e, 0x13, 0xef,
0x26, 0xe7, 0x71, 0x36, 0x2b, 0x76, 0xd9, 0x72, 0x6c, 0x54, 0x7e, 0xf4, 0xc8, 0x44, 0xfd, 0x0c,
0x49, 0x34, 0x6b, 0x09, 0xc1, 0x32, 0x8d, 0x15, 0x13, 0xc9, 0x23, 0xf0, 0x36, 0xb8, 0x95, 0xb6,
0x0d, 0xa3, 0x17, 0x44, 0xfa, 0xe3, 0xcd, 0x2a, 0xda, 0xdc, 0x90, 0x47, 0x69, 0xdd, 0x13, 0x9b,
0x61, 0x59, 0xf2, 0x18, 0xbc, 0x09, 0xd4, 0x24, 0xc5, 0x42, 0x76, 0x53, 0x91, 0x03, 0xf8, 0x00,
0xbc, 0x71, 0x02, 0x68, 0x58, 0x14, 0x13, 0xb4, 0x24, 0x03, 0xb8, 0xf1, 0x7e, 0x26, 0xe1, 0xeb,
0xe0, 0xb5, 0xa1, 0xee, 0x61, 0xa2, 0x53, 0x70, 0x15, 0x14, 0x07, 0xb0, 0xf8, 0x2e, 0x63, 0x4b,
0xdd, 0xae, 0xa2, 0x23, 0xa1, 0x84, 0xea, 0xac, 0xa2, 0xea, 0x86, 0x53, 0x2f, 0x21, 0xc3, 0x2e,
0xad, 0xc9, 0xd3, 0x70, 0x19, 0xbc, 0x3c, 0xf4, 0x38, 0xa4, 0x93, 0xd0, 0x84, 0x06, 0x78, 0xf7,
0x74, 0xd8, 0x61, 0x61, 0x63, 0xf8, 0x12, 0x58, 0x1c, 0x2e, 0x11, 0xa7, 0x64, 0x1b, 0xbe, 0x03,
0xde, 0x3c, 0x09, 0x35, 0xec, 0x11, 0x3b, 0xc7, 0x3f, 0x22, 0x3e, 0x06, 0xbb, 0xf4, 0xdd, 0x1b,
0x8e, 0xa2, 0x07, 0xc3, 0x83, 0x5f, 0x01, 0xda, 0xc0, 0xc3, 0x9e, 0x4e, 0xcb, 0x73, 0x09, 0xde,
0x01, 0xb7, 0x91, 0x51, 0x59, 0xa9, 0x6e, 0x38, 0xa7, 0xc0, 0x7f, 0x26, 0xc1, 0xf7, 0xc0, 0xdb,
0x27, 0x03, 0x87, 0x6d, 0xf0, 0x73, 0x09, 0x9a, 0xe0, 0xfd, 0x53, 0x3f, 0x6f, 0x98, 0xcc, 0x2f,
0x24, 0x78, 0x03, 0x5c, 0x1b, 0xcc, 0x8f, 0xeb, 0xf0, 0x4b, 0x09, 0x2e, 0x81, 0x9b, 0xc7, 0x3e,
0x29, 0x46, 0xfe, 0x4a, 0x82, 0x6f, 0x81, 0xfb, 0xc7, 0x41, 0x86, 0x85, 0xf1, 0x6b, 0x09, 0x3e,
0x04, 0x0f, 0x4e, 0xf1, 0x8c, 0x61, 0x02, 0xbf, 0x39, 0x66, 0x1f, 0x71, 0xb1, 0xbf, 0x38, 0x79,
0x1f, 0x31, 0xf2, 0xb7, 0x12, 0x5c, 0x00, 0x97, 0x07, 0x43, 0xe8, 0x99, 0xf8, 0x9d, 0x04, 0x6f,
0x81, 0xc5, 0x63, 0x95, 0x28, 0xec, 0xf7, 0x12, 0x54, 0xc0, 0x5c, 0xa5, 0xea, 0xac, 0x1a, 0x65,
0xcb, 0x79, 0x5a, 0xb6, 0xd7, 0x9c, 0xba, 0x8d, 0xcc, 0x7a, 0x5d, 0xfe, 0xc9, 0x08, 0x0d, 0x25,
0xe5, 0xa9, 0x54, 0x63, 0xa7, 0xb3, 0x5a, 0x45, 0x8e, 0x55, 0x7e, 0x62, 0x56, 0x28, 0xf2, 0xd3,
0x11, 0x38, 0x03, 0x00, 0x85, 0xd5, 0xaa, 0xe5, 0x8a, 0x5d, 0x97, 0xbf, 0x5d, 0x80, 0x53, 0xe0,
0xbc, 0xf9, 0x81, 0x6d, 0xa2, 0x8a, 0x61, 0xc9, 0xff, 0x2c, 0x2c, 0x07, 0x00, 0xf4, 0xbf, 0xf4,
0xe1, 0x59, 0x30, 0xb2, 0xfe, 0x44, 0x3e, 0x03, 0xc7, 0xc1, 0x98, 0x65, 0x1a, 0x75, 0x53, 0x96,
0xe0, 0x1c, 0x98, 0x31, 0x2d, 0xb3, 0x64, 0x97, 0xab, 0x15, 0x07, 0x6d, 0x56, 0x2a, 0xec, 0xfa,
0x94, 0xc1, 0xe4, 0x53, 0xfa, 0xee, 0x27, 0x96, 0x02, 0x9c, 0x07, 0xb3, 0x56, 0xb5, 0xb4, 0xee,
0x20, 0xa3, 0x64, 0xa2, 0xc4, 0x3c, 0x4a, 0x81, 0x4c, 0x28, 0xb1, 0x8c, 0xdd, 0x7b, 0x08, 0xc6,
0xed, 0xd0, 0xf5, 0xa3, 0x76, 0x10, 0x12, 0x78, 0x4f, 0x5c, 0x4c, 0xc7, 0x7f, 0xb9, 0x8c, 0xff,
0x19, 0x7b, 0x65, 0xe6, 0x68, 0xcd, 0xff, 0x4f, 0xa7, 0x9d, 0x59, 0x92, 0x5e, 0x93, 0x8a, 0x17,
0x9e, 0xff, 0x75, 0xe1, 0xcc, 0xf3, 0x17, 0x0b, 0xd2, 0x17, 0x2f, 0x16, 0xa4, 0xbf, 0xbc, 0x58,
0x90, 0x7e, 0xf4, 0xb7, 0x85, 0x33, 0x5b, 0x67, 0xd9, 0x3f, 0x73, 0xef, 0xff, 0x3f, 0x00, 0x00,
0xff, 0xff, 0x2f, 0x55, 0xf7, 0x61, 0x15, 0x1e, 0x00, 0x00,
}

View File

@ -117,22 +117,23 @@ message Tester {
// RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
int32 RoundLimit = 21 [(gogoproto.moretags) = "yaml:\"round-limit\""];
// ExitOnFailure is true, then exit tester on first failure.
bool ExitOnFailure = 22 [(gogoproto.moretags) = "yaml:\"exit-on-failure\""];
// ExitOnCaseFail is true, then exit tester on first failure.
bool ExitOnCaseFail = 22 [(gogoproto.moretags) = "yaml:\"exit-on-failure\""];
// ConsistencyCheck is true to check consistency (revision, hash).
bool ConsistencyCheck = 23 [(gogoproto.moretags) = "yaml:\"consistency-check\""];
// EnablePprof is true to enable profiler.
bool EnablePprof = 24 [(gogoproto.moretags) = "yaml:\"enable-pprof\""];
// FailureDelayMs is the delay duration after failure is injected.
// CaseDelayMs is the delay duration after failure is injected.
// Useful when triggering snapshot or no-op failure cases.
uint32 FailureDelayMs = 31 [(gogoproto.moretags) = "yaml:\"failure-delay-ms\""];
// FailureShuffle is true to randomize failure injecting order.
bool FailureShuffle = 32 [(gogoproto.moretags) = "yaml:\"failure-shuffle\""];
// FailureCases is the selected test cases to schedule.
uint32 CaseDelayMs = 31 [(gogoproto.moretags) = "yaml:\"case-delay-ms\""];
// CaseShuffle is true to randomize failure injecting order.
bool CaseShuffle = 32 [(gogoproto.moretags) = "yaml:\"case-shuffle\""];
// Cases is the selected test cases to schedule.
// If empty, run all failure cases.
repeated string FailureCases = 33 [(gogoproto.moretags) = "yaml:\"failure-cases\""];
// Failpoinommands is the list of "gofail" commands (e.g. panic("etcd-tester"),1*sleep(1000)
repeated string Cases = 33 [(gogoproto.moretags) = "yaml:\"cases\""];
// FailpointCommands is the list of "gofail" commands
// (e.g. panic("etcd-tester"),1*sleep(1000).
repeated string FailpointCommands = 34 [(gogoproto.moretags) = "yaml:\"failpoint-commands\""];
// RunnerExecPath is a path of etcd-runner binary.
@ -242,12 +243,12 @@ enum Operation {
UNDELAY_PEER_PORT_TX_RX = 201;
}
// FailureCase defines various system faults in distributed systems,
// Case defines various system faults or test case in distributed systems,
// in order to verify correct behavior of etcd servers and clients.
enum FailureCase {
enum Case {
// SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
// but does not delete its data directories on disk for next restart.
// It waits "failure-delay-ms" before recovering this failure.
// It waits "delay-ms" before recovering this failure.
// The expected behavior is that the follower comes back online
// and rejoins the cluster, and then each member continues to process
// client requests ('Put' request that requires Raft consensus).
@ -265,9 +266,8 @@ enum FailureCase {
SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 1;
// SIGTERM_LEADER stops the active leader node but does not delete its
// data directories on disk for next restart. Then it waits
// "failure-delay-ms" before recovering this failure, in order to
// trigger election timeouts.
// data directories on disk for next restart. Then it waits "delay-ms"
// before recovering this failure, in order to trigger election timeouts.
// The expected behavior is that a new leader gets elected, and the
// old leader comes back online and rejoins the cluster as a follower.
// As always, after recovery, each member must be able to process
@ -287,16 +287,15 @@ enum FailureCase {
// SIGTERM_QUORUM stops majority number of nodes to make the whole cluster
// inoperable but does not delete data directories on stopped nodes
// for next restart. And it waits "failure-delay-ms" before recovering
// this failure.
// for next restart. And it waits "delay-ms" before recovering failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
SIGTERM_QUORUM = 4;
// SIGTERM_ALL stops the whole cluster but does not delete data directories
// on disk for next restart. And it waits "failure-delay-ms" before
// recovering this failure.
// on disk for next restart. And it waits "delay-ms" before recovering
// this failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
@ -306,7 +305,7 @@ enum FailureCase {
// (non-leader), deletes its data directories on disk, and removes
// this member from cluster (membership reconfiguration). On recovery,
// tester adds a new member, and this member joins the existing cluster
// with fresh data. It waits "failure-delay-ms" before recovering this
// with fresh data. It waits "delay-ms" before recovering this
// failure. This simulates destroying one follower machine, where operator
// needs to add a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
@ -329,7 +328,7 @@ enum FailureCase {
// SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
// data directories on disk, and removes this member from cluster.
// On recovery, tester adds a new member, and this member joins the
// existing cluster with fresh data. It waits "failure-delay-ms" before
// existing cluster with fresh data. It waits "delay-ms" before
// recovering this failure. This simulates destroying a leader machine,
// where operator needs to add a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
@ -378,7 +377,7 @@ enum FailureCase {
// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
// packets from/to the peer port on a randomly chosen follower
// (non-leader), and waits for "failure-delay-ms" until recovery.
// (non-leader), and waits for "delay-ms" until recovery.
// The expected behavior is that once dropping operation is undone,
// each member must be able to process client requests.
BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER = 100;
@ -396,7 +395,7 @@ enum FailureCase {
// BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
// from/to the peer port on the active leader (isolated), and waits for
// "failure-delay-ms" until recovery, in order to trigger election timeout.
// "delay-ms" until recovery, in order to trigger election timeout.
// The expected behavior is that after election timeout, a new leader gets
// elected, and once dropping operation is undone, the old leader comes
// back and rejoins the cluster as a follower. As always, after recovery,
@ -416,7 +415,7 @@ enum FailureCase {
// BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
// from/to the peer ports on majority nodes of cluster, thus losing its
// leader and cluster being inoperable. And it waits for "failure-delay-ms"
// leader and cluster being inoperable. And it waits for "delay-ms"
// until recovery.
// The expected behavior is that once packet drop operation is undone,
// nodes come back online, thus cluster comes back operative. As always,
@ -425,7 +424,7 @@ enum FailureCase {
// BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
// from/to the peer ports on all nodes, thus making cluster totally
// inoperable. It waits for "failure-delay-ms" until recovery.
// inoperable. It waits for "delay-ms" until recovery.
// The expected behavior is that once packet drop operation is undone,
// nodes come back online, thus cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
@ -433,7 +432,7 @@ enum FailureCase {
// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
// from/to the peer port on a randomly chosen follower (non-leader).
// It waits for "failure-delay-ms" until recovery.
// It waits for "delay-ms" until recovery.
// The expected behavior is that once packet delay operation is undone,
// the follower comes back and tries to catch up with latest changes from
// cluster. And as always, after recovery, each member must be able to
@ -442,8 +441,8 @@ enum FailureCase {
// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
// packets from/to the peer port on a randomly chosen follower
// (non-leader) with a randomized time duration (thus isolated). It waits
// for "failure-delay-ms" until recovery.
// (non-leader) with a randomized time duration (thus isolated). It
// waits for "delay-ms" until recovery.
// The expected behavior is that once packet delay operation is undone,
// each member must be able to process client requests.
RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 201;
@ -472,8 +471,8 @@ enum FailureCase {
RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 203;
// DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
// the peer port on the active leader. And waits for "failure-delay-ms"
// until recovery.
// the peer port on the active leader. And waits for "delay-ms" until
// recovery.
// The expected behavior is that cluster may elect a new leader, and
// once packet delay operation is undone, the (old) leader comes back
// and tries to catch up with latest changes from cluster. As always,
@ -482,7 +481,7 @@ enum FailureCase {
// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
// from/to the peer port on the active leader with a randomized time
// duration. And waits for "failure-delay-ms" until recovery.
// duration. And waits for "delay-ms" until recovery.
// The expected behavior is that cluster may elect a new leader, and
// once packet delay operation is undone, the (old) leader comes back
// and tries to catch up with latest changes from cluster. As always,
@ -516,7 +515,7 @@ enum FailureCase {
// DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
// the peer ports on majority nodes of cluster. And it waits for
// "failure-delay-ms" until recovery, likely to trigger election timeouts.
// "delay-ms" until recovery, likely to trigger election timeouts.
// The expected behavior is that cluster may elect a new leader, while
// quorum of nodes struggle with slow networks, and once delay operation
// is undone, nodes come back and cluster comes back operative. As always,
@ -525,8 +524,8 @@ enum FailureCase {
// RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
// from/to the peer ports on majority nodes of cluster, with randomized
// time durations. And it waits for "failure-delay-ms" until recovery,
// likely to trigger election timeouts.
// time durations. And it waits for "delay-ms" until recovery, likely
// to trigger election timeouts.
// The expected behavior is that cluster may elect a new leader, while
// quorum of nodes struggle with slow networks, and once delay operation
// is undone, nodes come back and cluster comes back operative. As always,
@ -534,8 +533,8 @@ enum FailureCase {
RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM = 209;
// DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
// peer ports on all nodes. And it waits for "failure-delay-ms" until
// recovery, likely to trigger election timeouts.
// peer ports on all nodes. And it waits for "delay-ms" until recovery,
// likely to trigger election timeouts.
// The expected behavior is that cluster may become totally inoperable,
// struggling with slow networks across the whole cluster. Once delay
// operation is undone, nodes come back and cluster comes back operative.
@ -545,7 +544,7 @@ enum FailureCase {
// RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
// from/to the peer ports on all nodes, with randomized time durations.
// And it waits for "failure-delay-ms" until recovery, likely to trigger
// And it waits for "delay-ms" until recovery, likely to trigger
// election timeouts.
// The expected behavior is that cluster may become totally inoperable,
// struggling with slow networks across the whole cluster. Once delay
@ -556,7 +555,7 @@ enum FailureCase {
// NO_FAIL_WITH_STRESS stops injecting failures while testing the
// consistency and correctness under pressure loads, for the duration of
// "failure-delay-ms". Goal is to ensure cluster be still making progress
// "delay-ms". Goal is to ensure cluster be still making progress
// on recovery, and verify system does not deadlock following a sequence
// of failure injections.
// The expected behavior is that cluster remains fully operative in healthy
@ -566,7 +565,7 @@ enum FailureCase {
// NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
// sends stressig client requests to the cluster, for the duration of
// "failure-delay-ms". Goal is to ensure cluster be still making progress
// "delay-ms". Goal is to ensure cluster be still making progress
// on recovery, and verify system does not deadlock following a sequence
// of failure injections.
// The expected behavior is that cluster remains fully operative in healthy

320
functional/tester/case.go Normal file
View File

@ -0,0 +1,320 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"fmt"
"math/rand"
"time"
"github.com/coreos/etcd/functional/rpcpb"
"go.uber.org/zap"
)
// Case defines the failure/test injection interface.
// To add a test case:
//  1. implement the "Case" interface
//  2. define the test case name in "rpcpb.Case"
type Case interface {
	// Inject injects the failure into the testing cluster at the given
	// round. When calling the function, the cluster should be in health.
	Inject(clus *Cluster) error
	// Recover recovers the injected failure caused by the injection of the
	// given round and waits for the recovery of the testing cluster.
	Recover(clus *Cluster) error
	// Desc returns a description of the failure.
	Desc() string
	// TestCase returns the "rpcpb.Case" enum type of this case.
	TestCase() rpcpb.Case
}
// injectMemberFunc injects a failure into the cluster member at the given index.
type injectMemberFunc func(*Cluster, int) error

// recoverMemberFunc recovers a previously injected failure on the cluster
// member at the given index.
type recoverMemberFunc func(*Cluster, int) error
// caseByFunc implements Case in terms of per-member inject/recover functions.
type caseByFunc struct {
	// desc is an optional human-readable description; when empty,
	// Desc falls back to rpcpbCase.String().
	desc string
	// rpcpbCase is the "rpcpb.Case" enum reported by TestCase.
	rpcpbCase rpcpb.Case
	// injectMember injects the failure into one member by index.
	injectMember injectMemberFunc
	// recoverMember recovers the failure on one member by index.
	recoverMember recoverMemberFunc
}
// Desc returns the custom description when one is set, otherwise the
// string form of the underlying "rpcpb.Case" enum.
func (c *caseByFunc) Desc() string {
	if c.desc == "" {
		return c.rpcpbCase.String()
	}
	return c.desc
}
// TestCase returns the "rpcpb.Case" enum of this case.
func (c *caseByFunc) TestCase() rpcpb.Case {
	return c.rpcpbCase
}
// caseFollower is a Case that targets one follower (non-leader) member,
// rotating through members across rounds while skipping the leader.
type caseFollower struct {
	caseByFunc
	// last is the index of the member targeted most recently
	// (-1 before the first run).
	last int
	// lead is the cached leader index, refreshed by updateIndex.
	lead int
}
// updateIndex refreshes the cached leader index and advances "last" to
// the next follower to target, never landing on the current leader.
func (c *caseFollower) updateIndex(clus *Cluster) error {
	lead, err := clus.GetLeader()
	if err != nil {
		return err
	}
	c.lead = lead

	n := len(clus.Members)
	next := (c.last + 1) % n
	if c.last == -1 { // first run: derive the start index from the round
		next = clus.rd % n
	}
	if next == c.lead {
		next = (next + 1) % n
	}
	c.last = next
	return nil
}
// Inject picks the next follower to target, then injects the failure
// into that member.
func (c *caseFollower) Inject(clus *Cluster) error {
	err := c.updateIndex(clus)
	if err != nil {
		return err
	}
	return c.injectMember(clus, c.last)
}
// Recover undoes the failure on the member injected most recently.
func (c *caseFollower) Recover(clus *Cluster) error {
	return c.recoverMember(clus, c.last)
}
// Desc returns the custom description when one is set, otherwise the
// string form of the underlying "rpcpb.Case" enum.
func (c *caseFollower) Desc() string {
	if c.desc == "" {
		return c.rpcpbCase.String()
	}
	return c.desc
}
// TestCase returns the "rpcpb.Case" enum of this case.
func (c *caseFollower) TestCase() rpcpb.Case {
	return c.rpcpbCase
}
// caseLeader is a Case that targets the current cluster leader.
type caseLeader struct {
	caseByFunc
	// last is the index of the member injected most recently.
	last int
	// lead is the cached leader index, refreshed by updateIndex.
	lead int
}
// updateIndex caches the current leader index and marks the leader as
// the member to target.
func (c *caseLeader) updateIndex(clus *Cluster) error {
	lead, err := clus.GetLeader()
	if err != nil {
		return err
	}
	c.lead, c.last = lead, lead
	return nil
}
// Inject looks up the current leader, then injects the failure into it.
func (c *caseLeader) Inject(clus *Cluster) error {
	err := c.updateIndex(clus)
	if err != nil {
		return err
	}
	return c.injectMember(clus, c.last)
}
// Recover undoes the failure on the member injected most recently.
func (c *caseLeader) Recover(clus *Cluster) error {
	return c.recoverMember(clus, c.last)
}
// TestCase returns the "rpcpb.Case" enum of this case.
func (c *caseLeader) TestCase() rpcpb.Case {
	return c.rpcpbCase
}
// caseQuorum is a Case that targets a randomly picked majority (quorum)
// of cluster members at once.
type caseQuorum struct {
	caseByFunc
	// injected records the member indexes chosen by Inject so that
	// Recover undoes the failure on exactly those members.
	injected map[int]struct{}
}
// Inject picks a fresh quorum of members and injects the failure into
// each of them, stopping at the first error.
func (c *caseQuorum) Inject(clus *Cluster) (err error) {
	c.injected = pickQuorum(len(clus.Members))
	for idx := range c.injected {
		if err = c.injectMember(clus, idx); err != nil {
			return err
		}
	}
	return nil
}
// Recover undoes the failure on every member chosen by the preceding
// Inject, stopping at the first error.
func (c *caseQuorum) Recover(clus *Cluster) (err error) {
	for idx := range c.injected {
		if err = c.recoverMember(clus, idx); err != nil {
			return err
		}
	}
	return nil
}
// Desc returns the custom description when one is set, otherwise the
// string form of the underlying "rpcpb.Case" enum.
func (c *caseQuorum) Desc() string {
	if c.desc == "" {
		return c.rpcpbCase.String()
	}
	return c.desc
}
// TestCase returns the "rpcpb.Case" enum of this case.
func (c *caseQuorum) TestCase() rpcpb.Case {
	return c.rpcpbCase
}
// pickQuorum randomly selects a majority (size/2 + 1) of distinct member
// indexes in the range [0, size).
func pickQuorum(size int) (picked map[int]struct{}) {
	picked = make(map[int]struct{})
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	for need := size/2 + 1; len(picked) < need; {
		picked[rnd.Intn(size)] = struct{}{}
	}
	return picked
}
type caseAll caseByFunc
// Inject injects the failure into every member of the cluster,
// stopping at the first error.
func (c *caseAll) Inject(clus *Cluster) (err error) {
	for idx := range clus.Members {
		if err = c.injectMember(clus, idx); err != nil {
			return err
		}
	}
	return nil
}
// Recover undoes the failure on every member of the cluster,
// stopping at the first error.
func (c *caseAll) Recover(clus *Cluster) (err error) {
	for idx := range clus.Members {
		if err = c.recoverMember(clus, idx); err != nil {
			return err
		}
	}
	return nil
}
// Desc returns the custom description when one is set, otherwise the
// string form of the underlying "rpcpb.Case" enum.
func (c *caseAll) Desc() string {
	if c.desc == "" {
		return c.rpcpbCase.String()
	}
	return c.desc
}
// TestCase returns the "rpcpb.Case" enum of this case.
func (c *caseAll) TestCase() rpcpb.Case {
	return c.rpcpbCase
}
// caseUntilSnapshot wraps a Case: it injects the failure/test and then
// waits until the cluster commits enough entries to trigger a snapshot.
type caseUntilSnapshot struct {
	// desc overrides the wrapped case's description when non-empty.
	desc string
	// rpcpbCase is the "rpcpb.Case" enum reported by TestCase.
	rpcpbCase rpcpb.Case
	Case
}
// slowCases marks all delay failure cases except the ones failing with
// latency greater than election timeout (trigger leader election and
// cluster keeps operating anyways). Cases in this set slow the cluster
// down, so waiting for a snapshot to trigger needs extra retries.
var slowCases = map[rpcpb.Case]bool{
	rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER:                        true,
	rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT:        true,
	rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT: true,
	rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER:                              true,
	rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT:              true,
	rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT:       true,
	rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM:                              true,
	rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL:                                 true,
}
// Inject delegates to the wrapped Case, then polls the cluster's maximum
// revision until it has advanced by more than the configured snapshot
// count, i.e. until a new snapshot should have been created. Returns an
// error if the revision cannot be read or the cluster is too slow.
func (c *caseUntilSnapshot) Inject(clus *Cluster) error {
	if err := c.Case.Inject(clus); err != nil {
		return err
	}

	// snapshot count of the first member; assumes all members share the
	// same snapshot configuration — TODO confirm against cluster setup.
	snapshotCount := clus.Members[0].Etcd.SnapshotCount

	now := time.Now()
	clus.lg.Info(
		"trigger snapshot START",
		zap.String("desc", c.Desc()),
		zap.Int64("etcd-snapshot-count", snapshotCount),
	)

	// maxRev may fail since failure just injected, retry if failed.
	startRev, err := clus.maxRev()
	for i := 0; i < 10 && startRev == 0; i++ {
		startRev, err = clus.maxRev()
	}
	if startRev == 0 {
		// revision never became readable; surface the last error.
		return err
	}
	lastRev := startRev

	// healthy cluster could accept 1000 req/sec at least.
	// 3x time to trigger snapshot.
	retries := int(snapshotCount) / 1000 * 3
	if v, ok := slowCases[c.TestCase()]; v && ok {
		// slow network takes more retries
		retries *= 5
	}

	for i := 0; i < retries; i++ {
		// errors are deliberately ignored here; a transiently unreadable
		// revision just burns one retry.
		lastRev, _ = clus.maxRev()
		// If the number of proposals committed is bigger than snapshot count,
		// a new snapshot should have been created.
		dicc := lastRev - startRev
		if dicc > snapshotCount {
			clus.lg.Info(
				"trigger snapshot PASS",
				zap.Int("retries", i),
				zap.String("desc", c.Desc()),
				zap.Int64("committed-entries", dicc),
				zap.Int64("etcd-snapshot-count", snapshotCount),
				zap.Int64("last-revision", lastRev),
				zap.Duration("took", time.Since(now)),
			)
			return nil
		}

		clus.lg.Info(
			"trigger snapshot PROGRESS",
			zap.Int("retries", i),
			zap.Int64("committed-entries", dicc),
			zap.Int64("etcd-snapshot-count", snapshotCount),
			zap.Int64("last-revision", lastRev),
			zap.Duration("took", time.Since(now)),
		)
		// pause between polls to give the cluster time to commit entries.
		time.Sleep(time.Second)
	}

	return fmt.Errorf("cluster too slow: only %d commits in %d retries", lastRev-startRev, retries)
}
// Desc returns, in order of preference: the custom description, the
// string form of the "rpcpb.Case" enum, or the wrapped case's description.
func (c *caseUntilSnapshot) Desc() string {
	switch {
	case c.desc != "":
		return c.desc
	case c.rpcpbCase.String() != "":
		return c.rpcpbCase.String()
	}
	return c.Case.Desc()
}
// TestCase returns the "rpcpb.Case" enum of this case.
func (c *caseUntilSnapshot) TestCase() rpcpb.Case {
	return c.rpcpbCase
}

View File

@ -20,22 +20,22 @@ import (
"go.uber.org/zap"
)
type failureDelay struct {
Failure
type caseDelay struct {
Case
delayDuration time.Duration
}
func (f *failureDelay) Inject(clus *Cluster) error {
if err := f.Failure.Inject(clus); err != nil {
func (c *caseDelay) Inject(clus *Cluster) error {
if err := c.Case.Inject(clus); err != nil {
return err
}
if f.delayDuration > 0 {
if c.delayDuration > 0 {
clus.lg.Info(
"wait after inject",
zap.Duration("delay", f.delayDuration),
zap.String("desc", f.Failure.Desc()),
zap.Duration("delay", c.delayDuration),
zap.String("desc", c.Case.Desc()),
)
time.Sleep(f.delayDuration)
time.Sleep(c.delayDuration)
}
return nil
}

View File

@ -21,35 +21,35 @@ import (
"github.com/coreos/etcd/functional/rpcpb"
)
type failureExternal struct {
Failure
type caseExternal struct {
Case
desc string
failureCase rpcpb.FailureCase
desc string
rpcpbCase rpcpb.Case
scriptPath string
}
func (f *failureExternal) Inject(clus *Cluster) error {
return exec.Command(f.scriptPath, "enable", fmt.Sprintf("%d", clus.rd)).Run()
func (c *caseExternal) Inject(clus *Cluster) error {
return exec.Command(c.scriptPath, "enable", fmt.Sprintf("%d", clus.rd)).Run()
}
func (f *failureExternal) Recover(clus *Cluster) error {
return exec.Command(f.scriptPath, "disable", fmt.Sprintf("%d", clus.rd)).Run()
func (c *caseExternal) Recover(clus *Cluster) error {
return exec.Command(c.scriptPath, "disable", fmt.Sprintf("%d", clus.rd)).Run()
}
func (f *failureExternal) Desc() string {
return f.desc
func (c *caseExternal) Desc() string {
return c.desc
}
func (f *failureExternal) FailureCase() rpcpb.FailureCase {
return f.failureCase
func (c *caseExternal) TestCase() rpcpb.Case {
return c.rpcpbCase
}
func new_FailureCase_EXTERNAL(scriptPath string) Failure {
return &failureExternal{
desc: fmt.Sprintf("external fault injector (script: %q)", scriptPath),
failureCase: rpcpb.FailureCase_EXTERNAL,
scriptPath: scriptPath,
func new_Case_EXTERNAL(scriptPath string) Case {
return &caseExternal{
desc: fmt.Sprintf("external fault injector (script: %q)", scriptPath),
rpcpbCase: rpcpb.Case_EXTERNAL,
scriptPath: scriptPath,
}
}

View File

@ -32,7 +32,7 @@ type failpointStats struct {
var fpStats failpointStats
func failpointFailures(clus *Cluster) (ret []Failure, err error) {
func failpointFailures(clus *Cluster) (ret []Case, err error) {
var fps []string
fps, err = failpointPaths(clus.Members[0].FailpointHTTPAddr)
if err != nil {
@ -44,21 +44,21 @@ func failpointFailures(clus *Cluster) (ret []Failure, err error) {
continue
}
fpFails := failuresFromFailpoint(fp, clus.Tester.FailpointCommands)
fpFails := casesFromFailpoint(fp, clus.Tester.FailpointCommands)
// wrap in delays so failpoint has time to trigger
for i, fpf := range fpFails {
if strings.Contains(fp, "Snap") {
// hack to trigger snapshot failpoints
fpFails[i] = &failureUntilSnapshot{
desc: fpf.Desc(),
failureCase: rpcpb.FailureCase_FAILPOINTS,
Failure: fpf,
fpFails[i] = &caseUntilSnapshot{
desc: fpf.Desc(),
rpcpbCase: rpcpb.Case_FAILPOINTS,
Case: fpf,
}
} else {
fpFails[i] = &failureDelay{
Failure: fpf,
delayDuration: clus.GetFailureDelayDuration(),
fpFails[i] = &caseDelay{
Case: fpf,
delayDuration: clus.GetCaseDelayDuration(),
}
}
}
@ -86,45 +86,45 @@ func failpointPaths(endpoint string) ([]string, error) {
return fps, nil
}
// failpoints follows FreeBSD KFAIL_POINT syntax.
// failpoints follows FreeBSD FAIL_POINT syntax.
// e.g. panic("etcd-tester"),1*sleep(1000)->panic("etcd-tester")
func failuresFromFailpoint(fp string, failpointCommands []string) (fs []Failure) {
func casesFromFailpoint(fp string, failpointCommands []string) (fs []Case) {
recov := makeRecoverFailpoint(fp)
for _, fcmd := range failpointCommands {
inject := makeInjectFailpoint(fp, fcmd)
fs = append(fs, []Failure{
&failureFollower{
failureByFunc: failureByFunc{
fs = append(fs, []Case{
&caseFollower{
caseByFunc: caseByFunc{
desc: fmt.Sprintf("failpoint %q (one: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
rpcpbCase: rpcpb.Case_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
},
last: -1,
lead: -1,
},
&failureLeader{
failureByFunc: failureByFunc{
&caseLeader{
caseByFunc: caseByFunc{
desc: fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
rpcpbCase: rpcpb.Case_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
},
last: -1,
lead: -1,
},
&failureQuorum{
failureByFunc: failureByFunc{
&caseQuorum{
caseByFunc: caseByFunc{
desc: fmt.Sprintf("failpoint %q (quorum: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
rpcpbCase: rpcpb.Case_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
},
injected: make(map[int]struct{}),
},
&failureAll{
&caseAll{
desc: fmt.Sprintf("failpoint %q (all: %q)", fp, fcmd),
failureCase: rpcpb.FailureCase_FAILPOINTS,
rpcpbCase: rpcpb.Case_FAILPOINTS,
injectMember: inject,
recoverMember: recov,
},

View File

@ -0,0 +1,104 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import "github.com/coreos/etcd/functional/rpcpb"
// inject_BLACKHOLE_PEER_PORT_TX_RX sends the BLACKHOLE_PEER_PORT_TX_RX
// operation to the member at index idx, dropping its peer-port traffic.
func inject_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
	return clus.sendOp(idx, rpcpb.Operation_BLACKHOLE_PEER_PORT_TX_RX)
}
// recover_BLACKHOLE_PEER_PORT_TX_RX sends the UNBLACKHOLE_PEER_PORT_TX_RX
// operation to the member at index idx, restoring its peer-port traffic.
func recover_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
	return clus.sendOp(idx, rpcpb.Operation_UNBLACKHOLE_PEER_PORT_TX_RX)
}
// new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER builds a case that
// blackholes peer traffic on one randomly chosen follower, then waits
// the configured case delay before recovery.
func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster) Case {
	f := &caseFollower{
		caseByFunc: caseByFunc{
			rpcpbCase:     rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER,
			injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
			recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
		},
		last: -1,
		lead: -1,
	}
	return &caseDelay{
		Case:          f,
		delayDuration: clus.GetCaseDelayDuration(),
	}
}
// new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
// builds a case that blackholes peer traffic on one randomly chosen
// follower and keeps it in place until a snapshot is triggered.
func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT() Case {
	f := &caseFollower{
		caseByFunc: caseByFunc{
			rpcpbCase:     rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
			injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
			recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
		},
		last: -1,
		lead: -1,
	}
	return &caseUntilSnapshot{
		rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
		Case:      f,
	}
}
// new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER builds a case that
// blackholes peer traffic on the current leader, then waits the
// configured case delay before recovery.
func new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus *Cluster) Case {
	l := &caseLeader{
		caseByFunc: caseByFunc{
			rpcpbCase:     rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER,
			injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
			recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
		},
		last: -1,
		lead: -1,
	}
	return &caseDelay{
		Case:          l,
		delayDuration: clus.GetCaseDelayDuration(),
	}
}
// new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
// builds a case that blackholes peer traffic on the current leader and
// keeps it in place until a snapshot is triggered.
func new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT() Case {
	l := &caseLeader{
		caseByFunc: caseByFunc{
			rpcpbCase:     rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
			injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
			recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
		},
		last: -1,
		lead: -1,
	}
	return &caseUntilSnapshot{
		rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
		Case:      l,
	}
}
// new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM builds a case that
// blackholes peer traffic on a randomly picked quorum of members, then
// waits the configured case delay before recovery.
func new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus *Cluster) Case {
	q := &caseQuorum{
		injected: make(map[int]struct{}),
		caseByFunc: caseByFunc{
			rpcpbCase:     rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM,
			injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
			recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
		},
	}
	return &caseDelay{
		delayDuration: clus.GetCaseDelayDuration(),
		Case:          q,
	}
}
// new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL builds a case that blackholes
// peer traffic on every member, then waits the configured case delay
// before recovery.
func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus *Cluster) Case {
	a := &caseAll{
		rpcpbCase:     rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ALL,
		injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
		recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
	}
	return &caseDelay{
		delayDuration: clus.GetCaseDelayDuration(),
		Case:          a,
	}
}

View File

@ -46,82 +46,82 @@ func recover_DELAY_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
return err
}
func new_FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER,
func new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster, random bool) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER,
injectMember: inject_DELAY_PEER_PORT_TX_RX,
recoverMember: recover_DELAY_PEER_PORT_TX_RX,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
}
f := &failureFollower{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
c := &caseFollower{cc, -1, -1}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
func new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
injectMember: inject_DELAY_PEER_PORT_TX_RX,
recoverMember: recover_DELAY_PEER_PORT_TX_RX,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
}
f := &failureFollower{ff, -1, -1}
return &failureUntilSnapshot{
failureCase: ff.failureCase,
Failure: f,
c := &caseFollower{cc, -1, -1}
return &caseUntilSnapshot{
rpcpbCase: cc.rpcpbCase,
Case: c,
}
}
func new_FailureCase_DELAY_PEER_PORT_TX_RX_LEADER(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER,
func new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus *Cluster, random bool) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER,
injectMember: inject_DELAY_PEER_PORT_TX_RX,
recoverMember: recover_DELAY_PEER_PORT_TX_RX,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER
cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER
}
f := &failureLeader{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
c := &caseLeader{cc, -1, -1}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
func new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
injectMember: inject_DELAY_PEER_PORT_TX_RX,
recoverMember: recover_DELAY_PEER_PORT_TX_RX,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
ff.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
}
f := &failureLeader{ff, -1, -1}
return &failureUntilSnapshot{
failureCase: ff.failureCase,
Failure: f,
c := &caseLeader{cc, -1, -1}
return &caseUntilSnapshot{
rpcpbCase: cc.rpcpbCase,
Case: c,
}
}
func new_FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM(clus *Cluster, random bool) Failure {
f := &failureQuorum{
failureByFunc: failureByFunc{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM,
func new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus *Cluster, random bool) Case {
c := &caseQuorum{
caseByFunc: caseByFunc{
rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_QUORUM,
injectMember: inject_DELAY_PEER_PORT_TX_RX,
recoverMember: recover_DELAY_PEER_PORT_TX_RX,
},
@ -130,27 +130,27 @@ func new_FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM(clus *Cluster, random bool) Fa
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
f.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM
c.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM
}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_FailureCase_DELAY_PEER_PORT_TX_RX_ALL(clus *Cluster, random bool) Failure {
f := &failureAll{
failureCase: rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ALL,
func new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus *Cluster, random bool) Case {
c := &caseAll{
rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ALL,
injectMember: inject_DELAY_PEER_PORT_TX_RX,
recoverMember: recover_DELAY_PEER_PORT_TX_RX,
}
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
if random {
clus.UpdateDelayLatencyMs()
f.failureCase = rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ALL
c.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL
}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}

View File

@ -0,0 +1,99 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"time"
"github.com/coreos/etcd/functional/rpcpb"
"go.uber.org/zap"
)
type caseNoFailWithStress caseByFunc
func (c *caseNoFailWithStress) Inject(clus *Cluster) error {
return nil
}
func (c *caseNoFailWithStress) Recover(clus *Cluster) error {
return nil
}
func (c *caseNoFailWithStress) Desc() string {
if c.desc != "" {
return c.desc
}
return c.rpcpbCase.String()
}
func (c *caseNoFailWithStress) TestCase() rpcpb.Case {
return c.rpcpbCase
}
func new_Case_NO_FAIL_WITH_STRESS(clus *Cluster) Case {
c := &caseNoFailWithStress{
rpcpbCase: rpcpb.Case_NO_FAIL_WITH_STRESS,
}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
type caseNoFailWithNoStressForLiveness caseByFunc
func (c *caseNoFailWithNoStressForLiveness) Inject(clus *Cluster) error {
clus.lg.Info(
"extra delay for liveness mode with no stresser",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", c.Desc()),
)
time.Sleep(clus.GetCaseDelayDuration())
clus.lg.Info(
"wait health in liveness mode",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", c.Desc()),
)
return clus.WaitHealth()
}
func (c *caseNoFailWithNoStressForLiveness) Recover(clus *Cluster) error {
return nil
}
func (c *caseNoFailWithNoStressForLiveness) Desc() string {
if c.desc != "" {
return c.desc
}
return c.rpcpbCase.String()
}
func (c *caseNoFailWithNoStressForLiveness) TestCase() rpcpb.Case {
return c.rpcpbCase
}
func new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus *Cluster) Case {
c := &caseNoFailWithNoStressForLiveness{
rpcpbCase: rpcpb.Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS,
}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}

View File

@ -174,43 +174,43 @@ func recover_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
return err
}
func new_FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus *Cluster) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER,
func new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus *Cluster) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER,
injectMember: inject_SIGQUIT_ETCD_AND_REMOVE_DATA,
recoverMember: recover_SIGQUIT_ETCD_AND_REMOVE_DATA,
}
f := &failureFollower{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
c := &caseFollower{cc, -1, -1}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Failure {
return &failureUntilSnapshot{
failureCase: rpcpb.FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
Failure: new_FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus),
func new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
return &caseUntilSnapshot{
rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
Case: new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus),
}
}
func new_FailureCase_SIGQUIT_AND_REMOVE_LEADER(clus *Cluster) Failure {
ff := failureByFunc{
failureCase: rpcpb.FailureCase_SIGQUIT_AND_REMOVE_LEADER,
func new_Case_SIGQUIT_AND_REMOVE_LEADER(clus *Cluster) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_LEADER,
injectMember: inject_SIGQUIT_ETCD_AND_REMOVE_DATA,
recoverMember: recover_SIGQUIT_ETCD_AND_REMOVE_DATA,
}
f := &failureLeader{ff, -1, -1}
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
c := &caseLeader{cc, -1, -1}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_FailureCase_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Failure {
return &failureUntilSnapshot{
failureCase: rpcpb.FailureCase_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT,
Failure: new_FailureCase_SIGQUIT_AND_REMOVE_LEADER(clus),
func new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
return &caseUntilSnapshot{
rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT,
Case: new_Case_SIGQUIT_AND_REMOVE_LEADER(clus),
}
}

View File

@ -24,20 +24,20 @@ import (
"go.uber.org/zap"
)
type fetchSnapshotAndFailureQuorum struct {
type fetchSnapshotCaseQuorum struct {
desc string
failureCase rpcpb.FailureCase
rpcpbCase rpcpb.Case
injected map[int]struct{}
snapshotted int
}
func (f *fetchSnapshotAndFailureQuorum) Inject(clus *Cluster) error {
func (c *fetchSnapshotCaseQuorum) Inject(clus *Cluster) error {
// 1. Assume node C is the current leader with most up-to-date data.
lead, err := clus.GetLeader()
if err != nil {
return err
}
f.snapshotted = lead
c.snapshotted = lead
// 2. Download snapshot from node C, before destroying node A and B.
clus.lg.Info(
@ -101,12 +101,12 @@ func (f *fetchSnapshotAndFailureQuorum) Inject(clus *Cluster) error {
// 3. Destroy node A and B, and make the whole cluster inoperable.
for {
f.injected = pickQuorum(len(clus.Members))
if _, ok := f.injected[lead]; !ok {
c.injected = pickQuorum(len(clus.Members))
if _, ok := c.injected[lead]; !ok {
break
}
}
for idx := range f.injected {
for idx := range c.injected {
clus.lg.Info(
"disastrous machine failure to quorum START",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
@ -139,42 +139,42 @@ func (f *fetchSnapshotAndFailureQuorum) Inject(clus *Cluster) error {
return err
}
func (f *fetchSnapshotAndFailureQuorum) Recover(clus *Cluster) error {
func (c *fetchSnapshotCaseQuorum) Recover(clus *Cluster) error {
// 6. Restore a new seed member from node C's latest snapshot file.
// 7. Add another member to establish 2-node cluster.
// 8. Add another member to establish 3-node cluster.
// for idx := range f.injected {
// if err := f.recoverMember(clus, idx); err != nil {
// for idx := range c.injected {
// if err := c.recoverMember(clus, idx); err != nil {
// return err
// }
// }
return nil
}
func (f *fetchSnapshotAndFailureQuorum) Desc() string {
if f.desc != "" {
return f.desc
func (c *fetchSnapshotCaseQuorum) Desc() string {
if c.desc != "" {
return c.desc
}
return f.failureCase.String()
return c.rpcpbCase.String()
}
func (f *fetchSnapshotAndFailureQuorum) FailureCase() rpcpb.FailureCase {
return f.failureCase
func (c *fetchSnapshotCaseQuorum) TestCase() rpcpb.Case {
return c.rpcpbCase
}
func new_FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Failure {
f := &fetchSnapshotAndFailureQuorum{
failureCase: rpcpb.FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
func new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Case {
c := &fetchSnapshotCaseQuorum{
rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
injected: make(map[int]struct{}),
snapshotted: -1,
}
// simulate real life; machine replacements may happen
// after some time since disaster
return &failureDelay{
Failure: f,
delayDuration: clus.GetFailureDelayDuration(),
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}

View File

@ -0,0 +1,92 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import "github.com/coreos/etcd/functional/rpcpb"
func inject_SIGTERM_ETCD(clus *Cluster, idx int) error {
return clus.sendOp(idx, rpcpb.Operation_SIGTERM_ETCD)
}
func recover_SIGTERM_ETCD(clus *Cluster, idx int) error {
return clus.sendOp(idx, rpcpb.Operation_RESTART_ETCD)
}
func new_Case_SIGTERM_ONE_FOLLOWER(clus *Cluster) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_SIGTERM_ONE_FOLLOWER,
injectMember: inject_SIGTERM_ETCD,
recoverMember: recover_SIGTERM_ETCD,
}
c := &caseFollower{cc, -1, -1}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
return &caseUntilSnapshot{
rpcpbCase: rpcpb.Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
Case: new_Case_SIGTERM_ONE_FOLLOWER(clus),
}
}
func new_Case_SIGTERM_LEADER(clus *Cluster) Case {
cc := caseByFunc{
rpcpbCase: rpcpb.Case_SIGTERM_LEADER,
injectMember: inject_SIGTERM_ETCD,
recoverMember: recover_SIGTERM_ETCD,
}
c := &caseLeader{cc, -1, -1}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
return &caseUntilSnapshot{
rpcpbCase: rpcpb.Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT,
Case: new_Case_SIGTERM_LEADER(clus),
}
}
func new_Case_SIGTERM_QUORUM(clus *Cluster) Case {
c := &caseQuorum{
caseByFunc: caseByFunc{
rpcpbCase: rpcpb.Case_SIGTERM_QUORUM,
injectMember: inject_SIGTERM_ETCD,
recoverMember: recover_SIGTERM_ETCD,
},
injected: make(map[int]struct{}),
}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
func new_Case_SIGTERM_ALL(clus *Cluster) Case {
c := &caseAll{
rpcpbCase: rpcpb.Case_SIGTERM_ALL,
injectMember: inject_SIGTERM_ETCD,
recoverMember: recover_SIGTERM_ETCD,
}
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}

View File

@ -52,7 +52,7 @@ type Cluster struct {
Members []*rpcpb.Member `yaml:"agent-configs"`
Tester *rpcpb.Tester `yaml:"tester-config"`
failures []Failure
cases []Case
rateLimiter *rate.Limiter
stresser Stresser
@ -80,7 +80,7 @@ func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
clus.failures = make([]Failure, 0)
clus.cases = make([]Case, 0)
for i, ap := range clus.Members {
var err error
@ -111,7 +111,7 @@ func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
}
go clus.serveTesterServer()
clus.updateFailures()
clus.updateCases()
clus.rateLimiter = rate.NewLimiter(
rate.Limit(int(clus.Tester.StressQPS)),
@ -139,125 +139,125 @@ func (clus *Cluster) serveTesterServer() {
}
}
func (clus *Cluster) updateFailures() {
for _, cs := range clus.Tester.FailureCases {
func (clus *Cluster) updateCases() {
for _, cs := range clus.Tester.Cases {
switch cs {
case "SIGTERM_ONE_FOLLOWER":
clus.failures = append(clus.failures,
new_FailureCase_SIGTERM_ONE_FOLLOWER(clus))
clus.cases = append(clus.cases,
new_Case_SIGTERM_ONE_FOLLOWER(clus))
case "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
clus.cases = append(clus.cases,
new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
case "SIGTERM_LEADER":
clus.failures = append(clus.failures,
new_FailureCase_SIGTERM_LEADER(clus))
clus.cases = append(clus.cases,
new_Case_SIGTERM_LEADER(clus))
case "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
clus.cases = append(clus.cases,
new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
case "SIGTERM_QUORUM":
clus.failures = append(clus.failures,
new_FailureCase_SIGTERM_QUORUM(clus))
clus.cases = append(clus.cases,
new_Case_SIGTERM_QUORUM(clus))
case "SIGTERM_ALL":
clus.failures = append(clus.failures,
new_FailureCase_SIGTERM_ALL(clus))
clus.cases = append(clus.cases,
new_Case_SIGTERM_ALL(clus))
case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER":
clus.failures = append(clus.failures,
new_FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus))
clus.cases = append(clus.cases,
new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus))
case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
clus.cases = append(clus.cases,
new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
case "SIGQUIT_AND_REMOVE_LEADER":
clus.failures = append(clus.failures,
new_FailureCase_SIGQUIT_AND_REMOVE_LEADER(clus))
clus.cases = append(clus.cases,
new_Case_SIGQUIT_AND_REMOVE_LEADER(clus))
case "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
clus.cases = append(clus.cases,
new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
case "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH":
clus.failures = append(clus.failures,
new_FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus))
clus.cases = append(clus.cases,
new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
clus.failures = append(clus.failures,
new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus))
clus.cases = append(clus.cases,
new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT())
clus.cases = append(clus.cases,
new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT())
case "BLACKHOLE_PEER_PORT_TX_RX_LEADER":
clus.failures = append(clus.failures,
new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus))
clus.cases = append(clus.cases,
new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT())
clus.cases = append(clus.cases,
new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT())
case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM":
clus.failures = append(clus.failures,
new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus))
clus.cases = append(clus.cases,
new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus))
case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
clus.failures = append(clus.failures,
new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus))
clus.cases = append(clus.cases,
new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus))
case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, false))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, true))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, true))
case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
case "DELAY_PEER_PORT_TX_RX_LEADER":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_LEADER(clus, false))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_LEADER(clus, true))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, true))
case "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
case "DELAY_PEER_PORT_TX_RX_QUORUM":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM(clus, false))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_QUORUM(clus, true))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, true))
case "DELAY_PEER_PORT_TX_RX_ALL":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_ALL(clus, false))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, false))
case "RANDOM_DELAY_PEER_PORT_TX_RX_ALL":
clus.failures = append(clus.failures,
new_FailureCase_DELAY_PEER_PORT_TX_RX_ALL(clus, true))
clus.cases = append(clus.cases,
new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, true))
case "NO_FAIL_WITH_STRESS":
clus.failures = append(clus.failures,
new_FailureCase_NO_FAIL_WITH_STRESS(clus))
clus.cases = append(clus.cases,
new_Case_NO_FAIL_WITH_STRESS(clus))
case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":
clus.failures = append(clus.failures,
new_FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus))
clus.cases = append(clus.cases,
new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus))
case "EXTERNAL":
clus.failures = append(clus.failures,
new_FailureCase_EXTERNAL(clus.Tester.ExternalExecPath))
clus.cases = append(clus.cases,
new_Case_EXTERNAL(clus.Tester.ExternalExecPath))
case "FAILPOINTS":
fpFailures, fperr := failpointFailures(clus)
if len(fpFailures) == 0 {
clus.lg.Info("no failpoints found!", zap.Error(fperr))
}
clus.failures = append(clus.failures,
clus.cases = append(clus.cases,
fpFailures...)
}
}
}
func (clus *Cluster) failureStrings() (fs []string) {
fs = make([]string, len(clus.failures))
for i := range clus.failures {
fs[i] = clus.failures[i].Desc()
fs = make([]string, len(clus.cases))
for i := range clus.cases {
fs[i] = clus.cases[i].Desc()
}
return fs
}
@ -320,7 +320,7 @@ func (clus *Cluster) checkConsistency() (err error) {
"consistency check ALL PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.String("desc", clus.failures[clus.cs].Desc()),
zap.String("desc", clus.cases[clus.cs].Desc()),
)
return err
@ -699,14 +699,14 @@ func (clus *Cluster) defrag() error {
"defrag ALL PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
)
return nil
}
// GetFailureDelayDuration computes failure delay duration.
func (clus *Cluster) GetFailureDelayDuration() time.Duration {
return time.Duration(clus.Tester.FailureDelayMs) * time.Millisecond
// GetCaseDelayDuration computes failure delay duration.
func (clus *Cluster) GetCaseDelayDuration() time.Duration {
return time.Duration(clus.Tester.CaseDelayMs) * time.Millisecond
}
// Report reports the number of modified keys.

View File

@ -320,8 +320,8 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
}
}
if len(clus.Tester.FailureCases) == 0 {
return nil, errors.New("FailureCases not found")
if len(clus.Tester.Cases) == 0 {
return nil, errors.New("Cases not found")
}
if clus.Tester.DelayLatencyMs <= clus.Tester.DelayLatencyMsRv*5 {
return nil, fmt.Errorf("delay latency %d ms must be greater than 5x of delay latency random variable %d ms", clus.Tester.DelayLatencyMs, clus.Tester.DelayLatencyMsRv)
@ -330,9 +330,9 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
}
for _, v := range clus.Tester.FailureCases {
if _, ok := rpcpb.FailureCase_value[v]; !ok {
return nil, fmt.Errorf("%q is not defined in 'rpcpb.FailureCase_value'", v)
for _, v := range clus.Tester.Cases {
if _, ok := rpcpb.Case_value[v]; !ok {
return nil, fmt.Errorf("%q is not defined in 'rpcpb.Case_value'", v)
}
}

View File

@ -51,7 +51,7 @@ func (clus *Cluster) Run() {
"round FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Error(err),
)
if clus.cleanup() != nil {
@ -75,7 +75,7 @@ func (clus *Cluster) Run() {
"compact START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Duration("timeout", timeout),
)
if err := clus.compact(revToCompact, timeout); err != nil {
@ -83,7 +83,7 @@ func (clus *Cluster) Run() {
"compact FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Error(err),
)
if err = clus.cleanup(); err != nil {
@ -91,7 +91,7 @@ func (clus *Cluster) Run() {
"cleanup FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Error(err),
)
return
@ -111,13 +111,13 @@ func (clus *Cluster) Run() {
"functional-tester PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
)
}
func (clus *Cluster) doRound() error {
if clus.Tester.FailureShuffle {
clus.shuffleFailures()
if clus.Tester.CaseShuffle {
clus.shuffleCases()
}
roundNow := time.Now()
@ -125,10 +125,10 @@ func (clus *Cluster) doRound() error {
"round START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Strings("failures", clus.failureStrings()),
)
for i, fa := range clus.failures {
for i, fa := range clus.cases {
clus.cs = i
caseTotal[fa.Desc()]++
@ -139,7 +139,7 @@ func (clus *Cluster) doRound() error {
"case START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
@ -149,13 +149,13 @@ func (clus *Cluster) doRound() error {
}
stressStarted := false
fcase := fa.FailureCase()
if fcase != rpcpb.FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS {
fcase := fa.TestCase()
if fcase != rpcpb.Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS {
clus.lg.Info(
"stress START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
if err := clus.stresser.Stress(); err != nil {
@ -168,7 +168,7 @@ func (clus *Cluster) doRound() error {
"inject START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
if err := fa.Inject(clus); err != nil {
@ -182,7 +182,7 @@ func (clus *Cluster) doRound() error {
"recover START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
if err := fa.Recover(clus); err != nil {
@ -194,11 +194,11 @@ func (clus *Cluster) doRound() error {
"stress PAUSE",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
ems := clus.stresser.Pause()
if fcase == rpcpb.FailureCase_NO_FAIL_WITH_STRESS && len(ems) > 0 {
if fcase == rpcpb.Case_NO_FAIL_WITH_STRESS && len(ems) > 0 {
ess := make([]string, 0, len(ems))
cnt := 0
for k, v := range ems {
@ -223,7 +223,7 @@ func (clus *Cluster) doRound() error {
"health check START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
if err := clus.WaitHealth(); err != nil {
@ -234,7 +234,7 @@ func (clus *Cluster) doRound() error {
"consistency check START",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
)
if err := clus.checkConsistency(); err != nil {
@ -245,7 +245,7 @@ func (clus *Cluster) doRound() error {
"case PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.String("desc", fa.Desc()),
zap.Duration("took", time.Since(caseNow)),
)
@ -255,7 +255,7 @@ func (clus *Cluster) doRound() error {
"round ALL PASS",
zap.Int("round", clus.rd),
zap.Strings("failures", clus.failureStrings()),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Duration("took", time.Since(roundNow)),
)
return nil
@ -314,7 +314,7 @@ func (clus *Cluster) failed() {
"functional-tester FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
)
clus.Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT()
@ -322,14 +322,14 @@ func (clus *Cluster) failed() {
}
func (clus *Cluster) cleanup() error {
if clus.Tester.ExitOnFailure {
if clus.Tester.ExitOnCaseFail {
defer clus.failed()
}
roundFailedTotalCounter.Inc()
desc := "compact/defrag"
if clus.cs != -1 {
desc = clus.failures[clus.cs].Desc()
desc = clus.cases[clus.cs].Desc()
}
caseFailedTotalCounter.WithLabelValues(desc).Inc()
@ -337,7 +337,7 @@ func (clus *Cluster) cleanup() error {
"closing stressers before archiving failure data",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
)
clus.stresser.Close()
@ -346,7 +346,7 @@ func (clus *Cluster) cleanup() error {
"cleanup FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Error(err),
)
return err
@ -356,7 +356,7 @@ func (clus *Cluster) cleanup() error {
"restart FAIL",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.failures)),
zap.Int("case-total", len(clus.cases)),
zap.Error(err),
)
return err

View File

@ -21,17 +21,17 @@ import (
"go.uber.org/zap"
)
func (clus *Cluster) shuffleFailures() {
func (clus *Cluster) shuffleCases() {
rand.Seed(time.Now().UnixNano())
offset := rand.Intn(1000)
n := len(clus.failures)
n := len(clus.cases)
cp := coprime(n)
fs := make([]Failure, n)
css := make([]Case, n)
for i := 0; i < n; i++ {
fs[i] = clus.failures[(cp*i+offset)%n]
css[i] = clus.cases[(cp*i+offset)%n]
}
clus.failures = fs
clus.cases = css
clus.lg.Info("shuffled test failure cases", zap.Int("total", n))
}

View File

@ -189,12 +189,12 @@ func Test_read(t *testing.T) {
DelayLatencyMsRv: 500,
UpdatedDelayLatencyMs: 5000,
RoundLimit: 1,
ExitOnFailure: true,
ExitOnCaseFail: true,
ConsistencyCheck: true,
EnablePprof: true,
FailureDelayMs: 7000,
FailureShuffle: true,
FailureCases: []string{
CaseDelayMs: 7000,
CaseShuffle: true,
Cases: []string{
"SIGTERM_ONE_FOLLOWER",
"SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
"SIGTERM_LEADER",
@ -259,16 +259,16 @@ func Test_read(t *testing.T) {
cfg.lg = logger
cfg.updateFailures()
cfg.updateCases()
fs1 := cfg.failureStrings()
cfg.shuffleFailures()
cfg.shuffleCases()
fs2 := cfg.failureStrings()
if reflect.DeepEqual(fs1, fs2) {
t.Fatalf("expected shuffled failure cases, got %q", fs2)
}
cfg.shuffleFailures()
cfg.shuffleCases()
fs3 := cfg.failureStrings()
if reflect.DeepEqual(fs2, fs3) {
t.Fatalf("expected reshuffled failure cases from %q, got %q", fs2, fs3)

View File

@ -1,320 +0,0 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"fmt"
"math/rand"
"time"
"github.com/coreos/etcd/functional/rpcpb"
"go.uber.org/zap"
)
// Failure defines failure injection interface.
// To add a fail case:
// 1. implement "Failure" interface
// 2. define fail case name in "rpcpb.FailureCase"
type Failure interface {
// Inject injeccts the failure into the testing cluster at the given
// round. When calling the function, the cluster should be in health.
Inject(clus *Cluster) error
// Recover recovers the injected failure caused by the injection of the
// given round and wait for the recovery of the testing cluster.
Recover(clus *Cluster) error
// Desc returns a description of the failure
Desc() string
// FailureCase returns "rpcpb.FailureCase" enum type.
FailureCase() rpcpb.FailureCase
}
// injectMemberFunc injects a fault into the cluster member at the given index.
type injectMemberFunc func(*Cluster, int) error

// recoverMemberFunc reverts a previously injected fault on the member at the
// given index.
type recoverMemberFunc func(*Cluster, int) error

// failureByFunc implements Failure with pluggable per-member inject/recover
// functions; concrete failure types embed it and choose which members to hit.
type failureByFunc struct {
	// desc optionally overrides the enum name in Desc.
	desc          string
	failureCase   rpcpb.FailureCase
	injectMember  injectMemberFunc
	recoverMember recoverMemberFunc
}

// Desc returns the custom description if set, otherwise the enum name.
func (f *failureByFunc) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	return f.failureCase.String()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureByFunc) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}
// failureFollower injects/recovers a member fault on one follower,
// rotating through members across rounds while skipping the leader.
type failureFollower struct {
	failureByFunc
	// last is the index of the most recently targeted member (-1 before first run).
	last int
	// lead is the leader index observed by the last updateIndex call.
	lead int
}
// updateIndex refreshes the cached leader index and selects the next member
// to target, never picking the leader itself.
func (f *failureFollower) updateIndex(clus *Cluster) error {
	lead, err := clus.GetLeader()
	if err != nil {
		return err
	}
	f.lead = lead

	n := len(clus.Members)
	switch f.last {
	case -1: // first run: seed from the current round
		f.last = clus.rd % n
	default: // subsequent runs: advance to the next member
		f.last = (f.last + 1) % n
	}
	// skip the leader; a follower failure must not target it
	if f.last == f.lead {
		f.last = (f.last + 1) % n
	}
	return nil
}
// Inject picks the next follower and injects the member fault into it.
func (f *failureFollower) Inject(clus *Cluster) error {
	if err := f.updateIndex(clus); err != nil {
		return err
	}
	return f.injectMember(clus, f.last)
}

// Recover reverts the fault on the member targeted by the last Inject.
func (f *failureFollower) Recover(clus *Cluster) error {
	return f.recoverMember(clus, f.last)
}

// Desc returns the custom description if set, otherwise the enum name.
func (f *failureFollower) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	return f.failureCase.String()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureFollower) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}
// failureLeader injects/recovers a member fault on the current leader; the
// target index is refreshed from the cluster before every injection.
type failureLeader struct {
	failureByFunc
	// last is the index of the most recently targeted member.
	last int
	// lead is the leader index observed by the last updateIndex call.
	lead int
}

// updateIndex caches the current leader index as the next target.
func (f *failureLeader) updateIndex(clus *Cluster) error {
	lead, err := clus.GetLeader()
	if err != nil {
		return err
	}
	f.lead = lead
	f.last = lead
	return nil
}

// Inject looks up the current leader and injects the member fault into it.
func (f *failureLeader) Inject(clus *Cluster) error {
	if err := f.updateIndex(clus); err != nil {
		return err
	}
	return f.injectMember(clus, f.last)
}

// Recover reverts the fault on the member targeted by the last Inject.
func (f *failureLeader) Recover(clus *Cluster) error {
	return f.recoverMember(clus, f.last)
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureLeader) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}
// failureQuorum injects/recovers a member fault on a randomly chosen quorum
// (majority) of members at once.
type failureQuorum struct {
	failureByFunc
	// injected records the member indexes hit by the last Inject,
	// so Recover reverts exactly those members.
	injected map[int]struct{}
}

// Inject picks a fresh random quorum and injects the fault into each of its members.
func (f *failureQuorum) Inject(clus *Cluster) error {
	f.injected = pickQuorum(len(clus.Members))
	for idx := range f.injected {
		if err := f.injectMember(clus, idx); err != nil {
			return err
		}
	}
	return nil
}

// Recover reverts the fault on every member hit by the last Inject.
func (f *failureQuorum) Recover(clus *Cluster) error {
	for idx := range f.injected {
		if err := f.recoverMember(clus, idx); err != nil {
			return err
		}
	}
	return nil
}

// Desc returns the custom description if set, otherwise the enum name.
func (f *failureQuorum) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	return f.failureCase.String()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureQuorum) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}
// pickQuorum returns a random set of size/2+1 distinct member indexes
// in [0, size), i.e. a majority of a cluster with the given size.
func pickQuorum(size int) (picked map[int]struct{}) {
	quorum := size/2 + 1
	picked = make(map[int]struct{}, quorum)
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	for len(picked) < quorum {
		picked[rnd.Intn(size)] = struct{}{}
	}
	return picked
}
// failureAll injects/recovers the member fault on every member of the cluster.
type failureAll failureByFunc

// Inject injects the fault into all members, in index order.
func (f *failureAll) Inject(clus *Cluster) error {
	for i := range clus.Members {
		if err := f.injectMember(clus, i); err != nil {
			return err
		}
	}
	return nil
}

// Recover reverts the fault on all members, in index order.
func (f *failureAll) Recover(clus *Cluster) error {
	for i := range clus.Members {
		if err := f.recoverMember(clus, i); err != nil {
			return err
		}
	}
	return nil
}

// Desc returns the custom description if set, otherwise the enum name.
func (f *failureAll) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	return f.failureCase.String()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureAll) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}
// failureUntilSnapshot injects a failure and waits for a snapshot event
type failureUntilSnapshot struct {
	// desc optionally overrides the enum name in Desc.
	desc        string
	failureCase rpcpb.FailureCase
	// Failure is the wrapped failure whose Inject/Recover are reused.
	Failure
}

// all delay failure cases except the ones failing with latency
// greater than election timeout (trigger leader election and
// cluster keeps operating anyways)
var slowCases = map[rpcpb.FailureCase]bool{
	rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER:                        true,
	rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT:        true,
	rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT: true,
	rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER:                              true,
	rpcpb.FailureCase_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT:              true,
	rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT:       true,
	rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM:                              true,
	rpcpb.FailureCase_RANDOM_DELAY_PEER_PORT_TX_RX_ALL:                                 true,
}
// Inject injects the wrapped failure, then waits until the cluster commits
// more entries than the configured etcd snapshot count, which should force
// a new snapshot to be created before Recover runs.
func (f *failureUntilSnapshot) Inject(clus *Cluster) error {
	if err := f.Failure.Inject(clus); err != nil {
		return err
	}

	snapshotCount := clus.Members[0].Etcd.SnapshotCount
	now := time.Now()
	clus.lg.Info(
		"trigger snapshot START",
		zap.String("desc", f.Desc()),
		zap.Int64("etcd-snapshot-count", snapshotCount),
	)

	// maxRev may fail since failure just injected, retry if failed.
	startRev, err := clus.maxRev()
	for i := 0; i < 10 && startRev == 0; i++ {
		startRev, err = clus.maxRev()
	}
	if startRev == 0 {
		return err
	}
	lastRev := startRev

	// healthy cluster could accept 1000 req/sec at least.
	// 3x time to trigger snapshot.
	retries := int(snapshotCount) / 1000 * 3
	if slowCases[f.FailureCase()] {
		// slow network takes more retries
		retries *= 5
	}
	if retries < 1 {
		// snapshot-count < 1000 would truncate to zero retries and the
		// wait loop below would never run; always poll at least once.
		retries = 1
	}

	for i := 0; i < retries; i++ {
		lastRev, _ = clus.maxRev()
		// If the number of proposals committed is bigger than snapshot count,
		// a new snapshot should have been created.
		diff := lastRev - startRev
		if diff > snapshotCount {
			clus.lg.Info(
				"trigger snapshot PASS",
				zap.Int("retries", i),
				zap.String("desc", f.Desc()),
				zap.Int64("committed-entries", diff),
				zap.Int64("etcd-snapshot-count", snapshotCount),
				zap.Int64("last-revision", lastRev),
				zap.Duration("took", time.Since(now)),
			)
			return nil
		}
		clus.lg.Info(
			"trigger snapshot PROGRESS",
			zap.Int("retries", i),
			zap.Int64("committed-entries", diff),
			zap.Int64("etcd-snapshot-count", snapshotCount),
			zap.Int64("last-revision", lastRev),
			zap.Duration("took", time.Since(now)),
		)
		time.Sleep(time.Second)
	}

	return fmt.Errorf("cluster too slow: only %d commits in %d retries", lastRev-startRev, retries)
}
// Desc returns the custom description, the enum name, or the wrapped
// failure's description, in that order of preference.
func (f *failureUntilSnapshot) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	if f.failureCase.String() != "" {
		return f.failureCase.String()
	}
	return f.Failure.Desc()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureUntilSnapshot) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}

View File

@ -1,104 +0,0 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import "github.com/coreos/etcd/functional/rpcpb"
// inject_BLACKHOLE_PEER_PORT_TX_RX sends the blackhole operation to the
// member at the given index, dropping its peer-port traffic.
func inject_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
	return clus.sendOp(idx, rpcpb.Operation_BLACKHOLE_PEER_PORT_TX_RX)
}

// recover_BLACKHOLE_PEER_PORT_TX_RX sends the unblackhole operation to the
// member at the given index, restoring its peer-port traffic.
func recover_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
	return clus.sendOp(idx, rpcpb.Operation_UNBLACKHOLE_PEER_PORT_TX_RX)
}
// new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER blackholes peer
// traffic of one (rotating) follower, wrapped with the tester's failure delay.
func new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster) Failure {
	ff := failureByFunc{
		failureCase:   rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER,
		injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
		recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
	}
	f := &failureFollower{ff, -1, -1}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

// new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
// blackholes peer traffic of one follower until the cluster triggers a snapshot.
func new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT() Failure {
	ff := failureByFunc{
		failureCase:   rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
		injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
		recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
	}
	f := &failureFollower{ff, -1, -1}
	return &failureUntilSnapshot{
		failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
		Failure:     f,
	}
}

// new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER blackholes peer traffic
// of the current leader, wrapped with the tester's failure delay.
func new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus *Cluster) Failure {
	ff := failureByFunc{
		failureCase:   rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER,
		injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
		recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
	}
	f := &failureLeader{ff, -1, -1}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

// new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
// blackholes peer traffic of the leader until the cluster triggers a snapshot.
func new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT() Failure {
	ff := failureByFunc{
		failureCase:   rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
		injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
		recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
	}
	f := &failureLeader{ff, -1, -1}
	return &failureUntilSnapshot{
		failureCase: rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
		Failure:     f,
	}
}

// new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM blackholes peer traffic
// of a random quorum of members, wrapped with the tester's failure delay.
func new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus *Cluster) Failure {
	f := &failureQuorum{
		failureByFunc: failureByFunc{
			failureCase:   rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_QUORUM,
			injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
			recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
		},
		injected: make(map[int]struct{}),
	}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

// new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL blackholes peer traffic of
// every member, wrapped with the tester's failure delay.
func new_FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus *Cluster) Failure {
	f := &failureAll{
		failureCase:   rpcpb.FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL,
		injectMember:  inject_BLACKHOLE_PEER_PORT_TX_RX,
		recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
	}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

View File

@ -1,99 +0,0 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"time"
"github.com/coreos/etcd/functional/rpcpb"
"go.uber.org/zap"
)
// failureNoFailWithStress is a failure whose Inject and Recover are no-ops;
// presumably the round exercises only the stressers (per the enum name) —
// no fault is ever sent to the cluster.
type failureNoFailWithStress failureByFunc

// Inject is a no-op.
func (f *failureNoFailWithStress) Inject(clus *Cluster) error {
	return nil
}

// Recover is a no-op.
func (f *failureNoFailWithStress) Recover(clus *Cluster) error {
	return nil
}

// Desc returns the custom description if set, otherwise the enum name.
func (f *failureNoFailWithStress) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	return f.failureCase.String()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureNoFailWithStress) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}

// new_FailureCase_NO_FAIL_WITH_STRESS creates the no-op failure,
// wrapped with the tester's failure delay.
func new_FailureCase_NO_FAIL_WITH_STRESS(clus *Cluster) Failure {
	f := &failureNoFailWithStress{
		failureCase: rpcpb.FailureCase_NO_FAIL_WITH_STRESS,
	}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}
// failureNoFailWithNoStressForLiveness injects no fault; its Inject only
// sleeps for the configured failure delay and then waits for cluster health
// (liveness mode).
type failureNoFailWithNoStressForLiveness failureByFunc

// Inject sleeps for the failure delay duration, then blocks until the
// cluster reports healthy; no fault is sent to any member.
func (f *failureNoFailWithNoStressForLiveness) Inject(clus *Cluster) error {
	clus.lg.Info(
		"extra delay for liveness mode with no stresser",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.String("desc", f.Desc()),
	)
	time.Sleep(clus.GetFailureDelayDuration())
	clus.lg.Info(
		"wait health in liveness mode",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.String("desc", f.Desc()),
	)
	return clus.WaitHealth()
}

// Recover is a no-op.
func (f *failureNoFailWithNoStressForLiveness) Recover(clus *Cluster) error {
	return nil
}

// Desc returns the custom description if set, otherwise the enum name.
func (f *failureNoFailWithNoStressForLiveness) Desc() string {
	if f.desc != "" {
		return f.desc
	}
	return f.failureCase.String()
}

// FailureCase returns the "rpcpb.FailureCase" enum of this failure.
func (f *failureNoFailWithNoStressForLiveness) FailureCase() rpcpb.FailureCase {
	return f.failureCase
}

// new_FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS creates the
// liveness-mode no-op failure, wrapped with the tester's failure delay.
func new_FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus *Cluster) Failure {
	f := &failureNoFailWithNoStressForLiveness{
		failureCase: rpcpb.FailureCase_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS,
	}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

View File

@ -1,92 +0,0 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import "github.com/coreos/etcd/functional/rpcpb"
// inject_SIGTERM_ETCD sends the SIGTERM operation to the member at the
// given index, stopping its etcd process.
func inject_SIGTERM_ETCD(clus *Cluster, idx int) error {
	return clus.sendOp(idx, rpcpb.Operation_SIGTERM_ETCD)
}

// recover_SIGTERM_ETCD sends the restart operation to the member at the
// given index, bringing its etcd process back.
func recover_SIGTERM_ETCD(clus *Cluster, idx int) error {
	return clus.sendOp(idx, rpcpb.Operation_RESTART_ETCD)
}
// new_FailureCase_SIGTERM_ONE_FOLLOWER stops one (rotating) follower,
// wrapped with the tester's failure delay.
func new_FailureCase_SIGTERM_ONE_FOLLOWER(clus *Cluster) Failure {
	ff := failureByFunc{
		failureCase:   rpcpb.FailureCase_SIGTERM_ONE_FOLLOWER,
		injectMember:  inject_SIGTERM_ETCD,
		recoverMember: recover_SIGTERM_ETCD,
	}
	f := &failureFollower{ff, -1, -1}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

// new_FailureCase_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops one
// follower and waits until the cluster triggers a snapshot before recovery.
func new_FailureCase_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Failure {
	return &failureUntilSnapshot{
		failureCase: rpcpb.FailureCase_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
		Failure:     new_FailureCase_SIGTERM_ONE_FOLLOWER(clus),
	}
}

// new_FailureCase_SIGTERM_LEADER stops the current leader, wrapped with the
// tester's failure delay.
func new_FailureCase_SIGTERM_LEADER(clus *Cluster) Failure {
	ff := failureByFunc{
		failureCase:   rpcpb.FailureCase_SIGTERM_LEADER,
		injectMember:  inject_SIGTERM_ETCD,
		recoverMember: recover_SIGTERM_ETCD,
	}
	f := &failureLeader{ff, -1, -1}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

// new_FailureCase_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the leader and
// waits until the cluster triggers a snapshot before recovery.
func new_FailureCase_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Failure {
	return &failureUntilSnapshot{
		failureCase: rpcpb.FailureCase_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT,
		Failure:     new_FailureCase_SIGTERM_LEADER(clus),
	}
}

// new_FailureCase_SIGTERM_QUORUM stops a random quorum of members, wrapped
// with the tester's failure delay.
func new_FailureCase_SIGTERM_QUORUM(clus *Cluster) Failure {
	f := &failureQuorum{
		failureByFunc: failureByFunc{
			failureCase:   rpcpb.FailureCase_SIGTERM_QUORUM,
			injectMember:  inject_SIGTERM_ETCD,
			recoverMember: recover_SIGTERM_ETCD,
		},
		injected: make(map[int]struct{}),
	}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}

// new_FailureCase_SIGTERM_ALL stops every member, wrapped with the tester's
// failure delay.
func new_FailureCase_SIGTERM_ALL(clus *Cluster) Failure {
	f := &failureAll{
		failureCase:   rpcpb.FailureCase_SIGTERM_ALL,
		injectMember:  inject_SIGTERM_ETCD,
		recoverMember: recover_SIGTERM_ETCD,
	}
	return &failureDelay{
		Failure:       f,
		delayDuration: clus.GetFailureDelayDuration(),
	}
}