Mirror of https://github.com/etcd-io/etcd.git
Merge pull request #9525 from gyuho/ft
functional-tester: use "clientv3" for stressers
Commit: d7cf2cc03f
@@ -41,15 +41,17 @@ func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn,
 }

 // CreateEtcdClient creates a client from member.
-func (m *Member) CreateEtcdClient() (*clientv3.Client, error) {
+func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error) {
+	cfg := clientv3.Config{
+		Endpoints:   []string{m.EtcdClientEndpoint},
+		DialTimeout: 5 * time.Second,
+		DialOptions: opts,
+	}
 	if m.EtcdClientTLS {
 		// TODO: support TLS
 		panic("client TLS not supported yet")
 	}
-	return clientv3.New(clientv3.Config{
-		Endpoints:   []string{m.EtcdClientEndpoint},
-		DialTimeout: 5 * time.Second,
-	})
+	return clientv3.New(cfg)
 }

 // CheckCompact ensures that historical data before given revision has been compacted.
@@ -124,6 +126,21 @@ func (m *Member) Rev(ctx context.Context) (int64, error) {
 	return resp.Header.Revision, nil
 }

+// Compact compacts member storage with given revision.
+// It blocks until it's physically done.
+func (m *Member) Compact(rev int64, timeout time.Duration) error {
+	cli, err := m.CreateEtcdClient()
+	if err != nil {
+		return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	defer cli.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	_, err = cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
+	cancel()
+	return err
+}
+
 // IsLeader returns true if this member is the current cluster leader.
 func (m *Member) IsLeader() (bool, error) {
 	cli, err := m.CreateEtcdClient()
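For reference, a minimal standalone sketch (not part of this patch) of the clientv3 calls the member helpers above now wrap: build a client from an endpoint with a dial timeout and pass-through grpc.DialOption values, then issue a physical compaction. The endpoint, the revision, and the grpc.WithBlock() option are made-up example values.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc"
)

func main() {
	// Build a client the way the new CreateEtcdClient does: endpoints, a dial
	// timeout, and any extra grpc.DialOption values passed straight through.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // made-up endpoint
		DialTimeout: 5 * time.Second,
		DialOptions: []grpc.DialOption{grpc.WithBlock()}, // illustrative option
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer cli.Close()

	// Compact up to a made-up revision and wait until the compaction is
	// physically applied, mirroring what Member.Compact now does.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	_, err = cli.Compact(ctx, 100, clientv3.WithCompactPhysical())
	cancel()
	fmt.Println("compact error:", err)
}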
@@ -21,7 +21,6 @@ import (

 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

 	"go.uber.org/zap"
@@ -101,22 +100,20 @@ type leaseChecker struct {
 	lg  *zap.Logger
 	m   *rpcpb.Member
 	ls  *leaseStresser
-	lsc pb.LeaseClient
-	kvc pb.KVClient
+	cli *clientv3.Client
 }

 func (lc *leaseChecker) Check() error {
-	conn, err := lc.m.DialEtcdGRPCServer(grpc.WithBackoffMaxDelay(time.Second))
+	cli, err := lc.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(time.Second))
 	if err != nil {
 		return fmt.Errorf("%v (%q)", err, lc.m.EtcdClientEndpoint)
 	}
 	defer func() {
-		if conn != nil {
-			conn.Close()
+		if cli != nil {
+			cli.Close()
 		}
 	}()
-	lc.kvc = pb.NewKVClient(conn)
-	lc.lsc = pb.NewLeaseClient(conn)
+	lc.cli = cli
 	if err := lc.check(true, lc.ls.revokedLeases.leases); err != nil {
 		return err
 	}
@@ -148,7 +145,7 @@ func (lc *leaseChecker) checkShortLivedLeases() error {

 func (lc *leaseChecker) checkShortLivedLease(ctx context.Context, leaseID int64) (err error) {
 	// retry in case of transient failure or lease is expired but not yet revoked due to the fact that etcd cluster didn't have enought time to delete it.
-	var resp *pb.LeaseTimeToLiveResponse
+	var resp *clientv3.LeaseTimeToLiveResponse
 	for i := 0; i < retries; i++ {
 		resp, err = lc.getLeaseByID(ctx, leaseID)
 		// lease not found, for ~v3.1 compatibilities, check ErrLeaseNotFound
@@ -230,9 +227,13 @@ func (lc *leaseChecker) check(expired bool, leases map[int64]time.Time) error {
 	return nil
 }

-func (lc *leaseChecker) getLeaseByID(ctx context.Context, leaseID int64) (*pb.LeaseTimeToLiveResponse, error) {
-	ltl := &pb.LeaseTimeToLiveRequest{ID: leaseID, Keys: true}
-	return lc.lsc.LeaseTimeToLive(ctx, ltl, grpc.FailFast(false))
+// TODO: handle failures from "grpc.FailFast(false)"
+func (lc *leaseChecker) getLeaseByID(ctx context.Context, leaseID int64) (*clientv3.LeaseTimeToLiveResponse, error) {
+	return lc.cli.TimeToLive(
+		ctx,
+		clientv3.LeaseID(leaseID),
+		clientv3.WithAttachedKeys(),
+	)
 }

 func (lc *leaseChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
@@ -261,10 +262,7 @@ func (lc *leaseChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (boo
 // Since the format of keys contains about leaseID, finding keys base on "<leaseID>" prefix
 // determines whether the attached keys for a given leaseID has been deleted or not
 func (lc *leaseChecker) hasKeysAttachedToLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
-	resp, err := lc.kvc.Range(ctx, &pb.RangeRequest{
-		Key:      []byte(fmt.Sprintf("%d", leaseID)),
-		RangeEnd: []byte(clientv3.GetPrefixRangeEnd(fmt.Sprintf("%d", leaseID))),
-	}, grpc.FailFast(false))
+	resp, err := lc.cli.Get(ctx, fmt.Sprintf("%d", leaseID), clientv3.WithPrefix())
 	if err != nil {
 		lc.lg.Warn(
 			"hasKeysAttachedToLeaseExpired failed",
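As an illustration of the lease-checker calls adopted above (again not part of the patch), this sketch asks for a lease's remaining TTL with attached keys and then checks the "<leaseID>" key prefix. The endpoint and lease ID are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

// checkLease mirrors the shape of the checker's new calls: query the lease's
// TTL (with attached keys), then look under the "<leaseID>" prefix to see
// whether the attached keys were cleaned up.
func checkLease(cli *clientv3.Client, leaseID int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := cli.TimeToLive(ctx, clientv3.LeaseID(leaseID), clientv3.WithAttachedKeys())
	if err != nil {
		if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
			return nil // lease already revoked and forgotten by the server
		}
		return err
	}
	if resp.TTL > 0 {
		return fmt.Errorf("lease %016x still alive (TTL=%d, keys=%d)", leaseID, resp.TTL, len(resp.Keys))
	}

	// Attached keys are named "<leaseID>_<n>", so a prefix Get shows whether
	// they were deleted together with the lease.
	gresp, err := cli.Get(ctx, fmt.Sprintf("%d", leaseID), clientv3.WithPrefix())
	if err != nil {
		return err
	}
	if len(gresp.Kvs) != 0 {
		return fmt.Errorf("lease %016x expired but %d keys remain", leaseID, len(gresp.Kvs))
	}
	return nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // made-up endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	fmt.Println(checkLease(cli, 0x1234)) // made-up lease ID
}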
@@ -25,7 +25,6 @@ import (
 	"strings"
 	"time"

-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/debugutil"
 	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

@@ -681,31 +680,14 @@ func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
 	}

 	for i, m := range clus.Members {
-		conn, derr := m.DialEtcdGRPCServer()
-		if derr != nil {
-			clus.lg.Warn(
-				"compactKV dial failed",
-				zap.String("endpoint", m.EtcdClientEndpoint),
-				zap.Error(derr),
-			)
-			err = derr
-			continue
-		}
-		kvc := pb.NewKVClient(conn)
-
 		clus.lg.Info(
 			"compacting",
 			zap.String("endpoint", m.EtcdClientEndpoint),
 			zap.Int64("compact-revision", rev),
 			zap.Duration("timeout", timeout),
 		)
-
 		now := time.Now()
-		ctx, cancel := context.WithTimeout(context.Background(), timeout)
-		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
-		cancel()
-
-		conn.Close()
+		cerr := m.Compact(rev, timeout)
 		succeed := true
 		if cerr != nil {
 			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
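A hedged sketch of the per-member compaction loop this hunk reduces to: the compactOne helper below stands in for Member.Compact, and "required revision has been compacted" errors are tolerated after the first member, as in compactKV. The endpoints and revision are invented for the example.

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// compactOne dials one endpoint, issues a physical compaction at rev, and
// returns the error (if any), the same shape as Member.Compact.
func compactOne(endpoint string, rev int64, timeout time.Duration) error {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{endpoint},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return fmt.Errorf("%v (%q)", err, endpoint)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	_, err = cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	cancel()
	return err
}

func main() {
	endpoints := []string{"127.0.0.1:2379", "127.0.0.1:22379", "127.0.0.1:32379"} // made-up endpoints
	rev := int64(100)                                                             // made-up revision

	for i, ep := range endpoints {
		err := compactOne(ep, rev, 10*time.Second)
		// After the first member is compacted the others may already report
		// the revision as compacted; the tester treats that as success.
		if err != nil && strings.Contains(err.Error(), "required revision has been compacted") && i > 0 {
			err = nil
		}
		fmt.Printf("%s: compact to rev %d, err=%v\n", ep, rev, err)
	}
}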
@@ -22,9 +22,9 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

 	"go.uber.org/zap"
@@ -51,7 +51,7 @@ type keyStresser struct {
 	wg sync.WaitGroup

 	cancel func()
-	conn   *grpc.ClientConn
+	cli    *clientv3.Client
 	// atomicModifiedKeys records the number of keys created and deleted by the stresser.
 	atomicModifiedKeys int64

@@ -60,35 +60,33 @@ type keyStresser struct {

 func (s *keyStresser) Stress() error {
 	// TODO: add backoff option
-	conn, err := s.m.DialEtcdGRPCServer()
+	cli, err := s.m.CreateEtcdClient()
 	if err != nil {
 		return fmt.Errorf("%v (%q)", err, s.m.EtcdClientEndpoint)
 	}
 	ctx, cancel := context.WithCancel(context.Background())

 	s.wg.Add(s.N)
-	s.conn = conn
+	s.cli = cli
 	s.cancel = cancel

-	kvc := pb.NewKVClient(conn)
-
 	var stressEntries = []stressEntry{
-		{weight: 0.7, f: newStressPut(kvc, s.keySuffixRange, s.keySize)},
+		{weight: 0.7, f: newStressPut(cli, s.keySuffixRange, s.keySize)},
 		{
 			weight: 0.7 * float32(s.keySize) / float32(s.keyLargeSize),
-			f:      newStressPut(kvc, s.keySuffixRange, s.keyLargeSize),
+			f:      newStressPut(cli, s.keySuffixRange, s.keyLargeSize),
 		},
-		{weight: 0.07, f: newStressRange(kvc, s.keySuffixRange)},
-		{weight: 0.07, f: newStressRangeInterval(kvc, s.keySuffixRange)},
-		{weight: 0.07, f: newStressDelete(kvc, s.keySuffixRange)},
-		{weight: 0.07, f: newStressDeleteInterval(kvc, s.keySuffixRange)},
+		{weight: 0.07, f: newStressRange(cli, s.keySuffixRange)},
+		{weight: 0.07, f: newStressRangeInterval(cli, s.keySuffixRange)},
+		{weight: 0.07, f: newStressDelete(cli, s.keySuffixRange)},
+		{weight: 0.07, f: newStressDeleteInterval(cli, s.keySuffixRange)},
 	}
 	if s.keyTxnSuffixRange > 0 {
 		// adjust to make up ±70% of workloads with writes
 		stressEntries[0].weight = 0.35
 		stressEntries = append(stressEntries, stressEntry{
 			weight: 0.35,
-			f:      newStressTxn(kvc, s.keyTxnSuffixRange, s.keyTxnOps),
+			f:      newStressTxn(cli, s.keyTxnSuffixRange, s.keyTxnOps),
 		})
 	}
 	s.stressTable = createStressTable(stressEntries)
@@ -167,7 +165,7 @@ func (s *keyStresser) Pause() {

 func (s *keyStresser) Close() {
 	s.cancel()
-	s.conn.Close()
+	s.cli.Close()
 	s.wg.Wait()

 	s.lg.Info(
@@ -216,25 +214,26 @@ func (st *stressTable) choose() stressFunc {
 	return st.entries[idx].f
 }

-func newStressPut(kvc pb.KVClient, keySuffixRange, keySize int) stressFunc {
+func newStressPut(cli *clientv3.Client, keySuffixRange, keySize int) stressFunc {
 	return func(ctx context.Context) (error, int64) {
-		_, err := kvc.Put(ctx, &pb.PutRequest{
-			Key:   []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
-			Value: randBytes(keySize),
-		}, grpc.FailFast(false))
+		_, err := cli.Put(
+			ctx,
+			fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)),
+			string(randBytes(keySize)),
+		)
 		return err, 1
 	}
 }

-func newStressTxn(kvc pb.KVClient, keyTxnSuffixRange, txnOps int) stressFunc {
+func newStressTxn(cli *clientv3.Client, keyTxnSuffixRange, txnOps int) stressFunc {
 	keys := make([]string, keyTxnSuffixRange)
 	for i := range keys {
 		keys[i] = fmt.Sprintf("/k%03d", i)
 	}
-	return writeTxn(kvc, keys, txnOps)
+	return writeTxn(cli, keys, txnOps)
 }

-func writeTxn(kvc pb.KVClient, keys []string, txnOps int) stressFunc {
+func writeTxn(cli *clientv3.Client, keys []string, txnOps int) stressFunc {
 	return func(ctx context.Context) (error, int64) {
 		ks := make(map[string]struct{}, txnOps)
 		for len(ks) != txnOps {
@@ -244,99 +243,75 @@ func writeTxn(kvc pb.KVClient, keys []string, txnOps int) stressFunc {
 			for k := range ks {
 				selected = append(selected, k)
 			}
-			com, delOp, putOp := getTxnReqs(selected[0], "bar00")
-			txnReq := &pb.TxnRequest{
-				Compare: []*pb.Compare{com},
-				Success: []*pb.RequestOp{delOp},
-				Failure: []*pb.RequestOp{putOp},
-			}
-
-			// add nested txns if any
-			for i := 1; i < txnOps; i++ {
+			com, delOp, putOp := getTxnOps(selected[0], "bar00")
+			thenOps := []clientv3.Op{delOp}
+			elseOps := []clientv3.Op{putOp}
+			for i := 1; i < txnOps; i++ { // nested txns
 				k, v := selected[i], fmt.Sprintf("bar%02d", i)
-				com, delOp, putOp = getTxnReqs(k, v)
-				nested := &pb.RequestOp{
-					Request: &pb.RequestOp_RequestTxn{
-						RequestTxn: &pb.TxnRequest{
-							Compare: []*pb.Compare{com},
-							Success: []*pb.RequestOp{delOp},
-							Failure: []*pb.RequestOp{putOp},
-						},
-					},
-				}
-				txnReq.Success = append(txnReq.Success, nested)
-				txnReq.Failure = append(txnReq.Failure, nested)
+				com, delOp, putOp = getTxnOps(k, v)
+				txnOp := clientv3.OpTxn(
+					[]clientv3.Cmp{com},
+					[]clientv3.Op{delOp},
+					[]clientv3.Op{putOp},
+				)
+				thenOps = append(thenOps, txnOp)
+				elseOps = append(elseOps, txnOp)
 			}
-
-			_, err := kvc.Txn(ctx, txnReq, grpc.FailFast(false))
+			_, err := cli.Txn(ctx).
+				If(com).
+				Then(thenOps...).
+				Else(elseOps...).
+				Commit()
 			return err, int64(txnOps)
 		}
 	}

-func getTxnReqs(key, val string) (com *pb.Compare, delOp *pb.RequestOp, putOp *pb.RequestOp) {
+func getTxnOps(k, v string) (
+	cmp clientv3.Cmp,
+	dop clientv3.Op,
+	pop clientv3.Op) {
 	// if key exists (version > 0)
-	com = &pb.Compare{
-		Key:         []byte(key),
-		Target:      pb.Compare_VERSION,
-		Result:      pb.Compare_GREATER,
-		TargetUnion: &pb.Compare_Version{Version: 0},
-	}
-	delOp = &pb.RequestOp{
-		Request: &pb.RequestOp_RequestDeleteRange{
-			RequestDeleteRange: &pb.DeleteRangeRequest{
-				Key: []byte(key),
-			},
-		},
-	}
-	putOp = &pb.RequestOp{
-		Request: &pb.RequestOp_RequestPut{
-			RequestPut: &pb.PutRequest{
-				Key:   []byte(key),
-				Value: []byte(val),
-			},
-		},
-	}
-	return com, delOp, putOp
+	cmp = clientv3.Compare(clientv3.Version(k), ">", 0)
+	dop = clientv3.OpDelete(k)
+	pop = clientv3.OpPut(k, v)
+	return cmp, dop, pop
 }

-func newStressRange(kvc pb.KVClient, keySuffixRange int) stressFunc {
+func newStressRange(cli *clientv3.Client, keySuffixRange int) stressFunc {
 	return func(ctx context.Context) (error, int64) {
-		_, err := kvc.Range(ctx, &pb.RangeRequest{
-			Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
-		}, grpc.FailFast(false))
+		_, err := cli.Get(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)))
 		return err, 0
 	}
 }

-func newStressRangeInterval(kvc pb.KVClient, keySuffixRange int) stressFunc {
+func newStressRangeInterval(cli *clientv3.Client, keySuffixRange int) stressFunc {
 	return func(ctx context.Context) (error, int64) {
 		start := rand.Intn(keySuffixRange)
 		end := start + 500
-		_, err := kvc.Range(ctx, &pb.RangeRequest{
-			Key:      []byte(fmt.Sprintf("foo%016x", start)),
-			RangeEnd: []byte(fmt.Sprintf("foo%016x", end)),
-		}, grpc.FailFast(false))
+		_, err := cli.Get(
+			ctx,
+			fmt.Sprintf("foo%016x", start),
+			clientv3.WithRange(fmt.Sprintf("foo%016x", end)),
+		)
 		return err, 0
 	}
 }

-func newStressDelete(kvc pb.KVClient, keySuffixRange int) stressFunc {
+func newStressDelete(cli *clientv3.Client, keySuffixRange int) stressFunc {
 	return func(ctx context.Context) (error, int64) {
-		_, err := kvc.DeleteRange(ctx, &pb.DeleteRangeRequest{
-			Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
-		}, grpc.FailFast(false))
+		_, err := cli.Delete(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)))
 		return err, 1
 	}
 }

-func newStressDeleteInterval(kvc pb.KVClient, keySuffixRange int) stressFunc {
+func newStressDeleteInterval(cli *clientv3.Client, keySuffixRange int) stressFunc {
 	return func(ctx context.Context) (error, int64) {
 		start := rand.Intn(keySuffixRange)
 		end := start + 500
-		resp, err := kvc.DeleteRange(ctx, &pb.DeleteRangeRequest{
-			Key:      []byte(fmt.Sprintf("foo%016x", start)),
-			RangeEnd: []byte(fmt.Sprintf("foo%016x", end)),
-		}, grpc.FailFast(false))
+		resp, err := cli.Delete(ctx,
+			fmt.Sprintf("foo%016x", start),
+			clientv3.WithRange(fmt.Sprintf("foo%016x", end)),
		)
 		if err == nil {
 			return nil, resp.Deleted
 		}
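The key stresser now issues ordinary clientv3 KV calls and builds nested transactions with clientv3.OpTxn. Below is a self-contained sketch of that pattern; the endpoint, keys, and values are made up, and this is not the stresser itself.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // made-up endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Plain KV calls replacing the old pb.KVClient requests.
	if _, err = cli.Put(ctx, "foo0000000000000001", "bar"); err != nil {
		panic(err)
	}
	if _, err = cli.Get(ctx, "foo", clientv3.WithPrefix()); err != nil {
		panic(err)
	}

	// The getTxnOps pattern: if the key exists (version > 0) delete it,
	// otherwise put it.
	k, v := "/k000", "bar00"
	cmp := clientv3.Compare(clientv3.Version(k), ">", 0)
	dop := clientv3.OpDelete(k)
	pop := clientv3.OpPut(k, v)

	// Nested transactions via clientv3.OpTxn, appended to the Then/Else
	// branches of the top-level Txn (requires etcd 3.3+).
	nested := clientv3.OpTxn(
		[]clientv3.Cmp{clientv3.Compare(clientv3.Version("/k001"), ">", 0)},
		[]clientv3.Op{clientv3.OpDelete("/k001")},
		[]clientv3.Op{clientv3.OpPut("/k001", "bar01")},
	)
	resp, err := cli.Txn(ctx).
		If(cmp).
		Then(dop, nested).
		Else(pop, nested).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("txn succeeded:", resp.Succeeded)
}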
@@ -22,8 +22,8 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

 	"go.uber.org/zap"
@@ -41,11 +41,9 @@ type leaseStresser struct {
 	lg *zap.Logger

 	m      *rpcpb.Member
-	cancel func()
-	conn   *grpc.ClientConn
-	kvc    pb.KVClient
-	lc     pb.LeaseClient
+	cli    *clientv3.Client
 	ctx    context.Context
+	cancel func()

 	rateLimiter *rate.Limiter
 	// atomicModifiedKey records the number of keys created and deleted during a test case
@@ -118,7 +116,6 @@ func (ls *leaseStresser) setupOnce() error {
 	}

 	ls.aliveLeases = &atomicLeases{leases: make(map[int64]time.Time)}
-
 	return nil
 }

@@ -132,20 +129,19 @@ func (ls *leaseStresser) Stress() error {
 		return err
 	}

-	conn, err := ls.m.DialEtcdGRPCServer(grpc.WithBackoffMaxDelay(1 * time.Second))
+	ctx, cancel := context.WithCancel(context.Background())
+	ls.ctx = ctx
+	ls.cancel = cancel
+
+	cli, err := ls.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(1 * time.Second))
 	if err != nil {
 		return fmt.Errorf("%v (%s)", err, ls.m.EtcdClientEndpoint)
 	}
-	ls.conn = conn
-	ls.kvc = pb.NewKVClient(conn)
-	ls.lc = pb.NewLeaseClient(conn)
+	ls.cli = cli
 	ls.revokedLeases = &atomicLeases{leases: make(map[int64]time.Time)}
 	ls.shortLivedLeases = &atomicLeases{leases: make(map[int64]time.Time)}

-	ctx, cancel := context.WithCancel(context.Background())
-	ls.cancel = cancel
-	ls.ctx = ctx
-
 	ls.runWg.Add(1)
 	go ls.run()
 	return nil
@@ -299,17 +295,17 @@ func (ls *leaseStresser) randomlyDropLeases() {
 }

 func (ls *leaseStresser) createLease(ttl int64) (int64, error) {
-	resp, err := ls.lc.LeaseGrant(ls.ctx, &pb.LeaseGrantRequest{TTL: ttl})
+	resp, err := ls.cli.Grant(ls.ctx, ttl)
 	if err != nil {
 		return -1, err
 	}
-	return resp.ID, nil
+	return int64(resp.ID), nil
 }

 func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
 	defer ls.aliveWg.Done()
 	ctx, cancel := context.WithCancel(ls.ctx)
-	stream, err := ls.lc.LeaseKeepAlive(ctx)
+	stream, err := ls.cli.KeepAlive(ctx, clientv3.LeaseID(leaseID))
 	defer func() { cancel() }()
 	for {
 		select {
@@ -347,42 +343,36 @@ func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
 			)
 			cancel()
 			ctx, cancel = context.WithCancel(ls.ctx)
-			stream, err = ls.lc.LeaseKeepAlive(ctx)
+			stream, err = ls.cli.KeepAlive(ctx, clientv3.LeaseID(leaseID))
 			cancel()
 			continue
 		}
+		if err != nil {
+			ls.lg.Debug(
+				"keepLeaseAlive failed to receive lease keepalive response",
+				zap.String("endpoint", ls.m.EtcdClientEndpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Error(err),
+			)
+			continue
+		}
+
 		ls.lg.Debug(
-			"keepLeaseAlive stream sends lease keepalive request",
+			"keepLeaseAlive waiting on lease stream",
 			zap.String("endpoint", ls.m.EtcdClientEndpoint),
 			zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
 		)
-		err = stream.Send(&pb.LeaseKeepAliveRequest{ID: leaseID})
-		if err != nil {
-			ls.lg.Debug(
-				"keepLeaseAlive stream failed to send lease keepalive request",
-				zap.String("endpoint", ls.m.EtcdClientEndpoint),
-				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
-				zap.Error(err),
-			)
-			continue
-		}
 		leaseRenewTime := time.Now()
+		respRC := <-stream
+		if respRC == nil {
 			ls.lg.Debug(
-				"keepLeaseAlive stream sent lease keepalive request",
+				"keepLeaseAlive received nil lease keepalive response",
 				zap.String("endpoint", ls.m.EtcdClientEndpoint),
 				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
 			)
-		respRC, err := stream.Recv()
-		if err != nil {
-			ls.lg.Debug(
-				"keepLeaseAlive stream failed to receive lease keepalive response",
-				zap.String("endpoint", ls.m.EtcdClientEndpoint),
-				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
-				zap.Error(err),
-			)
 			continue
 		}

 		// lease expires after TTL become 0
 		// don't send keepalive if the lease has expired
 		if respRC.TTL <= 0 {
@@ -409,16 +399,18 @@ func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
 // the format of key is the concat of leaseID + '_' + '<order of key creation>'
 // e.g 5186835655248304152_0 for first created key and 5186835655248304152_1 for second created key
 func (ls *leaseStresser) attachKeysWithLease(leaseID int64) error {
-	var txnPuts []*pb.RequestOp
+	var txnPuts []clientv3.Op
 	for j := 0; j < ls.keysPerLease; j++ {
-		txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte(fmt.Sprintf("%d%s%d", leaseID, "_", j)),
-			Value: []byte(fmt.Sprintf("bar")), Lease: leaseID}}}
+		txnput := clientv3.OpPut(
+			fmt.Sprintf("%d%s%d", leaseID, "_", j),
+			fmt.Sprintf("bar"),
+			clientv3.WithLease(clientv3.LeaseID(leaseID)),
+		)
 		txnPuts = append(txnPuts, txnput)
 	}
 	// keep retrying until lease is not found or ctx is being canceled
 	for ls.ctx.Err() == nil {
-		txn := &pb.TxnRequest{Success: txnPuts}
-		_, err := ls.kvc.Txn(ls.ctx, txn)
+		_, err := ls.cli.Txn(ls.ctx).Then(txnPuts...).Commit()
 		if err == nil {
 			// since all created keys will be deleted too, the number of operations on keys will be roughly 2x the number of created keys
 			atomic.AddInt64(&ls.atomicModifiedKey, 2*int64(ls.keysPerLease))
@@ -437,9 +429,10 @@ func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) {
 	if rand.Intn(2) != 0 {
 		return false, nil
 	}
+
 	// keep retrying until a lease is dropped or ctx is being canceled
 	for ls.ctx.Err() == nil {
-		_, err := ls.lc.LeaseRevoke(ls.ctx, &pb.LeaseRevokeRequest{ID: leaseID})
+		_, err := ls.cli.Revoke(ls.ctx, clientv3.LeaseID(leaseID))
 		if err == nil || rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
 			return true, nil
 		}
@@ -454,7 +447,9 @@ func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) {
 	return false, ls.ctx.Err()
 }

-func (ls *leaseStresser) Pause() { ls.Close() }
+func (ls *leaseStresser) Pause() {
+	ls.Close()
+}

 func (ls *leaseStresser) Close() {
 	ls.lg.Info(
@@ -464,7 +459,7 @@ func (ls *leaseStresser) Close() {
 	ls.cancel()
 	ls.runWg.Wait()
 	ls.aliveWg.Wait()
-	ls.conn.Close()
+	ls.cli.Close()
 	ls.lg.Info(
 		"lease stresser is closed",
 		zap.String("endpoint", ls.m.EtcdClientEndpoint),
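Finally, a sketch of the lease workflow the lease stresser switches to: Grant, attaching a key with clientv3.WithLease inside a Txn, reading the KeepAlive response channel (a nil receive means the stream closed), and Revoke. The endpoint, TTL, and key name are illustrative only; this is not the stresser code.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // made-up endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Grant a short lease, as createLease now does through the client.
	gresp, err := cli.Grant(ctx, 15)
	if err != nil {
		panic(err)
	}
	leaseID := gresp.ID

	// Attach a key with clientv3.WithLease, the way attachKeysWithLease
	// builds its transaction of OpPut operations.
	if _, err = cli.Txn(ctx).Then(
		clientv3.OpPut(fmt.Sprintf("%d_0", leaseID), "bar", clientv3.WithLease(leaseID)),
	).Commit(); err != nil {
		panic(err)
	}

	// KeepAlive returns a channel of responses; a nil receive means the
	// stream was closed, which is what keepLeaseAlive now checks for.
	ch, err := cli.KeepAlive(ctx, leaseID)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 3; i++ {
		resp := <-ch
		if resp == nil {
			fmt.Println("keepalive channel closed")
			return
		}
		fmt.Println("lease renewed, TTL:", resp.TTL)
		time.Sleep(time.Second)
	}

	// Revoke the lease, matching randomlyDropLease's ls.cli.Revoke call.
	if _, err = cli.Revoke(ctx, leaseID); err != nil {
		fmt.Println("revoke error:", err)
	}
}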