Mirror of https://github.com/etcd-io/etcd.git
clientv3/integration: test lease keepalive works following quorum loss
commit 7ceca7e046
parent 395bd2313c
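
For context on the API the new test exercises, here is a minimal sketch of the client-side lease keepalive pattern: grant a lease and renew it on the channel returned by KeepAlive. The endpoint, TTL value, and the pre-module import path "github.com/coreos/etcd/clientv3" are illustrative assumptions for this era of the codebase, not part of the commit.

// Illustrative only: grant a lease and keep it alive; each successful
// renewal arrives on the channel returned by KeepAlive.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed local endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Grant a short-lived lease; the server expires it unless it is renewed.
	lease, err := cli.Grant(context.Background(), 4)
	if err != nil {
		panic(err)
	}

	// KeepAlive renews the lease in the background; the channel closes if
	// no renewal response arrives before the lease deadline.
	ka, err := cli.KeepAlive(context.Background(), lease.ID)
	if err != nil {
		panic(err)
	}
	for resp := range ka {
		fmt.Printf("lease %x renewed, TTL=%d\n", resp.ID, resp.TTL)
	}
	fmt.Println("keepalive channel closed; lease may have expired")
}
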
@@ -15,6 +15,8 @@
package integration

import (
	"reflect"
	"sort"
	"testing"
	"time"

@@ -455,3 +457,99 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {

	clus.Members[0].Restart(t)
}

func TestLeaseTimeToLive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Fatalf("failed to create lease %v", err)
	}

	kv := clientv3.NewKV(clus.RandClient())
	keys := []string{"foo1", "foo2"}
	for i := range keys {
		if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
			t.Fatal(err)
		}
	}

	lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
	if lerr != nil {
		t.Fatal(lerr)
	}
	if lresp.ID != resp.ID {
		t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
	}
	if lresp.GrantedTTL != int64(10) {
		t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
	}
	if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
		t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
	}
	ks := make([]string, len(lresp.Keys))
	for i := range lresp.Keys {
		ks[i] = string(lresp.Keys[i])
	}
	sort.Strings(ks)
	if !reflect.DeepEqual(ks, keys) {
		t.Fatalf("keys expected %v, got %v", keys, ks)
	}

	lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
	if lerr != nil {
		t.Fatal(lerr)
	}
	if len(lresp.Keys) != 0 {
		t.Fatalf("unexpected keys %+v", lresp.Keys)
	}
}

// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	r, err := cli.Grant(context.TODO(), 4)
	if err != nil {
		t.Fatal(err)
	}

	kctx, kcancel := context.WithCancel(context.Background())
	defer kcancel()
	ka, err := cli.KeepAlive(kctx, r.ID)
	if err != nil {
		t.Fatal(err)
	}
	// consume first keepalive so next message sends when cluster is down
	<-ka

	// force keepalive stream message to timeout
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	// Use TTL-1 since the client closes the keepalive channel if no
	// keepalive arrives before the lease deadline.
	// The cluster has 1 second to recover and reply to the keepalive.
	time.Sleep(time.Duration(r.TTL-1) * time.Second)
	clus.Members[1].Restart(t)
	clus.Members[2].Restart(t)

	select {
	case _, ok := <-ka:
		if !ok {
			t.Fatalf("keepalive closed")
		}
	case <-time.After(time.Duration(r.TTL) * time.Second):
		t.Fatalf("timed out waiting for keepalive")
	}
}
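
The TTL-1 sleep in TestLeaseRenewLostQuorum works because the client closes the keepalive channel once the lease deadline passes without a renewal, so applications generally need a path that re-grants the lease when that channel closes. A minimal sketch of such a recovery loop follows, under the same assumed clientv3 import path; the helper name keepLeaseAlive is hypothetical and not part of the commit.

// Illustrative recovery loop: if the keepalive channel closes (for example
// after quorum was lost for longer than the TTL), grant a fresh lease.
package example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

func keepLeaseAlive(ctx context.Context, cli *clientv3.Client, ttl int64) error {
	for {
		lease, err := cli.Grant(ctx, ttl)
		if err != nil {
			return err
		}
		ka, err := cli.KeepAlive(ctx, lease.ID)
		if err != nil {
			return err
		}
		// Drain renewals; the channel closes once no keepalive response
		// arrives before the lease deadline.
		for range ka {
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		// Channel closed while the context is still live: the old lease may
		// have expired, so loop around and grant a new one.
	}
}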