Merge pull request #14437 from clarkfw/migrate-member-remove-tests

tests: Migrate member remove tests to common framework

Commit 1ee1eff0c5
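For orientation before the diff: the "common framework" named in the commit message drives one test body across the cluster configurations in clusterTestCases and across the framework's runner backends. The condensed Go sketch below shows that shape. It is an illustration only, not part of this change, and it assumes it lives in the same test package as the code in the diff, where the package-level fixtures testRunner and clusterTestCases are defined.

package common // assumption: the package holding the common-framework tests

import (
    "context"
    "testing"
    "time"

    "go.etcd.io/etcd/tests/v3/framework/testutils"
)

// TestClusterSketch is an illustrative outline only; testRunner and
// clusterTestCases are the package-level fixtures referenced in the diff below.
func TestClusterSketch(t *testing.T) {
    testRunner.BeforeTest(t)
    for _, tc := range clusterTestCases {
        t.Run(tc.name, func(t *testing.T) {
            ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
            defer cancel()

            // NewCluster is provided by the runner, so the same body serves
            // both the integration and e2e backends.
            clus := testRunner.NewCluster(ctx, t, tc.config)
            defer clus.Close()
            cc := clus.Client()

            // exercise the cluster through the common client, bounded by ctx
            testutils.ExecuteUntil(ctx, t, func() {
                if _, err := cc.MemberList(ctx); err != nil {
                    t.Fatal(err)
                }
            })
        })
    }
}

The TestMemberRemove test added in this pull request follows the same pattern, with member-removal-specific assertions inside the ExecuteUntil body.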
@@ -22,6 +22,7 @@ import (
     "github.com/stretchr/testify/require"
     clientv3 "go.etcd.io/etcd/client/v3"
     "go.etcd.io/etcd/server/v3/etcdserver"
+    "go.etcd.io/etcd/tests/v3/framework"
     "go.etcd.io/etcd/tests/v3/framework/testutils"
 )
 
@@ -83,25 +84,18 @@ func TestMemberAdd(t *testing.T) {
             name: "StrictReconfigCheck/WaitForQuorum",
             strictReconfigCheck: true,
             waitForQuorum: true,
-            expectError: false,
         },
         {
             name: "StrictReconfigCheck/NoWaitForQuorum",
             strictReconfigCheck: true,
-            waitForQuorum: false,
             expectError: true,
         },
         {
             name: "DisableStrictReconfigCheck/WaitForQuorum",
-            strictReconfigCheck: false,
             waitForQuorum: true,
-            expectError: false,
         },
         {
             name: "DisableStrictReconfigCheck/NoWaitForQuorum",
-            strictReconfigCheck: false,
-            waitForQuorum: false,
-            expectError: false,
         },
     }
 
@@ -112,9 +106,7 @@ func TestMemberAdd(t *testing.T) {
                 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
                 defer cancel()
                 c := clusterTc.config
-                if !quorumTc.strictReconfigCheck {
-                    c.DisableStrictReconfigCheck = true
-                }
+                c.DisableStrictReconfigCheck = !quorumTc.strictReconfigCheck
                 clus := testRunner.NewCluster(ctx, t, c)
                 defer clus.Close()
                 cc := clus.Client()
@@ -152,3 +144,122 @@ func TestMemberAdd(t *testing.T) {
         }
     }
 }
+
+func TestMemberRemove(t *testing.T) {
+    testRunner.BeforeTest(t)
+
+    tcs := []struct {
+        name string
+        strictReconfigCheck bool
+        waitForQuorum bool
+        expectSingleNodeError bool
+        expectClusterError bool
+    }{
+        {
+            name: "StrictReconfigCheck/WaitForQuorum",
+            strictReconfigCheck: true,
+            waitForQuorum: true,
+            expectSingleNodeError: true,
+        },
+        {
+            name: "StrictReconfigCheck/NoWaitForQuorum",
+            strictReconfigCheck: true,
+            expectSingleNodeError: true,
+            expectClusterError: true,
+        },
+        {
+            name: "DisableStrictReconfigCheck/WaitForQuorum",
+            waitForQuorum: true,
+        },
+        {
+            name: "DisableStrictReconfigCheck/NoWaitForQuorum",
+        },
+    }
+
+    for _, quorumTc := range tcs {
+        for _, clusterTc := range clusterTestCases {
+            if !quorumTc.strictReconfigCheck && clusterTc.config.ClusterSize == 1 {
+                // skip these test cases:
+                // when strictReconfigCheck is disabled, calling MemberRemove will cause the single node to panic
+                continue
+            }
+            t.Run(quorumTc.name+"/"+clusterTc.name, func(t *testing.T) {
+                ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+                defer cancel()
+                c := clusterTc.config
+                c.DisableStrictReconfigCheck = !quorumTc.strictReconfigCheck
+                clus := testRunner.NewCluster(ctx, t, c)
+                defer clus.Close()
+                // client connects to a specific member which won't be removed from the cluster
+                cc := clus.Members()[0].Client()
+
+                testutils.ExecuteUntil(ctx, t, func() {
+                    if quorumTc.waitForQuorum {
+                        time.Sleep(etcdserver.HealthInterval)
+                    }
+
+                    memberId, clusterId := memberToRemove(ctx, t, cc, c.ClusterSize)
+                    removeResp, err := cc.MemberRemove(ctx, memberId)
+
+                    if c.ClusterSize == 1 && quorumTc.expectSingleNodeError {
+                        require.ErrorContains(t, err, "etcdserver: re-configuration failed due to not enough started members")
+                        return
+                    }
+
+                    if c.ClusterSize > 1 && quorumTc.expectClusterError {
+                        require.ErrorContains(t, err, "etcdserver: unhealthy cluster")
+                        return
+                    }
+
+                    require.NoError(t, err, "MemberRemove failed")
+                    t.Logf("removeResp.Members:%v", removeResp.Members)
+                    if removeResp.Header.ClusterId != clusterId {
+                        t.Fatalf("MemberRemove failed, expected ClusterId: %d, got: %d", clusterId, removeResp.Header.ClusterId)
+                    }
+                    if len(removeResp.Members) != c.ClusterSize-1 {
+                        t.Fatalf("MemberRemove failed, expected length of members: %d, got: %d", c.ClusterSize-1, len(removeResp.Members))
+                    }
+                    for _, m := range removeResp.Members {
+                        if m.ID == memberId {
+                            t.Fatalf("MemberRemove failed, member(id=%d) is still in cluster", memberId)
+                        }
+                    }
+                })
+            })
+        }
+    }
+}
+
+// memberToRemove chooses a member to remove.
+// If clusterSize == 1, it returns the only member.
+// Otherwise, it returns a member that the client has not connected to.
+// This ensures that `MemberRemove` does not return an "etcdserver: server stopped" error.
+func memberToRemove(ctx context.Context, t *testing.T, client framework.Client, clusterSize int) (memberId uint64, clusterId uint64) {
+    listResp, err := client.MemberList(ctx)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    clusterId = listResp.Header.ClusterId
+    if clusterSize == 1 {
+        memberId = listResp.Members[0].ID
+    } else {
+        // get the status of the specific member that the client has connected to
+        statusResp, err := client.Status(ctx)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        // choose a member that the client has not connected to
+        for _, m := range listResp.Members {
+            if m.ID != statusResp[0].Header.MemberId {
+                memberId = m.ID
+                break
+            }
+        }
+        if memberId == 0 {
+            t.Fatalf("memberToRemove failed. listResp:%v, statusResp:%v", listResp, statusResp)
+        }
+    }
+    return memberId, clusterId
+}
@@ -28,28 +28,6 @@ import (
 
 func TestCtlV3MemberList(t *testing.T) { testCtl(t, memberListTest) }
 func TestCtlV3MemberListWithHex(t *testing.T) { testCtl(t, memberListWithHexTest) }
-func TestCtlV3MemberRemove(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig())
-}
-func TestCtlV3MemberRemoveNoTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(*e2e.NewConfigNoTLS()))
-}
-func TestCtlV3MemberRemoveClientTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(*e2e.NewConfigClientTLS()))
-}
-func TestCtlV3MemberRemoveClientAutoTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(
-        // default ClusterSize is 1
-        e2e.EtcdProcessClusterConfig{
-            ClusterSize: 3,
-            IsClientAutoTLS: true,
-            ClientTLS: e2e.ClientTLS,
-            InitialToken: "new",
-        }))
-}
-func TestCtlV3MemberRemovePeerTLS(t *testing.T) {
-    testCtl(t, memberRemoveTest, withQuorum(), withDisableStrictReconfig(), withCfg(*e2e.NewConfigPeerTLS()))
-}
 func TestCtlV3MemberUpdate(t *testing.T) { testCtl(t, memberUpdateTest) }
 func TestCtlV3MemberUpdateNoTLS(t *testing.T) {
     testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigNoTLS()))
@@ -154,13 +132,6 @@ func memberListWithHexTest(cx ctlCtx) {
     }
 }
 
-func memberRemoveTest(cx ctlCtx) {
-    ep, memIDToRemove, clusterID := cx.memberToRemove()
-    if err := ctlV3MemberRemove(cx, ep, memIDToRemove, clusterID); err != nil {
-        cx.t.Fatal(err)
-    }
-}
-
 func ctlV3MemberRemove(cx ctlCtx, ep, memberID, clusterID string) error {
     cmdArgs := append(cx.prefixArgs([]string{ep}), "member", "remove", memberID)
     return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID))
@@ -337,3 +337,7 @@ func (c integrationClient) MemberAdd(ctx context.Context, _ string, peerAddrs []
 func (c integrationClient) MemberAddAsLearner(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
     return c.Client.MemberAddAsLearner(ctx, peerAddrs)
 }
+
+func (c integrationClient) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) {
+    return c.Client.MemberRemove(ctx, id)
+}
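The hunk above wires MemberRemove into the integration runner by delegating to the embedded clientv3 client. The e2e runner must satisfy the same Client interface; its implementation is not shown in this excerpt, but conceptually it drives etcdctl, much like the ctlV3MemberRemove helper retained above. The sketch below is a hypothetical, simplified illustration of that idea only: the e2eCtlClient type, its endpoint field, and the example member ID are invented for the example, and it returns raw command output rather than a *clientv3.MemberRemoveResponse.

package main

import (
    "fmt"
    "os/exec"
)

// e2eCtlClient is a hypothetical stand-in for the framework's e2e client;
// the real implementation is not part of this excerpt.
type e2eCtlClient struct {
    endpoint string // assumed client endpoint of a member that stays in the cluster
}

// MemberRemove removes a member by invoking `etcdctl member remove <hex-id>`,
// mirroring how the retained ctlV3MemberRemove helper drives etcdctl.
func (c e2eCtlClient) MemberRemove(id uint64) (string, error) {
    args := []string{"--endpoints", c.endpoint, "member", "remove", fmt.Sprintf("%x", id)}
    out, err := exec.Command("etcdctl", args...).CombinedOutput()
    return string(out), err
}

func main() {
    c := e2eCtlClient{endpoint: "http://127.0.0.1:2379"}
    out, err := c.MemberRemove(0x8e9e05c52164694d) // example member ID, hypothetical
    fmt.Println(out, err)
}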
@@ -74,6 +74,7 @@ type Client interface {
     MemberList(context context.Context) (*clientv3.MemberListResponse, error)
     MemberAdd(context context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error)
     MemberAddAsLearner(context context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error)
+    MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error)
 
     Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan
 }