Merge pull request #12854 from ptabor/20210410-shouldApplyV3

(no)StoreV2 (Part 3): Applying consistency fix: ClusterVersionSet (and co.) might not get applied on v2store
Piotr Tabor authored on 2021-04-21 09:31:38 +02:00, committed by GitHub
12 changed files with 127 additions and 85 deletions
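
The fix in one sentence: on restart etcd replays every WAL entry into the in-memory v2store, while the v3 backend (bbolt) is durable and tracks its own consistent index, so replayed entries at or below that index must reach v2store again but must not be re-applied to the backend. Previously the apply loop skipped already-applied entries entirely, so internal requests such as ClusterVersionSet never refreshed the v2store copy of cluster metadata. The new ShouldApplyV3 flag threads that distinction through the whole apply path. A minimal standalone sketch of the per-entry decision (shouldApply is a hypothetical helper; in the diff the logic is inlined in EtcdServer.apply and applyEntryNormal):

```go
package main

import "fmt"

// ShouldApplyV3 mirrors the type introduced by this change.
type ShouldApplyV3 bool

const (
	ApplyBoth        = ShouldApplyV3(true)
	ApplyV2storeOnly = ShouldApplyV3(false)
)

// shouldApply condenses the per-entry decision made during replay: entries
// above the backend's consistent index have not reached bbolt yet, so they
// must be applied to both stores; everything else is a v2store-only replay.
func shouldApply(entryIndex, consistentIndex uint64) ShouldApplyV3 {
	if entryIndex > consistentIndex {
		return ApplyBoth
	}
	return ApplyV2storeOnly
}

func main() {
	fmt.Println(shouldApply(10, 5)) // true:  apply to v2store and backend
	fmt.Println(shouldApply(3, 5))  // false: rebuild v2store state only
}
```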

View File

@@ -72,6 +72,13 @@ type ConfigChangeContext struct {
IsPromote bool `json:"isPromote"`
}
+type ShouldApplyV3 bool
+const (
+ApplyBoth = ShouldApplyV3(true)
+ApplyV2storeOnly = ShouldApplyV3(false)
+)
// NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating
// cluster with raft learner member.
func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) {
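
A note on the type introduced above: because ShouldApplyV3 is a defined bool, the named constants make call sites self-documenting, and two Go conversion rules keep the gates in this file cast-free. Comparisons yield untyped boolean values, so `c.be != nil && shouldApplyV3` compiles as ShouldApplyV3, and untyped constants are assignable to any boolean type, which is why the updated tests can pass a bare `true`. A small self-contained demo (stand-in types, not etcd code):

```go
package main

import "fmt"

type ShouldApplyV3 bool

func gate(be *struct{}, shouldApplyV3 ShouldApplyV3) {
	// `be != nil` yields an untyped boolean value, so && combines it with
	// the ShouldApplyV3 operand, and `if` accepts any boolean type.
	if be != nil && shouldApplyV3 {
		fmt.Println("write v3 backend")
		return
	}
	fmt.Println("skip backend write")
}

func main() {
	gate(&struct{}{}, true)                 // untyped constant: assignable
	gate(&struct{}{}, ShouldApplyV3(false)) // explicit value
	gate(nil, true)                         // nil backend short-circuits
}
```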
@@ -285,6 +292,7 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
// ValidateConfigurationChange takes a proposed ConfChange and
// ensures that it is still valid.
func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
+// TODO: this must be switched to backend as well.
members, removed := membersFromStore(c.lg, c.v2store)
id := types.ID(cc.NodeID)
if removed[id] {
@@ -370,13 +378,13 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
// AddMember adds a new Member into the cluster, and saves the given member's
// raftAttributes into the store. The given member should have empty attributes.
// A Member with a matching id must not exist.
-func (c *RaftCluster) AddMember(m *Member) {
+func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
if c.v2store != nil {
mustSaveMemberToStore(c.lg, c.v2store, m)
}
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustSaveMemberToBackend(c.lg, c.be, m)
}
@@ -393,13 +401,13 @@ func (c *RaftCluster) AddMember(m *Member) {
// RemoveMember removes a member from the store.
// The given id MUST exist, or the function panics.
-func (c *RaftCluster) RemoveMember(id types.ID) {
+func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
if c.v2store != nil {
mustDeleteMemberFromStore(c.lg, c.v2store, id)
}
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustDeleteMemberFromBackend(c.be, id)
}
@@ -425,7 +433,7 @@ func (c *RaftCluster) RemoveMember(id types.ID) {
}
}
-func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
+func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
@@ -434,7 +442,7 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
if c.v2store != nil {
mustUpdateMemberAttrInStore(c.lg, c.v2store, m)
}
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustSaveMemberToBackend(c.lg, c.be, m)
}
return
@@ -459,7 +467,7 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
}
// PromoteMember marks the member's IsLearner RaftAttributes to false.
-func (c *RaftCluster) PromoteMember(id types.ID) {
+func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
@@ -467,7 +475,7 @@ func (c *RaftCluster) PromoteMember(id types.ID) {
if c.v2store != nil {
mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
}
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustSaveMemberToBackend(c.lg, c.be, c.members[id])
}
@@ -478,7 +486,7 @@ func (c *RaftCluster) PromoteMember(id types.ID) {
)
}
-func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
+func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
@@ -486,7 +494,7 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes)
if c.v2store != nil {
mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
}
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustSaveMemberToBackend(c.lg, c.be, c.members[id])
}
@@ -508,7 +516,7 @@ func (c *RaftCluster) Version() *semver.Version {
return semver.Must(semver.NewVersion(c.version.String()))
}
-func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version)) {
+func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version), shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
if c.version != nil {
@@ -533,7 +541,7 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s
if c.v2store != nil {
mustSaveClusterVersionToStore(c.lg, c.v2store, ver)
}
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustSaveClusterVersionToBackend(c.be, ver)
}
if oldVer != nil {
@@ -809,11 +817,11 @@ func (c *RaftCluster) DowngradeInfo() *DowngradeInfo {
return d
}
-func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo) {
+func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
-if c.be != nil {
+if c.be != nil && shouldApplyV3 {
mustSaveDowngradeToBackend(c.lg, c.be, d)
}
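
Every mutator above now has the same two-gate shape, and the asymmetry is the heart of the change: v2store content is reconstructed from snapshot plus full WAL replay, whereas the backend is durable and must not double-apply. A condensed, hypothetical stand-in for the shared pattern (persist and the stub types are not from the commit):

```go
package main

import "fmt"

type ShouldApplyV3 bool

// cluster stands in for RaftCluster; the two pointers model the optional
// v2store and backend handles checked by every mutator above.
type cluster struct {
	v2store *struct{}
	be      *struct{}
}

func (c *cluster) persist(shouldApplyV3 ShouldApplyV3) {
	if c.v2store != nil {
		// v2store is rebuilt from snapshot + full WAL replay,
		// so every entry must reach it on every pass.
		fmt.Println("write v2store")
	}
	if c.be != nil && shouldApplyV3 {
		// the v3 backend survives restarts; entries it already holds
		// are filtered out by the consistent-index check upstream.
		fmt.Println("write v3 backend")
	}
}

func main() {
	c := &cluster{v2store: &struct{}{}, be: &struct{}{}}
	c.persist(false) // replay: v2store only
	c.persist(true)  // fresh entry: both stores
}
```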

View File

@@ -283,9 +283,9 @@ func TestClusterValidateConfigurationChange(t *testing.T) {
cl.SetStore(v2store.New())
for i := 1; i <= 4; i++ {
attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", i)}}
-cl.AddMember(&Member{ID: types.ID(i), RaftAttributes: attr})
+cl.AddMember(&Member{ID: types.ID(i), RaftAttributes: attr}, true)
}
-cl.RemoveMember(4)
+cl.RemoveMember(4, true)
attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}}
ctx, err := json.Marshal(&Member{ID: types.ID(5), RaftAttributes: attr})
@@ -446,7 +446,7 @@ func TestClusterGenID(t *testing.T) {
previd := cs.ID()
cs.SetStore(mockstore.NewNop())
-cs.AddMember(newTestMember(3, nil, "", nil))
+cs.AddMember(newTestMember(3, nil, "", nil), true)
cs.genID()
if cs.ID() == previd {
t.Fatalf("cluster.ID = %v, want not %v", cs.ID(), previd)
@@ -489,7 +489,7 @@ func TestClusterAddMember(t *testing.T) {
st := mockstore.NewRecorder()
c := newTestCluster(t, nil)
c.SetStore(st)
-c.AddMember(newTestMember(1, nil, "node1", nil))
+c.AddMember(newTestMember(1, nil, "node1", nil), true)
wactions := []testutil.Action{
{
@@ -512,7 +512,7 @@ func TestClusterAddMemberAsLearner(t *testing.T) {
st := mockstore.NewRecorder()
c := newTestCluster(t, nil)
c.SetStore(st)
-c.AddMember(newTestMemberAsLearner(1, nil, "node1", nil))
+c.AddMember(newTestMemberAsLearner(1, nil, "node1", nil), true)
wactions := []testutil.Action{
{
@@ -555,7 +555,7 @@ func TestClusterRemoveMember(t *testing.T) {
st := mockstore.NewRecorder()
c := newTestCluster(t, nil)
c.SetStore(st)
-c.RemoveMember(1)
+c.RemoveMember(1, true)
wactions := []testutil.Action{
{Name: "Delete", Params: []interface{}{MemberStoreKey(1), true, true}},
@@ -595,7 +595,7 @@ func TestClusterUpdateAttributes(t *testing.T) {
c := newTestCluster(t, tt.mems)
c.removed = tt.removed
-c.UpdateAttributes(types.ID(1), Attributes{Name: name, ClientURLs: clientURLs})
+c.UpdateAttributes(types.ID(1), Attributes{Name: name, ClientURLs: clientURLs}, true)
if g := c.Members(); !reflect.DeepEqual(g, tt.wmems) {
t.Errorf("#%d: members = %+v, want %+v", i, g, tt.wmems)
}

View File

@@ -54,14 +54,14 @@ type applyResult struct {
// applierV3Internal is the interface for processing internal V3 raft request
type applierV3Internal interface {
-ClusterVersionSet(r *membershippb.ClusterVersionSetRequest)
-ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest)
-DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest)
+ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3)
}
// applierV3 is the interface for processing V3 raft messages
type applierV3 interface {
-Apply(r *pb.InternalRaftRequest) *applyResult
+Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult
Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
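
Widening every method of applierV3Internal and applierV3 at once means each implementation and wrapper (applierV3backend, authApplierV3, quotaApplierV3, ...) must change in lockstep or the build breaks. The blank-identifier assertion below is the usual Go idiom for making that guarantee explicit at compile time; an illustrative, self-contained example, not necessarily something etcd declares:

```go
package main

type shouldApplyV3 bool

// A scaled-down version of the widened interface.
type applierInternal interface {
	ClusterVersionSet(ver string, shouldApplyV3 shouldApplyV3)
}

type backendApplier struct{}

// Forgetting the new parameter here would make the assertion below fail
// to compile, pinpointing the stale implementation.
func (backendApplier) ClusterVersionSet(ver string, _ shouldApplyV3) {}

// Compile-time check that backendApplier still satisfies the interface.
var _ applierInternal = backendApplier{}

func main() {}
```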
@@ -130,7 +130,7 @@ func (s *EtcdServer) newApplierV3() applierV3 {
)
}
-func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
+func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
op := "unknown"
ar := &applyResult{}
defer func(start time.Time) {
@@ -142,6 +142,25 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
}
}(time.Now())
+switch {
+case r.ClusterVersionSet != nil: // Implemented in 3.5.x
+op = "ClusterVersionSet"
+a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
+return nil
+case r.ClusterMemberAttrSet != nil:
+op = "ClusterMemberAttrSet" // Implemented in 3.5.x
+a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
+return nil
+case r.DowngradeInfoSet != nil:
+op = "DowngradeInfoSet" // Implemented in 3.5.x
+a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
+return nil
+}
+if !shouldApplyV3 {
+return nil
+}
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
switch {
case r.Range != nil:
@@ -221,15 +240,6 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
case r.AuthRoleList != nil:
op = "AuthRoleList"
ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
-case r.ClusterVersionSet != nil: // Implemented in 3.5.x
-op = "ClusterVersionSet"
-a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet)
-case r.ClusterMemberAttrSet != nil:
-op = "ClusterMemberAttrSet" // Implemented in 3.5.x
-a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet)
-case r.DowngradeInfoSet != nil:
-op = "DowngradeInfoSet" // Implemented in 3.5.x
-a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet)
default:
a.s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r))
}
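
The hoist above is the actual bug fix: these three cases previously sat in the main switch (see the block removed just above), and replayed entries never reached the applier at all because applyEntryNormal bailed out first (see the early return removed from server.go further down), so ClusterVersionSet and friends never refreshed v2store. Moved ahead of the `if !shouldApplyV3` gate, they always execute and let RaftCluster decide the v2store-vs-backend split. A toy dispatcher tracing the new control flow (stand-in types only):

```go
package main

import "fmt"

type ShouldApplyV3 bool

// request models pb.InternalRaftRequest with two representative fields.
type request struct {
	clusterVersionSet string // non-empty simulates r.ClusterVersionSet != nil
	put               string // non-empty simulates r.Put != nil
}

func apply(r request, shouldApplyV3 ShouldApplyV3) {
	// Cluster-metadata requests run unconditionally; RaftCluster writes
	// v2store always and the backend only when shouldApplyV3 is true.
	if r.clusterVersionSet != "" {
		fmt.Println("SetVersion: v2store always, backend =", bool(shouldApplyV3))
		return
	}
	// Everything else touches only the v3 backend: skip on replay.
	if !shouldApplyV3 {
		fmt.Println("replayed entry: skip v3-only request")
		return
	}
	fmt.Println("apply put:", r.put)
}

func main() {
	apply(request{clusterVersionSet: "3.5.0"}, false) // still refreshes v2store
	apply(request{put: "k=v"}, false)                 // already in the backend
	apply(request{put: "k=v"}, true)                  // applied normally
}
```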
@@ -903,26 +913,27 @@ func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleList
return resp, err
}
-func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest) {
-a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability)
+func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability, shouldApplyV3)
}
-func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest) {
+func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
a.s.cluster.UpdateAttributes(
types.ID(r.Member_ID),
membership.Attributes{
Name: r.MemberAttributes.Name,
ClientURLs: r.MemberAttributes.ClientUrls,
},
+shouldApplyV3,
)
}
-func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest) {
+func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
d := membership.DowngradeInfo{Enabled: false}
if r.Enabled {
d = membership.DowngradeInfo{Enabled: true, TargetVersion: r.Ver}
}
-a.s.cluster.SetDowngradeInfo(&d)
+a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3)
}
type quotaApplierV3 struct {

View File

@@ -21,6 +21,7 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/auth"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc"
)
@@ -41,7 +42,7 @@ func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *a
return &authApplierV3{applierV3: base, as: as, lessor: lessor}
}
-func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
+func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
aa.mu.Lock()
defer aa.mu.Unlock()
if r.Header != nil {
@@ -57,7 +58,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
return &applyResult{err: err}
}
}
-ret := aa.applierV3.Apply(r)
+ret := aa.applierV3.Apply(r, shouldApplyV3)
aa.authInfo.Username = ""
aa.authInfo.Revision = 0
return ret

View File

@@ -87,7 +87,7 @@ func (a *applierV2store) Put(r *RequestV2) Response {
a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err))
}
if a.cluster != nil {
-a.cluster.UpdateAttributes(id, attr)
+a.cluster.UpdateAttributes(id, attr, true)
}
// return an empty response since there is no consumer.
return Response{}

View File

@@ -572,8 +572,7 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
}
srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, srv.consistIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
kvindex := srv.consistIndex.ConsistentIndex()
srv.lg.Debug("restore consistentIndex",
zap.Uint64("index", kvindex))
srv.lg.Debug("restore consistentIndex", zap.Uint64("index", kvindex))
if beExist {
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
// etcd from pre-3.0 release.
@@ -2039,8 +2038,13 @@ func (s *EtcdServer) apply(
es []raftpb.Entry,
confState *raftpb.ConfState,
) (appliedt uint64, appliedi uint64, shouldStop bool) {
s.lg.Debug("Applying entries", zap.Int("num-entries", len(es)))
for i := range es {
e := es[i]
s.lg.Debug("Applying entry",
zap.Uint64("index", e.Index),
zap.Uint64("term", e.Term),
zap.Stringer("type", e.Type))
switch e.Type {
case raftpb.EntryNormal:
s.applyEntryNormal(&e)
@@ -2048,13 +2052,19 @@ func (s *EtcdServer) apply(
s.setTerm(e.Term)
case raftpb.EntryConfChange:
+// We need to apply all WAL entries on top of v2store
+// and only 'unapplied' (e.Index>backend.ConsistentIndex) on the backend.
+shouldApplyV3 := membership.ApplyV2storeOnly
// set the consistent index of current executing entry
if e.Index > s.consistIndex.ConsistentIndex() {
s.consistIndex.SetConsistentIndex(e.Index)
+shouldApplyV3 = membership.ApplyBoth
}
var cc raftpb.ConfChange
pbutil.MustUnmarshal(&cc, e.Data)
-removedSelf, err := s.applyConfChange(cc, confState)
+removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3)
s.setAppliedIndex(e.Index)
s.setTerm(e.Term)
shouldStop = shouldStop || removedSelf
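
For configuration-change entries the old behavior was the mirror image: they were applied to both stores unconditionally, so a replay after restart re-wrote member data the backend already held, redundant at best and wrong for state that is not idempotent under replay. The comment added above states the rule; a toy illustration of why the backend side needs the gate (toy types, not etcd code):

```go
package main

import "fmt"

// backend is a toy model of the durable v3 store with its consistent index.
type backend struct {
	members map[uint64]bool
	applied uint64 // highest entry index already persisted
}

func (b *backend) applyMemberAdd(idx, id uint64) {
	// The gate this commit threads through as ShouldApplyV3.
	if idx <= b.applied {
		fmt.Printf("entry %d already in backend: v2store-only replay\n", idx)
		return
	}
	b.members[id] = true
	b.applied = idx
	fmt.Printf("entry %d applied to backend\n", idx)
}

func main() {
	b := &backend{members: map[uint64]bool{}, applied: 5}
	b.applyMemberAdd(3, 1234) // WAL replay of an old conf change
	b.applyMemberAdd(7, 5678) // fresh conf change
}
```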
@@ -2074,17 +2084,17 @@ func (s *EtcdServer) apply(
// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer
func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
-shouldApplyV3 := false
+shouldApplyV3 := membership.ApplyV2storeOnly
index := s.consistIndex.ConsistentIndex()
if e.Index > index {
// set the consistent index of current executing entry
s.consistIndex.SetConsistentIndex(e.Index)
-shouldApplyV3 = true
+shouldApplyV3 = membership.ApplyBoth
}
s.lg.Debug("apply entry normal",
zap.Uint64("consistent-index", index),
zap.Uint64("entry-index", e.Index),
zap.Bool("should-applyV3", shouldApplyV3))
zap.Bool("should-applyV3", bool(shouldApplyV3)))
// raft state machine may generate noop entry when leader confirmation.
// skip it in advance to avoid some potential bug in the future
@@ -2104,18 +2114,16 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
var r pb.Request
rp := &r
pbutil.MustUnmarshal(rp, e.Data)
s.lg.Debug("applyEntryNormal", zap.Stringer("V2request", rp))
s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
return
}
s.lg.Debug("applyEntryNormal", zap.Stringer("raftReq", &raftReq))
if raftReq.V2 != nil {
req := (*RequestV2)(raftReq.V2)
s.w.Trigger(req.ID, s.applyV2Request(req))
return
}
-// do not re-apply applied entries.
-if !shouldApplyV3 {
-return
-}
id := raftReq.ID
if id == 0 {
@@ -2128,7 +2136,12 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
if !needResult && raftReq.Txn != nil {
removeNeedlessRangeReqs(raftReq.Txn)
}
-ar = s.applyV3.Apply(&raftReq)
+ar = s.applyV3.Apply(&raftReq, shouldApplyV3)
}
+// do not re-apply applied entries.
+if !shouldApplyV3 {
+return
+}
if ar == nil {
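
This reorder is the companion to the hoist in apply.go: the early return that used to precede the applier call now follows it, so the v3 applier always runs (letting the cluster-metadata cases refresh v2store) while result delivery to waiters is still suppressed for replayed entries. A toy rendering of the new tail (stand-ins, not etcd code):

```go
package main

import "fmt"

type ShouldApplyV3 bool

type applyResult struct{ resp string }

func applyEntry(shouldApplyV3 ShouldApplyV3) *applyResult {
	// The applier is now invoked unconditionally; before this change a
	// replayed entry returned here without ever reaching the applier.
	ar := &applyResult{resp: "ok"} // stands in for s.applyV3.Apply(...)
	if !shouldApplyV3 {
		return nil // replay: side effects done, no result to deliver
	}
	return ar
}

func main() {
	fmt.Println(applyEntry(true))  // &{ok}: fresh entry, result delivered
	fmt.Println(applyEntry(false)) // <nil>: already applied, suppressed
}
```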
@@ -2170,7 +2183,7 @@ func (s *EtcdServer) notifyAboutFirstCommitInTerm() {
// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft
-func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
+func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) {
if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
cc.NodeID = raft.None
s.r.ApplyConfChange(cc)
@@ -2193,9 +2206,9 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
)
}
if confChangeContext.IsPromote {
-s.cluster.PromoteMember(confChangeContext.Member.ID)
+s.cluster.PromoteMember(confChangeContext.Member.ID, shouldApplyV3)
} else {
-s.cluster.AddMember(&confChangeContext.Member)
+s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3)
if confChangeContext.Member.ID != s.id {
s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
@@ -2213,7 +2226,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
case raftpb.ConfChangeRemoveNode:
id := types.ID(cc.NodeID)
-s.cluster.RemoveMember(id)
+s.cluster.RemoveMember(id, shouldApplyV3)
if id == s.id {
return true, nil
}
@@ -2231,7 +2244,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
zap.String("member-id-from-message", m.ID.String()),
)
}
-s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
+s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3)
if m.ID != s.id {
s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
}

View File

@@ -181,7 +181,7 @@ func TestApplyRepeat(t *testing.T) {
cl := newTestCluster(nil)
st := v2store.New()
cl.SetStore(v2store.New())
-cl.AddMember(&membership.Member{ID: 1234})
+cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
@@ -509,9 +509,9 @@ func TestApplyConfChangeError(t *testing.T) {
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 4; i++ {
-cl.AddMember(&membership.Member{ID: types.ID(i)})
+cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
-cl.RemoveMember(4)
+cl.RemoveMember(4, true)
attr := membership.RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}}
ctx, err := json.Marshal(&membership.Member{ID: types.ID(1), RaftAttributes: attr})
@@ -576,7 +576,7 @@ func TestApplyConfChangeError(t *testing.T) {
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
cluster: cl,
}
-_, err := srv.applyConfChange(tt.cc, nil)
+_, err := srv.applyConfChange(tt.cc, nil, true)
if err != tt.werr {
t.Errorf("#%d: applyConfChange error = %v, want %v", i, err, tt.werr)
}
@@ -597,7 +597,7 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 3; i++ {
-cl.AddMember(&membership.Member{ID: types.ID(i)})
+cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
@@ -616,7 +616,7 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
NodeID: 2,
}
// remove non-local member
-shouldStop, err := srv.applyConfChange(cc, &raftpb.ConfState{})
+shouldStop, err := srv.applyConfChange(cc, &raftpb.ConfState{}, true)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@@ -626,7 +626,7 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
// remove local member
cc.NodeID = 1
-shouldStop, err = srv.applyConfChange(cc, &raftpb.ConfState{})
+shouldStop, err = srv.applyConfChange(cc, &raftpb.ConfState{}, true)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@@ -640,7 +640,7 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
-cl.AddMember(&membership.Member{ID: types.ID(1)})
+cl.AddMember(&membership.Member{ID: types.ID(1)}, true)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeNop(),
@@ -688,7 +688,7 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 5; i++ {
-cl.AddMember(&membership.Member{ID: types.ID(i)})
+cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
@@ -1342,7 +1342,7 @@ func TestRemoveMember(t *testing.T) {
cl := newTestCluster(nil)
st := v2store.New()
cl.SetStore(v2store.New())
-cl.AddMember(&membership.Member{ID: 1234})
+cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
@@ -1386,7 +1386,7 @@ func TestUpdateMember(t *testing.T) {
cl := newTestCluster(nil)
st := v2store.New()
cl.SetStore(st)
-cl.AddMember(&membership.Member{ID: 1234})
+cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
@@ -1874,7 +1874,7 @@ func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error {
func newTestCluster(membs []*membership.Member) *membership.RaftCluster {
c := membership.NewCluster(zap.NewExample(), "")
for _, m := range membs {
-c.AddMember(m)
+c.AddMember(m, true)
}
return c
}