diff --git a/server/etcdserver/adapters.go b/server/etcdserver/adapters.go index a6611359c..5f3ff9993 100644 --- a/server/etcdserver/adapters.go +++ b/server/etcdserver/adapters.go @@ -63,7 +63,7 @@ func (s *serverVersionAdapter) GetDowngradeInfo() *membership.DowngradeInfo { return s.cluster.DowngradeInfo() } -func (s *serverVersionAdapter) GetVersions() map[string]*version.Versions { +func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions { return getVersions(s.lg, s.cluster, s.id, s.peerRt) } diff --git a/server/etcdserver/version/monitor.go b/server/etcdserver/version/monitor.go index 19e91f7ef..fef2e38f3 100644 --- a/server/etcdserver/version/monitor.go +++ b/server/etcdserver/version/monitor.go @@ -31,7 +31,7 @@ type Monitor struct { type Server interface { GetClusterVersion() *semver.Version GetDowngradeInfo() *membership.DowngradeInfo - GetVersions() map[string]*version.Versions + GetMembersVersions() map[string]*version.Versions UpdateClusterVersion(string) DowngradeCancel() @@ -99,7 +99,7 @@ func (m *Monitor) UpdateStorageVersionIfNeeded() { func (m *Monitor) CancelDowngradeIfNeeded() { d := m.s.GetDowngradeInfo() - if !d.Enabled { + if d == nil || !d.Enabled { return } @@ -115,7 +115,7 @@ func (m *Monitor) CancelDowngradeIfNeeded() { // The returned version is the min server version in the map, or nil if the min // version in unknown. func (m *Monitor) decideClusterVersion() *semver.Version { - vers := m.s.GetVersions() + vers := m.s.GetMembersVersions() var cv *semver.Version lv := semver.Must(semver.NewVersion(version.Version)) @@ -153,7 +153,7 @@ func (m *Monitor) decideClusterVersion() *semver.Version { // versionsMatchTarget returns true if all server versions are equal to target version, otherwise return false. // It can be used to decide the whether the cluster finishes downgrading to target version. 
func (m *Monitor) versionsMatchTarget(targetVersion *semver.Version) bool { - vers := m.s.GetVersions() + vers := m.s.GetMembersVersions() for mid, ver := range vers { if ver == nil { return false diff --git a/server/etcdserver/version/monitor_test.go b/server/etcdserver/version/monitor_test.go index b76915b11..d2a7b36d1 100644 --- a/server/etcdserver/version/monitor_test.go +++ b/server/etcdserver/version/monitor_test.go @@ -5,15 +5,15 @@ import ( "testing" "github.com/coreos/go-semver/semver" - "go.uber.org/zap" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" ) -var testLogger = zap.NewExample() - var ( + V3_0 = semver.Version{Major: 3, Minor: 0} V3_5 = semver.Version{Major: 3, Minor: 5} V3_6 = semver.Version{Major: 3, Minor: 6} ) @@ -47,8 +47,8 @@ func TestDecideClusterVersion(t *testing.T) { } for i, tt := range tests { - monitor := NewMonitor(testLogger, &storageMock{ - versions: tt.vers, + monitor := NewMonitor(zaptest.NewLogger(t), &storageMock{ + memberVersions: tt.vers, }) dver := monitor.decideClusterVersion() if !reflect.DeepEqual(dver, tt.wdver) { @@ -97,7 +97,7 @@ func TestDecideStorageVersion(t *testing.T) { clusterVersion: tt.clusterVersion, storageVersion: tt.storageVersion, } - monitor := NewMonitor(testLogger, s) + monitor := NewMonitor(zaptest.NewLogger(t), s) monitor.UpdateStorageVersionIfNeeded() if !reflect.DeepEqual(s.storageVersion, tt.expectStorageVersion) { t.Errorf("Unexpected storage version value, got = %+v, want %+v", s.storageVersion, tt.expectStorageVersion) @@ -147,8 +147,8 @@ func TestVersionMatchTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - monitor := NewMonitor(testLogger, &storageMock{ - versions: tt.versionMap, + monitor := NewMonitor(zaptest.NewLogger(t), &storageMock{ + memberVersions: tt.versionMap, }) actual := monitor.versionsMatchTarget(tt.targetVersion) if actual != 
tt.expectedFinished { @@ -158,8 +158,176 @@ func TestVersionMatchTarget(t *testing.T) { } } +func TestUpdateClusterVersionIfNeeded(t *testing.T) { + tests := []struct { + name string + clusterVersion *semver.Version + memberVersions map[string]*version.Versions + downgrade *membership.DowngradeInfo + expectClusterVersion *semver.Version + }{ + { + name: "Default to 3.0 if there are no members", + expectClusterVersion: &V3_0, + }, + { + name: "Should pick lowest server version from members", + memberVersions: map[string]*version.Versions{ + "a": {Cluster: "3.7.0", Server: "3.6.0"}, + "b": {Cluster: "3.4.0", Server: "3.5.0"}, + }, + expectClusterVersion: &V3_5, + }, + { + name: "Sets minimal version when member has broken version", + memberVersions: map[string]*version.Versions{ + "a": {Cluster: "3.7.0", Server: "3.6.0"}, + "b": {Cluster: "xxxx", Server: "yyyy"}, + }, + expectClusterVersion: &V3_0, + }, + { + name: "Should pick lowest server version from members (cv already set)", + memberVersions: map[string]*version.Versions{ + "a": {Cluster: "3.7.0", Server: "3.6.0"}, + "b": {Cluster: "3.4.0", Server: "3.5.0"}, + }, + clusterVersion: &V3_5, + expectClusterVersion: &V3_5, + }, + { + name: "Should upgrade cluster version if all members have upgraded (have higher server version)", + memberVersions: map[string]*version.Versions{ + "a": {Cluster: "3.5.0", Server: "3.6.0"}, + "b": {Cluster: "3.5.0", Server: "3.6.0"}, + }, + clusterVersion: &V3_5, + expectClusterVersion: &V3_6, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &storageMock{ + clusterVersion: tt.clusterVersion, + memberVersions: tt.memberVersions, + downgradeInfo: tt.downgrade, + } + monitor := NewMonitor(zaptest.NewLogger(t), s) + + // Run multiple times to ensure that results are stable + for i := 0; i < 3; i++ { + monitor.UpdateClusterVersionIfNeeded() + assert.Equal(t, tt.expectClusterVersion, s.clusterVersion) + } + }) + } +} + +func TestCancelDowngradeIfNeeded(t 
*testing.T) { + tests := []struct { + name string + memberVersions map[string]*version.Versions + downgrade *membership.DowngradeInfo + expectDowngrade *membership.DowngradeInfo + }{ + { + name: "No action if there is no downgrade in progress", + }, + { + name: "Cancel downgrade if there are no members", + downgrade: &membership.DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, + expectDowngrade: nil, + }, + // Next entries go through all states that should happen during downgrade + { + name: "No action if downgrade was not started", + memberVersions: map[string]*version.Versions{ + "a": {Cluster: "3.6.0", Server: "3.6.1"}, + "b": {Cluster: "3.6.0", Server: "3.6.2"}, + }, + }, + { + name: "Cancel downgrade if all members have downgraded", + memberVersions: map[string]*version.Versions{ + "a": {Cluster: "3.5.0", Server: "3.5.1"}, + "b": {Cluster: "3.5.0", Server: "3.5.2"}, + }, + downgrade: &membership.DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, + expectDowngrade: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &storageMock{ + memberVersions: tt.memberVersions, + downgradeInfo: tt.downgrade, + } + monitor := NewMonitor(zaptest.NewLogger(t), s) + + // Run multiple times to ensure that results are stable + for i := 0; i < 3; i++ { + monitor.CancelDowngradeIfNeeded() + assert.Equal(t, tt.expectDowngrade, s.downgradeInfo) + } + }) + } +} + +func TestUpdateStorageVersionIfNeeded(t *testing.T) { + tests := []struct { + name string + clusterVersion *semver.Version + storageVersion *semver.Version + expectStorageVersion *semver.Version + }{ + { + name: "No action if cluster version is nil", + }, + { + name: "Should set storage version if cluster version is set", + clusterVersion: &V3_5, + expectStorageVersion: &V3_5, + }, + { + name: "No action if storage version was already set", + storageVersion: &V3_5, + expectStorageVersion: &V3_5, + }, + { + name: "No action if storage version equals cluster version", + 
clusterVersion: &V3_5, + storageVersion: &V3_5, + expectStorageVersion: &V3_5, + }, + { + name: "Should set storage version to cluster version", + clusterVersion: &V3_6, + storageVersion: &V3_5, + expectStorageVersion: &V3_6, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &storageMock{ + clusterVersion: tt.clusterVersion, + storageVersion: tt.storageVersion, + } + monitor := NewMonitor(zaptest.NewLogger(t), s) + + // Run multiple times to ensure that results are stable + for i := 0; i < 3; i++ { + monitor.UpdateStorageVersionIfNeeded() + assert.Equal(t, tt.expectStorageVersion, s.storageVersion) + } + }) + } +} + type storageMock struct { - versions map[string]*version.Versions + memberVersions map[string]*version.Versions clusterVersion *semver.Version storageVersion *semver.Version downgradeInfo *membership.DowngradeInfo @@ -184,8 +352,8 @@ func (s *storageMock) GetDowngradeInfo() *membership.DowngradeInfo { return s.downgradeInfo } -func (s *storageMock) GetVersions() map[string]*version.Versions { - return s.versions +func (s *storageMock) GetMembersVersions() map[string]*version.Versions { + return s.memberVersions } func (s *storageMock) GetStorageVersion() *semver.Version { diff --git a/server/etcdserver/version/version_test.go b/server/etcdserver/version/version_test.go new file mode 100644 index 000000000..5178494c2 --- /dev/null +++ b/server/etcdserver/version/version_test.go @@ -0,0 +1,175 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/coreos/go-semver/semver" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + + "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" +) + +var ( + V3_7 = semver.Version{Major: 3, Minor: 7} +) + +func TestUpgradeSingleNode(t *testing.T) { + lg := zaptest.NewLogger(t) + c := newCluster(lg, 1, V3_6) + c.StepMonitors() + assert.Equal(t, newCluster(lg, 1, V3_6), c) + + c.ReplaceMemberBinary(0, V3_7) + c.StepMonitors() + c.StepMonitors() + + assert.Equal(t, newCluster(lg, 1, V3_7), c) +} + +func TestUpgradeThreeNodes(t *testing.T) { + lg := zaptest.NewLogger(t) + c := newCluster(lg, 3, V3_6) + c.StepMonitors() + assert.Equal(t, newCluster(lg, 3, V3_6), c) + + c.ReplaceMemberBinary(0, V3_7) + c.StepMonitors() + c.ReplaceMemberBinary(1, V3_7) + c.StepMonitors() + c.ReplaceMemberBinary(2, V3_7) + c.StepMonitors() + c.StepMonitors() + + assert.Equal(t, newCluster(lg, 3, V3_7), c) +} + +func newCluster(lg *zap.Logger, memberCount int, ver semver.Version) *clusterMock { + cluster := &clusterMock{ + clusterVersion: ver, + members: make([]*memberMock, 0, memberCount), + } + majorMinVer := semver.Version{Major: ver.Major, Minor: ver.Minor} + for i := 0; i < memberCount; i++ { + m := &memberMock{ + cluster: cluster, + serverVersion: ver, + storageVersion: majorMinVer, + } + m.monitor = NewMonitor(lg.Named(fmt.Sprintf("m%d", i)), m) + cluster.members = append(cluster.members, m) + } + cluster.members[0].isLeader = true + return cluster +} + +func (c *clusterMock) StepMonitors() { + // Execute monitor functions in random order as it is not guaranteed + fs := []func(){} + for _, m := range c.members { + fs = append(fs, m.monitor.UpdateStorageVersionIfNeeded) + if m.isLeader { + fs = append(fs, m.monitor.CancelDowngradeIfNeeded, 
m.monitor.UpdateClusterVersionIfNeeded) + } + } + rand.Shuffle(len(fs), func(i, j int) { + fs[i], fs[j] = fs[j], fs[i] + }) + for _, f := range fs { + f() + } +} + +type clusterMock struct { + clusterVersion semver.Version + downgradeInfo *membership.DowngradeInfo + members []*memberMock +} + +func (c *clusterMock) DowngradeEnable(ver semver.Version) { + c.downgradeInfo = &membership.DowngradeInfo{TargetVersion: ver.String(), Enabled: true} +} + +func (c *clusterMock) MembersVersions() map[string]*version.Versions { + result := map[string]*version.Versions{} + for i, m := range c.members { + result[fmt.Sprintf("%d", i)] = &version.Versions{ + Server: m.serverVersion.String(), + Cluster: c.clusterVersion.String(), + } + } + return result +} + +func (c *clusterMock) ReplaceMemberBinary(mid int, newServerVersion semver.Version) { + if newServerVersion.LessThan(c.clusterVersion) { + panic("Members cannot join clusters with higher version") + } + c.members[mid].serverVersion = newServerVersion +} + +type memberMock struct { + cluster *clusterMock + + isLeader bool + serverVersion semver.Version + storageVersion semver.Version + monitor *Monitor +} + +var _ Server = (*memberMock)(nil) + +func (m *memberMock) UpdateClusterVersion(version string) { + m.cluster.clusterVersion = *semver.New(version) +} + +func (m *memberMock) DowngradeCancel() { + m.cluster.downgradeInfo = nil +} + +func (m *memberMock) GetClusterVersion() *semver.Version { + return &m.cluster.clusterVersion +} + +func (m *memberMock) GetDowngradeInfo() *membership.DowngradeInfo { + return m.cluster.downgradeInfo +} + +func (m *memberMock) GetMembersVersions() map[string]*version.Versions { + return m.cluster.MembersVersions() +} + +func (m *memberMock) GetStorageVersion() *semver.Version { + return &m.storageVersion +} + +func (m *memberMock) UpdateStorageVersion(v semver.Version) { + m.storageVersion = v +} + +func (m *memberMock) TriggerSnapshot() { +} + +func (m *memberMock) Lock() { +} + +func (m 
*memberMock) Unlock() { +}