Merge pull request #15449 from fuweid/fix-15409

tests/integration: deflake TestEtcdVersionFromWAL
Commit 043525c69d by Marek Siarkowicz, 2023-03-13 10:36:02 +01:00 (committed by GitHub)
2 changed files with 47 additions and 5 deletions

File 1 of 2:

@@ -2168,12 +2168,14 @@ func (s *EtcdServer) StorageVersion() *semver.Version {
 // monitorClusterVersions every monitorVersionInterval checks if it's the leader and updates cluster version if needed.
 func (s *EtcdServer) monitorClusterVersions() {
-	monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s))
+	lg := s.Logger()
+	monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s))
 	for {
 		select {
 		case <-s.firstCommitInTerm.Receive():
 		case <-time.After(monitorVersionInterval):
 		case <-s.stopping:
+			lg.Info("server has stopped; stopping cluster version's monitor")
 			return
 		}
@@ -2189,12 +2191,14 @@ func (s *EtcdServer) monitorClusterVersions() {
 // monitorStorageVersion every monitorVersionInterval updates storage version if needed.
 func (s *EtcdServer) monitorStorageVersion() {
-	monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s))
+	lg := s.Logger()
+	monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s))
 	for {
 		select {
 		case <-time.After(monitorVersionInterval):
 		case <-s.clusterVersionChanged.Receive():
 		case <-s.stopping:
+			lg.Info("server has stopped; stopping storage version's monitor")
 			return
 		}
 		monitor.UpdateStorageVersionIfNeeded()
@@ -2218,6 +2222,7 @@ func (s *EtcdServer) monitorKVHash() {
 	for {
 		select {
 		case <-s.stopping:
+			lg.Info("server has stopped; stopping kv hash's monitor")
 			return
 		case <-checkTicker.C:
 		}
@@ -2239,6 +2244,8 @@ func (s *EtcdServer) monitorCompactHash() {
 		select {
 		case <-time.After(t):
 		case <-s.stopping:
+			lg := s.Logger()
+			lg.Info("server has stopped; stopping compact hash's monitor")
 			return
 		}
 		if !s.isLeader() {
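
All four hunks make the same change to the same goroutine shape: a monitor loop that blocks in a select on a timer or notification channel and on s.stopping, and now logs why it is exiting before returning. Below is a minimal standalone sketch of that pattern in Go; the package, type, and names (monitorSketch, stopping, doWork) are illustrative stand-ins, not etcd's actual code.

package monitorsketch

import (
	"time"

	"go.uber.org/zap"
)

// monitorSketch is an illustrative stand-in for the server fields the real
// monitors use (a logger and a stop channel); it is not part of etcd.
type monitorSketch struct {
	lg       *zap.Logger
	stopping <-chan struct{}
}

// run blocks on either the periodic timer or the stop channel, mirroring the
// loops in the diff: on shutdown it logs why it is exiting and returns.
func (m *monitorSketch) run(interval time.Duration, doWork func()) {
	for {
		select {
		case <-time.After(interval):
		case <-m.stopping:
			m.lg.Info("server has stopped; stopping monitor")
			return
		}
		doWork()
	}
}

The value of the added log lines follows from this shape: when a test hangs or flakes around shutdown, the message shows which monitor has (or has not) observed the stop signal.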

File 2 of 2:

@@ -28,6 +28,7 @@ import (
 	"go.etcd.io/etcd/server/v3/embed"
 	"go.etcd.io/etcd/server/v3/storage/wal"
 	"go.etcd.io/etcd/server/v3/storage/wal/walpb"
+	framecfg "go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
 )
@@ -45,28 +46,62 @@ func TestEtcdVersionFromWAL(t *testing.T) {
 		t.Fatalf("failed to start embed.Etcd for test")
 	}
+
+	// When the member becomes leader, it updates the cluster version with
+	// the cluster's minimum version. Since the update is asynchronous, it
+	// may not have completed before the server is closed. Wait for it to
+	// become ready.
+	if err := waitForClusterVersionReady(srv); err != nil {
+		srv.Close()
+		t.Fatalf("failed to wait for cluster version to become ready: %v", err)
+	}
+
 	ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
 	cli, err := integration.NewClient(t, ccfg)
 	if err != nil {
 		srv.Close()
 		t.Fatal(err)
 	}
-	// Get auth status to increase etcd version of proto stored in wal
+
+	// Once the cluster version has been updated, each entity's storage
+	// version should be aligned with the cluster version.
 	ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout)
-	cli.AuthStatus(ctx)
+	_, err = cli.AuthStatus(ctx)
 	cancel()
+	if err != nil {
+		srv.Close()
+		t.Fatalf("failed to get auth status: %v", err)
+	}
+
 	cli.Close()
 	srv.Close()
+
 	w, err := wal.Open(zap.NewNop(), cfg.Dir+"/member/wal", walpb.Snapshot{})
 	if err != nil {
-		panic(err)
+		t.Fatal(err)
 	}
 	defer w.Close()
 	walVersion, err := wal.ReadWALVersion(w)
 	if err != nil {
 		t.Fatal(err)
 	}
 	assert.Equal(t, &semver.Version{Major: 3, Minor: 6}, walVersion.MinimalEtcdVersion())
 }
+
+func waitForClusterVersionReady(srv *embed.Etcd) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+		if srv.Server.ClusterVersion() != nil {
+			return nil
+		}
+		time.Sleep(framecfg.TickDuration)
+	}
+}
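
As a design note, waitForClusterVersionReady hard-codes the condition it polls. The same deadline-bounded polling can be written as a generic helper; the sketch below only illustrates that pattern, and waitUntil is hypothetical, not part of etcd's test framework.

package waitsketch

import (
	"context"
	"time"
)

// waitUntil is a hypothetical generic form of the polling loop in
// waitForClusterVersionReady: evaluate cond every tick until it returns
// true or the caller's context expires.
func waitUntil(ctx context.Context, tick time.Duration, cond func() bool) error {
	for {
		if cond() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(tick):
		}
	}
}

With such a helper, the wait in the test would read roughly: waitUntil(ctx, framecfg.TickDuration, func() bool { return srv.Server.ClusterVersion() != nil }).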