Merge pull request #17977 from ivanvc/release-3.5-backport-etcdutl-memory-allocation-issue

[3.5] backport etcdutl memory allocation issue
commit 8f6d2dc403
Author: Marek Siarkowicz
Date:   2024-05-10 10:50:23 +02:00 (committed by GitHub)
4 changed files with 36 additions and 10 deletions
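
Context for this backport: etcdutl snapshot restore (and the deprecated etcdctl wrapper) previously always opened the restored bbolt database with the backend's hard-coded 10 GiB initial mmap size (see the backend.go hunk at the bottom), which is the memory allocation issue being addressed. The fix threads a new --initial-memory-map-size flag from both CLIs down to the storage backend. A hypothetical invocation (snapshot path, data dir, and size are illustrative):

    etcdutl snapshot restore snapshot.db --data-dir m1.etcd --initial-memory-map-size 2147483648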

etcdctl/ctlv3/command/snapshot_command.go

@@ -42,6 +42,7 @@ var (
 	skipHashCheck       bool
 	markCompacted       bool
 	revisionBump        uint64
+	initialMmapSize     uint64
 )
 
 // NewSnapshotCommand returns the cobra command for "snapshot".
@@ -93,6 +94,7 @@ func NewSnapshotRestoreCommand() *cobra.Command {
 	cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
 	cmd.Flags().Uint64Var(&revisionBump, "bump-revision", 0, "How much to increase the latest revision after restore")
 	cmd.Flags().BoolVar(&markCompacted, "mark-compacted", false, "Mark the latest revision after restore as the point of scheduled compaction (required if --bump-revision > 0, disallowed otherwise)")
+	cmd.Flags().Uint64Var(&initialMmapSize, "initial-memory-map-size", initialMmapSize, "Initial memory map size of the database in bytes. It uses the default value if not defined or defined to 0")
 
 	return cmd
 }
@@ -131,7 +133,7 @@ func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
 func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
 	fmt.Fprintf(os.Stderr, "Deprecated: Use `etcdutl snapshot restore` instead.\n\n")
 	etcdutl.SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir,
-		restorePeerURLs, restoreName, skipHashCheck, revisionBump, markCompacted, args)
+		restorePeerURLs, restoreName, skipHashCheck, initialMmapSize, revisionBump, markCompacted, args)
 }
 
 func initialClusterFromName(name string) string {

etcdutl/etcdutl/snapshot_command.go

@@ -21,6 +21,7 @@ import (
 	"go.etcd.io/etcd/etcdutl/v3/snapshot"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
 	"go.etcd.io/etcd/server/v3/datadir"
+	"go.etcd.io/etcd/server/v3/mvcc/backend"
 
 	"github.com/spf13/cobra"
 )
@@ -38,6 +39,7 @@ var (
 	restorePeerURLs     string
 	restoreName         string
 	skipHashCheck       bool
+	initialMmapSize     = backend.InitialMmapSize
 	markCompacted       bool
 	revisionBump        uint64
 )
@@ -93,6 +95,7 @@ func NewSnapshotRestoreCommand() *cobra.Command {
 	cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster")
 	cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
 	cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
+	cmd.Flags().Uint64Var(&initialMmapSize, "initial-memory-map-size", initialMmapSize, "Initial memory map size of the database in bytes. It uses the default value if not defined or defined to 0")
 	cmd.Flags().Uint64Var(&revisionBump, "bump-revision", 0, "How much to increase the latest revision after restore")
 	cmd.Flags().BoolVar(&markCompacted, "mark-compacted", false, "Mark the latest revision after restore as the point of scheduled compaction (required if --bump-revision > 0, disallowed otherwise)")
@@ -119,7 +122,7 @@ func SnapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
 func snapshotRestoreCommandFunc(_ *cobra.Command, args []string) {
 	SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir,
-		restorePeerURLs, restoreName, skipHashCheck, revisionBump, markCompacted, args)
+		restorePeerURLs, restoreName, skipHashCheck, initialMmapSize, revisionBump, markCompacted, args)
 }
 
 func SnapshotRestoreCommandFunc(restoreCluster string,
@@ -129,6 +132,7 @@ func SnapshotRestoreCommandFunc(restoreCluster string,
 	restorePeerURLs string,
 	restoreName string,
 	skipHashCheck bool,
+	initialMmapSize uint64,
 	revisionBump uint64,
 	markCompacted bool,
 	args []string) {
@@ -164,6 +168,7 @@ func SnapshotRestoreCommandFunc(restoreCluster string,
 		InitialCluster:      restoreCluster,
 		InitialClusterToken: restoreClusterToken,
 		SkipHashCheck:       skipHashCheck,
+		InitialMmapSize:     initialMmapSize,
 		RevisionBump:        revisionBump,
 		MarkCompacted:       markCompacted,
 	}); err != nil {
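
Note for downstream callers: SnapshotRestoreCommandFunc is exported, so the added parameter changes its signature. A minimal sketch of a direct call with the updated argument order; the import path for the etcdutl package is an assumption, and all values are illustrative:

package main

import "go.etcd.io/etcd/etcdutl/v3/etcdutl"

func main() {
	// Argument order matches the updated signature above; note that
	// initialMmapSize now sits between skipHashCheck and revisionBump.
	etcdutl.SnapshotRestoreCommandFunc(
		"m1=http://127.0.0.1:2380", // restoreCluster
		"etcd-cluster",             // restoreClusterToken
		"m1.etcd",                  // restoreDataDir
		"",                         // restoreWalDir
		"http://127.0.0.1:2380",    // restorePeerURLs
		"m1",                       // restoreName
		false,                      // skipHashCheck
		2*1024*1024*1024,           // initialMmapSize: 2 GiB instead of the 10 GiB default
		0,                          // revisionBump
		false,                      // markCompacted
		[]string{"snapshot.db"},    // args: the snapshot file to restore
	)
}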

etcdutl/snapshot/v3_snapshot.go

@@ -83,7 +83,8 @@ type v3Manager struct {
 	snapDir string
 	cl      *membership.RaftCluster
 
-	skipHashCheck bool
+	skipHashCheck   bool
+	initialMmapSize uint64
 }
 
 // hasChecksum returns "true" if the file size "n"
@@ -197,6 +198,9 @@ type RestoreConfig struct {
 	// (required if copied from data directory).
 	SkipHashCheck bool
 
+	// InitialMmapSize is the database initial memory map size.
+	InitialMmapSize uint64
+
 	// RevisionBump is the amount to increase the latest revision after restore,
 	// to allow administrators to trick clients into thinking that revision never decreased.
 	// If 0, revision bumping is skipped.
@@ -256,6 +260,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
 	s.walDir = walDir
 	s.snapDir = filepath.Join(dataDir, "member", "snap")
 	s.skipHashCheck = cfg.SkipHashCheck
+	s.initialMmapSize = cfg.InitialMmapSize
 
 	s.lg.Info(
 		"restoring snapshot",
@@ -263,6 +268,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
 		zap.String("wal-dir", s.walDir),
 		zap.String("data-dir", dataDir),
 		zap.String("snap-dir", s.snapDir),
+		zap.Uint64("initial-memory-map-size", s.initialMmapSize),
 	)
 
 	if err = s.saveDB(); err != nil {
@@ -290,6 +296,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
 		zap.String("wal-dir", s.walDir),
 		zap.String("data-dir", dataDir),
 		zap.String("snap-dir", s.snapDir),
+		zap.Uint64("initial-memory-map-size", s.initialMmapSize),
 	)
 
 	return verify.VerifyIfEnabled(verify.Config{
@@ -310,7 +317,7 @@ func (s *v3Manager) saveDB() error {
 		return err
 	}
 
-	be := backend.NewDefaultBackend(s.outDbPath())
+	be := backend.NewDefaultBackend(s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
 	defer be.Close()
 
 	err = membership.TrimMembershipFromBackend(s.lg, be)
@@ -324,7 +331,7 @@ func (s *v3Manager) saveDB() error {
 // modifyLatestRevision can increase the latest revision by the given amount and sets the scheduled compaction
 // to that revision so that the server will consider this revision compacted.
 func (s *v3Manager) modifyLatestRevision(bumpAmount uint64) error {
-	be := backend.NewDefaultBackend(s.outDbPath())
+	be := backend.NewDefaultBackend(s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
 	defer func() {
 		be.ForceCommit()
 		be.Close()
@@ -467,7 +474,7 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
 	// add members again to persist them to the store we create.
 	st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
 	s.cl.SetStore(st)
-	be := backend.NewDefaultBackend(s.outDbPath())
+	be := backend.NewDefaultBackend(s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
 	defer be.Close()
 	s.cl.SetBackend(be)
 	for _, m := range s.cl.Members() {
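
For library consumers, the same knob is reachable through RestoreConfig. A minimal sketch, assuming the snapshot.NewV3 constructor and the RestoreConfig fields not visible in the hunks above (SnapshotPath, Name, OutputDataDir, PeerURLs); all values are illustrative:

package main

import (
	"go.etcd.io/etcd/etcdutl/v3/snapshot"
	"go.uber.org/zap"
)

func main() {
	lg := zap.NewExample()
	sp := snapshot.NewV3(lg) // assumed constructor for this package's Manager
	if err := sp.Restore(snapshot.RestoreConfig{
		SnapshotPath:        "snapshot.db",
		Name:                "m1",
		OutputDataDir:       "m1.etcd",
		PeerURLs:            []string{"http://127.0.0.1:2380"},
		InitialCluster:      "m1=http://127.0.0.1:2380",
		InitialClusterToken: "etcd-cluster",
		SkipHashCheck:       false,
		InitialMmapSize:     2 * 1024 * 1024 * 1024, // 2 GiB instead of the 10 GiB default
	}); err != nil {
		lg.Fatal("restore failed", zap.Error(err))
	}
}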

server/mvcc/backend/backend.go

@@ -36,10 +36,10 @@ var (
 	defragLimit = 10000
 
-	// initialMmapSize is the initial size of the mmapped region. Setting this larger than
+	// InitialMmapSize is the initial size of the mmapped region. Setting this larger than
 	// the potential max db size can prevent writer from blocking reader.
 	// This only works for linux.
-	initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+	InitialMmapSize = uint64(10 * 1024 * 1024 * 1024)
 
 	// minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
 	minSnapshotWarningTimeout = 30 * time.Second
@@ -151,11 +151,13 @@ type BackendConfig struct {
 	Hooks Hooks
 }
 
+type BackendConfigOption func(*BackendConfig)
+
 func DefaultBackendConfig() BackendConfig {
 	return BackendConfig{
 		BatchInterval: defaultBatchInterval,
		BatchLimit:    defaultBatchLimit,
-		MmapSize:      initialMmapSize,
+		MmapSize:      InitialMmapSize,
 	}
 }
@@ -163,9 +165,19 @@ func New(bcfg BackendConfig) Backend {
 	return newBackend(bcfg)
 }
 
-func NewDefaultBackend(path string) Backend {
+func WithMmapSize(size uint64) BackendConfigOption {
+	return func(bcfg *BackendConfig) {
+		bcfg.MmapSize = size
+	}
+}
+
+func NewDefaultBackend(path string, opts ...BackendConfigOption) Backend {
 	bcfg := DefaultBackendConfig()
 	bcfg.Path = path
+	for _, opt := range opts {
+		opt(&bcfg)
+	}
+
 	return newBackend(bcfg)
 }
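
Design note: making the option variadic keeps this change safe for a patch release, since every pre-existing NewDefaultBackend(path) call site compiles unchanged. A minimal sketch of both call styles (the db paths are illustrative):

package main

import "go.etcd.io/etcd/server/v3/mvcc/backend"

func main() {
	// Open a backend with a 512 MiB initial mmap region instead of the
	// 10 GiB InitialMmapSize default.
	be := backend.NewDefaultBackend("/var/lib/etcd/member/snap/db",
		backend.WithMmapSize(512*1024*1024))
	defer be.Close()

	// Pre-existing call sites need no changes.
	be2 := backend.NewDefaultBackend("/tmp/restore/db")
	defer be2.Close()
}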