etcdserver, et al: add --unsafe-no-fsync flag
This makes it possible to run an etcd node for testing and development
without placing lots of load on the file system.

Fixes #11930.

Signed-off-by: David Crawshaw <crawshaw@tailscale.com>
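For context, a minimal sketch of exercising the new knob through the embed package. This is not part of the commit; the data directory and log messages are assumptions. On the command line the equivalent is simply starting etcd with --unsafe-no-fsync.

    package main

    import (
    	"log"

    	"go.etcd.io/etcd/embed"
    )

    func main() {
    	cfg := embed.NewConfig()
    	cfg.Dir = "/tmp/etcd-dev" // hypothetical throwaway data dir for testing
    	cfg.UnsafeNoFsync = true  // field added by this commit: skip all fsync calls

    	e, err := embed.StartEtcd(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer e.Close()

    	<-e.Server.ReadyNotify() // wait for the server to be ready
    	log.Println("dev etcd up; fsync disabled, data loss possible on crash")
    	<-e.Err() // block until the server stops
    }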
@@ -335,6 +335,10 @@ type Config struct {
 	// Only valid if "logger" option is "capnslog".
 	// WARN: DO NOT USE THIS!
 	LogPkgLevels string `json:"log-package-levels"`
+
+	// UnsafeNoFsync disables all uses of fsync.
+	// Setting this is unsafe and will cause data loss.
+	UnsafeNoFsync bool `json:"unsafe-no-fsync"`
 }
 
 // configYAML holds the config suitable for yaml parsing
@@ -204,6 +204,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
 		Debug:                 cfg.Debug,
 		ForceNewCluster:       cfg.ForceNewCluster,
 		EnableGRPCGateway:     cfg.EnableGRPCGateway,
+		UnsafeNoFsync:         cfg.UnsafeNoFsync,
 		EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
 		CompactionBatchLimit:  cfg.ExperimentalCompactionBatchLimit,
 	}
@@ -258,6 +258,7 @@ func newConfig() *config {
 	fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
 
 	// unsafe
+	fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
 	fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")
 
 	// ignored
@@ -31,6 +31,7 @@ import (
 func newBackend(cfg ServerConfig) backend.Backend {
 	bcfg := backend.DefaultBackendConfig()
 	bcfg.Path = cfg.backendPath()
+	bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
 	if cfg.BackendBatchLimit != 0 {
 		bcfg.BatchLimit = cfg.BackendBatchLimit
 		if cfg.Logger != nil {
@@ -157,6 +157,10 @@ type ServerConfig struct {
 	LeaseCheckpointInterval time.Duration
 
 	EnableGRPCGateway bool
+
+	// UnsafeNoFsync disables all uses of fsync.
+	// Setting this is unsafe and will cause data loss.
+	UnsafeNoFsync bool `json:"unsafe-no-fsync"`
 }
 
 // VerifyBootstrap sanity-checks the initial config for bootstrap case
@@ -465,6 +465,9 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id
 			plog.Panicf("create wal error: %v", err)
 		}
 	}
+	if cfg.UnsafeNoFsync {
+		w.SetUnsafeNoFsync()
+	}
 	peers := make([]raft.Peer, len(ids))
 	for i, id := range ids {
 		var ctx []byte
@@ -527,7 +530,7 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member
 	if snapshot != nil {
 		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
 	}
-	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
+	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
 
 	if cfg.Logger != nil {
 		cfg.Logger.Info(
@@ -582,7 +585,7 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
 	if snapshot != nil {
 		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
 	}
-	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
+	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
 
 	// discard the previously uncommitted entries
 	for i, ent := range ents {
@@ -82,7 +82,7 @@ func (st *storage) Release(snap raftpb.Snapshot) error {
 // readWAL reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
 // after the position of the given snap in the WAL.
 // The snap must have been previously saved to the WAL, or this call will panic.
-func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
+func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot, unsafeNoFsync bool) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
 	var (
 		err       error
 		wmetadata []byte
@@ -97,6 +97,9 @@ func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id
 			plog.Fatalf("open wal error: %v", err)
 		}
 	}
+	if unsafeNoFsync {
+		w.SetUnsafeNoFsync()
+	}
 	if wmetadata, st, ents, err = w.ReadAll(); err != nil {
 		w.Close()
 		// we can only repair ErrUnexpectedEOF and we never repair twice.
@@ -123,6 +123,8 @@ type BackendConfig struct {
 	MmapSize uint64
 	// Logger logs backend-side operations.
 	Logger *zap.Logger
+	// UnsafeNoFsync disables all uses of fsync.
+	UnsafeNoFsync bool `json:"unsafe-no-fsync"`
 }
 
 func DefaultBackendConfig() BackendConfig {
@@ -150,6 +152,8 @@ func newBackend(bcfg BackendConfig) *backend {
 	}
 	bopts.InitialMmapSize = bcfg.mmapSize()
 	bopts.FreelistType = bcfg.BackendFreelistType
+	bopts.NoSync = bcfg.UnsafeNoFsync
+	bopts.NoGrowSync = bcfg.UnsafeNoFsync
 
 	db, err := bolt.Open(bcfg.Path, 0600, bopts)
 	if err != nil {
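The two bbolt options mirror each other: NoSync drops the fsync on every transaction commit, and NoGrowSync drops the fsync bbolt otherwise issues after growing the data file. A standalone sketch of the same settings against bbolt directly, not from this commit; the database path is an assumption.

    package main

    import (
    	"log"

    	bolt "go.etcd.io/bbolt"
    )

    func main() {
    	// Open a bbolt database with both durability syncs disabled, as the
    	// etcd backend now does when UnsafeNoFsync is set.
    	db, err := bolt.Open("/tmp/unsafe.db", 0600, &bolt.Options{
    		NoSync:     true, // skip fsync on each transaction commit
    		NoGrowSync: true, // skip fsync after growing the data file
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()
    }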
@@ -84,6 +84,8 @@ type WAL struct {
 	decoder   *decoder     // decoder to decode records
 	readClose func() error // closer for decode reader
 
+	unsafeNoSync bool // if set, do not fsync
+
 	mu      sync.Mutex
 	enti    uint64   // index of the last entry saved to the wal
 	encoder *encoder // encoder to encode records
@@ -233,6 +235,10 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
 	return w, nil
 }
 
+func (w *WAL) SetUnsafeNoFsync() {
+	w.unsafeNoSync = true
+}
+
 func (w *WAL) cleanupWAL(lg *zap.Logger) {
 	var err error
 	if err = w.Close(); err != nil {
@@ -768,6 +774,9 @@ func (w *WAL) cut() error {
 }
 
 func (w *WAL) sync() error {
+	if w.unsafeNoSync {
+		return nil
+	}
 	if w.encoder != nil {
 		if err := w.encoder.flush(); err != nil {
 			return err
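Taken together, the WAL changes make sync() a no-op once SetUnsafeNoFsync has been called. A hypothetical snippet, not in this commit, driving the new method directly; the temp-dir handling and payload are assumptions.

    package main

    import (
    	"io/ioutil"
    	"log"
    	"path/filepath"

    	"go.etcd.io/etcd/raft/raftpb"
    	"go.etcd.io/etcd/wal"
    	"go.uber.org/zap"
    )

    func main() {
    	parent, err := ioutil.TempDir("", "waltest")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// wal.Create refuses to reuse an existing directory, so point it at a
    	// child path that does not exist yet.
    	dir := filepath.Join(parent, "wal")

    	w, err := wal.Create(zap.NewExample(), dir, []byte("metadata"))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer w.Close()

    	w.SetUnsafeNoFsync() // method added by this commit: sync() now returns immediately

    	// Entries are appended to the WAL file but never fsynced; a crash here
    	// can lose them, which is exactly the documented trade-off.
    	ents := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte("hello")}}
    	if err := w.Save(raftpb.HardState{Term: 1, Commit: 1}, ents); err != nil {
    		log.Fatal(err)
    	}
    }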