Merge pull request #8563 from fanminshi/make_auto_compaction_granular

*: support auto-compaction with finer granularity
This commit is contained in:
fanmin shi 2017-09-29 11:18:51 -07:00 committed by GitHub
commit bcef78c665
8 changed files with 76 additions and 59 deletions

View File

@ -29,8 +29,7 @@ var (
) )
const ( const (
checkCompactionInterval = 5 * time.Minute checkCompactionInterval = 5 * time.Minute
executeCompactionInterval = time.Hour
ModePeriodic = "periodic" ModePeriodic = "periodic"
ModeRevision = "revision" ModeRevision = "revision"
@ -57,7 +56,7 @@ type RevGetter interface {
Rev() int64 Rev() int64
} }
func New(mode string, retention int, rg RevGetter, c Compactable) (Compactor, error) { func New(mode string, retention time.Duration, rg RevGetter, c Compactable) (Compactor, error) {
switch mode { switch mode {
case ModePeriodic: case ModePeriodic:
return NewPeriodic(retention, rg, c), nil return NewPeriodic(retention, rg, c), nil

View File

@ -26,10 +26,10 @@ import (
) )
// Periodic compacts the log by purging revisions older than // Periodic compacts the log by purging revisions older than
// the configured retention time. Compaction happens hourly. // the configured retention time.
type Periodic struct { type Periodic struct {
clock clockwork.Clock clock clockwork.Clock
periodInHour int period time.Duration
rg RevGetter rg RevGetter
c Compactable c Compactable
@ -38,26 +38,30 @@ type Periodic struct {
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
mu sync.Mutex // mu protects paused
mu sync.RWMutex
paused bool paused bool
} }
// NewPeriodic creates a new instance of Periodic compactor that purges // NewPeriodic creates a new instance of Periodic compactor that purges
// the log older than h hours. // the log older than the retention duration h.
func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic { func NewPeriodic(h time.Duration, rg RevGetter, c Compactable) *Periodic {
return &Periodic{ return &Periodic{
clock: clockwork.NewRealClock(), clock: clockwork.NewRealClock(),
periodInHour: h, period: h,
rg: rg, rg: rg,
c: c, c: c,
} }
} }
// periodDivisor divides Periodic.period into checkCompactInterval duration
const periodDivisor = 10
func (t *Periodic) Run() { func (t *Periodic) Run() {
t.ctx, t.cancel = context.WithCancel(context.Background()) t.ctx, t.cancel = context.WithCancel(context.Background())
t.revs = make([]int64, 0) t.revs = make([]int64, 0)
clock := t.clock clock := t.clock
checkCompactInterval := t.period / time.Duration(periodDivisor)
go func() { go func() {
last := clock.Now() last := clock.Now()
for { for {
@ -65,7 +69,7 @@ func (t *Periodic) Run() {
select { select {
case <-t.ctx.Done(): case <-t.ctx.Done():
return return
case <-clock.After(checkCompactionInterval): case <-clock.After(checkCompactInterval):
t.mu.Lock() t.mu.Lock()
p := t.paused p := t.paused
t.mu.Unlock() t.mu.Unlock()
@ -73,25 +77,21 @@ func (t *Periodic) Run() {
continue continue
} }
} }
if clock.Now().Sub(last) < t.period {
if clock.Now().Sub(last) < executeCompactionInterval {
continue continue
} }
rev, remaining := t.getRev()
rev, remaining := t.getRev(t.periodInHour)
if rev < 0 { if rev < 0 {
continue continue
} }
plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, t.period)
plog.Noticef("Starting auto-compaction at revision %d (retention: %d hours)", rev, t.periodInHour)
_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
if err == nil || err == mvcc.ErrCompacted { if err == nil || err == mvcc.ErrCompacted {
t.revs = remaining t.revs = remaining
last = clock.Now()
plog.Noticef("Finished auto-compaction at revision %d", rev) plog.Noticef("Finished auto-compaction at revision %d", rev)
} else { } else {
plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
plog.Noticef("Retry after %v", checkCompactionInterval) plog.Noticef("Retry after %v", checkCompactInterval)
} }
} }
}() }()
@ -113,8 +113,8 @@ func (t *Periodic) Resume() {
t.paused = false t.paused = false
} }
func (t *Periodic) getRev(h int) (int64, []int64) { func (t *Periodic) getRev() (int64, []int64) {
i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) i := len(t.revs) - periodDivisor
if i < 0 { if i < 0 {
return -1, t.revs return -1, t.revs
} }

View File

@ -26,39 +26,36 @@ import (
func TestPeriodic(t *testing.T) { func TestPeriodic(t *testing.T) {
retentionHours := 2 retentionHours := 2
retentionDuration := time.Duration(retentionHours) * time.Hour
fc := clockwork.NewFakeClock() fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
compactable := &fakeCompactable{testutil.NewRecorderStream()} compactable := &fakeCompactable{testutil.NewRecorderStream()}
tb := &Periodic{ tb := &Periodic{
clock: fc, clock: fc,
periodInHour: retentionHours, period: retentionDuration,
rg: rg, rg: rg,
c: compactable, c: compactable,
} }
tb.Run() tb.Run()
defer tb.Stop() defer tb.Stop()
checkCompactInterval := retentionDuration / time.Duration(periodDivisor)
n := int(time.Hour / checkCompactionInterval) n := periodDivisor
// collect 5 hours of revisions // simulate 5 hours' worth of intervals.
for i := 0; i < 5; i++ { for i := 0; i < n/retentionHours*5; i++ {
// advance one hour, one revision for each interval rg.Wait(1)
for j := 0; j < n; j++ { fc.Advance(checkCompactInterval)
rg.Wait(1) // compaction doesn't happen til 2 hours elapses.
fc.Advance(checkCompactionInterval) if i < n {
}
// compaction doesn't happen til 2 hours elapses
if i+1 < retentionHours {
continue continue
} }
// after 2 hours, compaction happens at every checkCompactInterval.
a, err := compactable.Wait(1) a, err := compactable.Wait(1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
expectedRevision := int64(1 + (i+1)*n - retentionHours*n) expectedRevision := int64(i + 1 - n)
if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) {
t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision})
} }
@ -75,21 +72,23 @@ func TestPeriodicPause(t *testing.T) {
fc := clockwork.NewFakeClock() fc := clockwork.NewFakeClock()
compactable := &fakeCompactable{testutil.NewRecorderStream()} compactable := &fakeCompactable{testutil.NewRecorderStream()}
rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
retentionDuration := time.Hour
tb := &Periodic{ tb := &Periodic{
clock: fc, clock: fc,
periodInHour: 1, period: retentionDuration,
rg: rg, rg: rg,
c: compactable, c: compactable,
} }
tb.Run() tb.Run()
tb.Pause() tb.Pause()
// tb will collect 3 hours of revisions but not compact since paused // tb will collect 3 hours of revisions but not compact since paused
n := int(time.Hour / checkCompactionInterval) checkCompactInterval := retentionDuration / time.Duration(periodDivisor)
n := periodDivisor
for i := 0; i < 3*n; i++ { for i := 0; i < 3*n; i++ {
rg.Wait(1) rg.Wait(1)
fc.Advance(checkCompactionInterval) fc.Advance(checkCompactInterval)
} }
// tb ends up waiting for the clock // tb ends up waiting for the clock
@ -102,14 +101,14 @@ func TestPeriodicPause(t *testing.T) {
// tb resumes to being blocked on the clock // tb resumes to being blocked on the clock
tb.Resume() tb.Resume()
// unblock clock, will kick off a compaction at hour 3:05 // unblock clock, will kick off a compaction at hour 3:06
rg.Wait(1) rg.Wait(1)
fc.Advance(checkCompactionInterval) fc.Advance(checkCompactInterval)
a, err := compactable.Wait(1) a, err := compactable.Wait(1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// compact the revision from hour 2:05 // compact the revision from hour 2:06
wreq := &pb.CompactionRequest{Revision: int64(1 + 2*n + 1)} wreq := &pb.CompactionRequest{Revision: int64(1 + 2*n + 1)}
if !reflect.DeepEqual(a[0].Params[0], wreq) { if !reflect.DeepEqual(a[0].Params[0], wreq) {
t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision) t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision)

View File

@ -84,7 +84,7 @@ type Config struct {
MaxWalFiles uint `json:"max-wals"` MaxWalFiles uint `json:"max-wals"`
Name string `json:"name"` Name string `json:"name"`
SnapCount uint64 `json:"snapshot-count"` SnapCount uint64 `json:"snapshot-count"`
AutoCompactionRetention int `json:"auto-compaction-retention"` AutoCompactionRetention string `json:"auto-compaction-retention"`
AutoCompactionMode string `json:"auto-compaction-mode"` AutoCompactionMode string `json:"auto-compaction-mode"`
// TickMs is the number of milliseconds between heartbeat ticks. // TickMs is the number of milliseconds between heartbeat ticks.

View File

@ -23,6 +23,7 @@ import (
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"sync" "sync"
"time" "time"
@ -128,6 +129,24 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
} }
} }
var (
autoCompactionRetention time.Duration
h int
)
// AutoCompactionRetention defaults to "0" if not set.
if len(cfg.AutoCompactionRetention) == 0 {
cfg.AutoCompactionRetention = "0"
}
h, err = strconv.Atoi(cfg.AutoCompactionRetention)
if err == nil {
autoCompactionRetention = time.Duration(int64(h)) * time.Hour
} else {
autoCompactionRetention, err = time.ParseDuration(cfg.AutoCompactionRetention)
if err != nil {
return nil, fmt.Errorf("error parsing AutoCompactionRetention: %v", err)
}
}
srvcfg := etcdserver.ServerConfig{ srvcfg := etcdserver.ServerConfig{
Name: cfg.Name, Name: cfg.Name,
ClientURLs: cfg.ACUrls, ClientURLs: cfg.ACUrls,
@ -146,7 +165,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
PeerTLSInfo: cfg.PeerTLSInfo, PeerTLSInfo: cfg.PeerTLSInfo,
TickMs: cfg.TickMs, TickMs: cfg.TickMs,
ElectionTicks: cfg.ElectionTicks(), ElectionTicks: cfg.ElectionTicks(),
AutoCompactionRetention: cfg.AutoCompactionRetention, AutoCompactionRetention: autoCompactionRetention,
AutoCompactionMode: cfg.AutoCompactionMode, AutoCompactionMode: cfg.AutoCompactionMode,
QuotaBackendBytes: cfg.QuotaBackendBytes, QuotaBackendBytes: cfg.QuotaBackendBytes,
MaxTxnOps: cfg.MaxTxnOps, MaxTxnOps: cfg.MaxTxnOps,

View File

@ -199,8 +199,8 @@ func newConfig() *config {
// version // version
fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.") fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.")
fs.IntVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", 0, "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.") fs.StringVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "Interpret 'auto-compaction-retention' as hours when 'periodic', as revision numbers when 'revision'.") fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.")
// pprof profiler via HTTP // pprof profiler via HTTP
fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"") fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")

View File

@ -105,7 +105,7 @@ clustering flags:
--auto-compaction-retention '0' --auto-compaction-retention '0'
auto compaction retention length. 0 means disable auto compaction. auto compaction retention length. 0 means disable auto compaction.
--auto-compaction-mode 'periodic' --auto-compaction-mode 'periodic'
'periodic' means hours, 'revision' means revision numbers to retain by auto compaction interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.
--enable-v2 --enable-v2
Accept etcd V2 client requests. Accept etcd V2 client requests.

View File

@ -51,7 +51,7 @@ type ServerConfig struct {
ElectionTicks int ElectionTicks int
BootstrapTimeout time.Duration BootstrapTimeout time.Duration
AutoCompactionRetention int AutoCompactionRetention time.Duration
AutoCompactionMode string AutoCompactionMode string
QuotaBackendBytes int64 QuotaBackendBytes int64
MaxTxnOps uint MaxTxnOps uint