Simplify KVStore interaction with cindex thanks to hooks.

Piotr Tabor
2021-04-12 19:49:58 +02:00
parent fe3254aee3
commit 2dbecea5b2
10 changed files with 84 additions and 103 deletions

View File

@@ -139,14 +139,3 @@ type Watchable interface {
 	// watch events happened or happening on the KV.
 	NewWatchStream() WatchStream
 }
-
-// ConsistentWatchableKV is a WatchableKV that understands the consistency
-// algorithm and consistent index.
-// If the consistent index of executing entry is not larger than the
-// consistent index of ConsistentWatchableKV, all operations in
-// this entry are skipped and return empty response.
-type ConsistentWatchableKV interface {
-	WatchableKV
-	// ConsistentIndex returns the current consistent index of the KV.
-	ConsistentIndex() uint64
-}
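With ConsistentWatchableKV removed, callers no longer ask the KV store for the consistent index; they read it through a cindex.ConsistentIndexer bound to the backend, as the updated BenchmarkConsistentIndex further down does. A minimal sketch under that assumption (illustration only, not code from this commit):

package example // sketch, not part of this commit

import (
	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
	"go.etcd.io/etcd/server/v3/mvcc/backend"
)

// consistentIndexOf reads the consistent index via a ConsistentIndexer,
// replacing the removed ConsistentWatchableKV.ConsistentIndex() accessor.
func consistentIndexOf(be backend.Backend) uint64 {
	ci := cindex.NewConsistentIndex(be) // constructor as used in the benchmark diff below
	return ci.ConsistentIndex()
}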

View File

@@ -69,8 +69,6 @@ type store struct {
 	// mu read locks for txns and write locks for non-txn store changes.
 	mu sync.RWMutex
-	ci cindex.ConsistentIndexer
 	b       backend.Backend
 	kvindex index
@@ -94,7 +92,7 @@ type store struct {
 // NewStore returns a new store. It is useful to create a store inside
 // mvcc pkg. It should only be used for testing externally.
-func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ci cindex.ConsistentIndexer, cfg StoreConfig) *store {
+func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {
 	if lg == nil {
 		lg = zap.NewNop()
 	}
@@ -104,7 +102,6 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ci cindex.Cons
 	s := &store{
 		cfg:     cfg,
 		b:       b,
-		ci:      ci,
 		kvindex: newTreeIndex(lg),
 		le:      le,
@@ -314,11 +311,6 @@ func init() {
 func (s *store) Commit() {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	tx := s.b.BatchTx()
-	tx.Lock()
-	s.saveIndex(tx)
-	tx.Unlock()
 	s.b.ForceCommit()
 }
@@ -342,8 +334,6 @@ func (s *store) Restore(b backend.Backend) error {
 	s.fifoSched = schedule.NewFIFOScheduler()
 	s.stopc = make(chan struct{})
-	s.ci.SetBatchTx(b.BatchTx())
-	s.ci.SetConsistentIndex(0)
 	return s.restore()
 }
@@ -436,9 +426,7 @@ func (s *store) restore() error {
 	tx.Unlock()
-	s.lg.Info("kvstore restored",
-		zap.Uint64("consistent-index", s.ConsistentIndex()),
-		zap.Int64("current-rev", s.currentRev))
+	s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev))
 	if scheduledCompact != 0 {
 		if _, err := s.compactLockfree(scheduledCompact); err != nil {
@@ -533,19 +521,6 @@ func (s *store) Close() error {
 	return nil
 }
 
-func (s *store) saveIndex(tx backend.BatchTx) {
-	if s.ci != nil {
-		s.ci.UnsafeSave(tx)
-	}
-}
-
-func (s *store) ConsistentIndex() uint64 {
-	if s.ci != nil {
-		return s.ci.ConsistentIndex()
-	}
-	return 0
-}
-
 func (s *store) setupMetricsReporter() {
 	b := s.b
 	reportDbTotalSizeInBytesMu.Lock()
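The deleted saveIndex/ConsistentIndex plumbing is what the hooks in the commit title replace: rather than the store writing the index in Commit() and at the end of every write transaction, a backend hook persists it whenever a batch transaction commits. A minimal sketch of such a hook, assuming a pre-commit callback along the lines wired up in the parent commit; the method name below is an assumption, only ci.UnsafeSave comes from this diff:

package example // sketch, not part of this commit

import (
	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
	"go.etcd.io/etcd/server/v3/mvcc/backend"
)

// cindexHook persists the consistent index as part of every backend commit,
// taking over the job of the removed store.saveIndex.
type cindexHook struct {
	ci cindex.ConsistentIndexer
}

// OnPreCommitUnsafe is assumed to be invoked by the backend just before it
// commits a batch transaction (the hook name is hypothetical, not from this diff).
func (h *cindexHook) OnPreCommitUnsafe(tx backend.BatchTx) {
	h.ci.UnsafeSave(tx) // same call the removed saveIndex made
}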

View File

@@ -18,6 +18,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
 	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
 	"go.etcd.io/etcd/server/v3/lease"
@@ -28,7 +29,7 @@ import (
 func BenchmarkStorePut(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes
@@ -47,7 +48,7 @@ func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
 func benchmarkStoreRange(b *testing.B, n int) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, be, tmpPath)
 
 	// 64 byte key/val
@@ -73,26 +74,30 @@ func benchmarkStoreRange(b *testing.B, n int) {
 }
 
 func BenchmarkConsistentIndex(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	be, _ := betesting.NewDefaultTmpBackend(b)
+	ci := cindex.NewConsistentIndex(be)
+	defer betesting.Close(b, be)
-	tx := s.b.BatchTx()
+	// This will force the index to be reread from scratch on each call.
+	ci.SetConsistentIndex(0)
+	tx := be.BatchTx()
 	tx.Lock()
-	s.saveIndex(tx)
+	cindex.UnsafeCreateMetaBucket(tx)
+	ci.UnsafeSave(tx)
 	tx.Unlock()
 	b.ReportAllocs()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		s.ConsistentIndex()
+		ci.ConsistentIndex()
 	}
 }
 
 // BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key
 func BenchmarkStorePutUpdate(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes
@@ -110,7 +115,7 @@ func BenchmarkStorePutUpdate(b *testing.B) {
 // some synchronization operations, such as mutex locking.
 func BenchmarkStoreTxnPut(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes
@@ -130,7 +135,7 @@ func BenchmarkStoreTxnPut(b *testing.B) {
 // benchmarkStoreRestore benchmarks the restore operation
 func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	// use closure to capture 's' to pick up the reassignment
 	defer func() { cleanup(s, be, tmpPath) }()
@@ -146,11 +151,11 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
 			txn.End()
 		}
 	}
-	s.Close()
+	assert.NoError(b, s.Close())
 
 	b.ReportAllocs()
 	b.ResetTimer()
-	s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 }
func BenchmarkStoreRestoreRevs1(b *testing.B) {

View File

@@ -104,7 +104,6 @@ func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
 func (tw *storeTxnWrite) End() {
 	// only update index if the txn modifies the mvcc state.
 	if len(tw.changes) != 0 {
-		tw.s.saveIndex(tw.tx)
 		// hold revMu lock to prevent new read txns from opening until writeback.
 		tw.s.revMu.Lock()
 		tw.s.currentRev++
View File

@@ -20,7 +20,6 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
-	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
 	"go.etcd.io/etcd/server/v3/lease"
 	"go.etcd.io/etcd/server/v3/mvcc/backend"
@@ -70,16 +69,16 @@ type watchableStore struct {
 // cancel operations.
 type cancelFunc func()
 
-func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, ci cindex.ConsistentIndexer, cfg StoreConfig) ConsistentWatchableKV {
-	return newWatchableStore(lg, b, le, ci, cfg)
+func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV {
+	return newWatchableStore(lg, b, le, cfg)
 }
 
-func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ci cindex.ConsistentIndexer, cfg StoreConfig) *watchableStore {
+func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
 	if lg == nil {
 		lg = zap.NewNop()
 	}
 	s := &watchableStore{
-		store:    NewStore(lg, b, le, ci, cfg),
+		store:    NewStore(lg, b, le, cfg),
 		victimc:  make(chan struct{}, 1),
 		unsynced: newWatcherGroup(),
 		synced:   newWatcherGroup(),
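With the ci parameter dropped from New and newWatchableStore, the consistent indexer and the MVCC store are now wired up independently by the caller. A hypothetical caller-side sketch using the new signatures from this file (the lessor and empty StoreConfig are placeholders):

package example // sketch, not part of this commit

import (
	"go.uber.org/zap"

	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
	"go.etcd.io/etcd/server/v3/lease"
	"go.etcd.io/etcd/server/v3/mvcc"
	"go.etcd.io/etcd/server/v3/mvcc/backend"
)

// newKVWithIndex builds the watchable KV and the consistent indexer side by
// side, since New no longer accepts a cindex.ConsistentIndexer.
func newKVWithIndex(lg *zap.Logger, be backend.Backend, le lease.Lessor) (mvcc.WatchableKV, cindex.ConsistentIndexer) {
	ci := cindex.NewConsistentIndex(be)            // index bound to the backend
	kv := mvcc.New(lg, be, le, mvcc.StoreConfig{}) // new signature from this file
	return kv, ci
}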