From 0b2d31f3bcd7d6113a33da51c69b7f8baa6ea48e Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Wed, 30 Dec 2015 16:11:01 +0900 Subject: [PATCH] storage: decouple default parameters and storage creation newStore() uses constants for some important parameters e.g. batchInterval. It is not suitable for other storage users (e.g. tools/benchmark). This commit decouples the default parameters from storage creation. etcd should use a newly added function newDefaultStore() for creating a store with default parameters. --- storage/kv_test.go | 36 +++++++++++++-------------- storage/kvstore.go | 6 ++++- storage/kvstore_bench_test.go | 4 +-- storage/kvstore_compaction_test.go | 2 +- storage/kvstore_test.go | 10 ++++---- storage/watchable_store.go | 2 +- storage/watchable_store_bench_test.go | 2 +- storage/watchable_store_test.go | 4 +-- 8 files changed, 35 insertions(+), 31 deletions(-) diff --git a/storage/kv_test.go b/storage/kv_test.go index 80b3ec129..f4dd1eb94 100644 --- a/storage/kv_test.go +++ b/storage/kv_test.go @@ -89,7 +89,7 @@ func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) } func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) } func testKVRange(t *testing.T, f rangeFunc) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar")) @@ -156,7 +156,7 @@ func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) } func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) } func testKVRangeRev(t *testing.T, f rangeFunc) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar")) @@ -198,7 +198,7 @@ func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) } func testKVRangeBadRev(t *testing.T, f rangeFunc) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) 
s.Put([]byte("foo"), []byte("bar")) @@ -230,7 +230,7 @@ func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) } func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) } func testKVRangeLimit(t *testing.T, f rangeFunc) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar")) @@ -274,7 +274,7 @@ func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalP func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) } func testKVPutMultipleTimes(t *testing.T, f putFunc) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) for i := 0; i < 10; i++ { @@ -335,7 +335,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) { } for i, tt := range tests { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) s.Put([]byte("foo"), []byte("bar")) s.Put([]byte("foo1"), []byte("bar1")) @@ -354,7 +354,7 @@ func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, n func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) } func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar")) @@ -374,7 +374,7 @@ func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) { // test that range, put, delete on single key in sequence repeatedly works correctly. 
func TestKVOperationInSequence(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) for i := 0; i < 10; i++ { @@ -420,7 +420,7 @@ func TestKVOperationInSequence(t *testing.T) { } func TestKVTxnBlockNonTnxOperations(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) tests := []func(){ @@ -451,7 +451,7 @@ func TestKVTxnBlockNonTnxOperations(t *testing.T) { } func TestKVTxnWrongID(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) id := s.TxnBegin() @@ -487,7 +487,7 @@ func TestKVTxnWrongID(t *testing.T) { // test that txn range, put, delete on single key in sequence repeatedly works correctly. func TestKVTnxOperationInSequence(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) for i := 0; i < 10; i++ { @@ -542,7 +542,7 @@ func TestKVTnxOperationInSequence(t *testing.T) { } func TestKVCompactReserveLastValue(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar0")) @@ -595,7 +595,7 @@ func TestKVCompactReserveLastValue(t *testing.T) { } func TestKVCompactBad(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar0")) @@ -627,7 +627,7 @@ func TestKVHash(t *testing.T) { for i := 0; i < len(hashes); i++ { var err error - kv := newStore(tmpPath) + kv := newDefaultStore(tmpPath) kv.Put([]byte("foo0"), []byte("bar0")) kv.Put([]byte("foo1"), []byte("bar0")) hashes[i], err = kv.Hash() @@ -663,7 +663,7 @@ func TestKVRestore(t *testing.T) { }, } for i, tt := range tests { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) tt(s) var kvss [][]storagepb.KeyValue for k := int64(0); k < 10; k++ { @@ -672,7 +672,7 @@ func TestKVRestore(t *testing.T) { } s.Close() - ns := newStore(tmpPath) + ns := newDefaultStore(tmpPath) ns.Restore() // 
wait for possible compaction to finish testutil.WaitSchedule() @@ -690,7 +690,7 @@ } func TestKVSnapshot(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) s.Put([]byte("foo"), []byte("bar")) @@ -714,7 +714,7 @@ func TestKVSnapshot(t *testing.T) { } f.Close() - ns := newStore("new_test") + ns := newDefaultStore("new_test") defer cleanup(ns, "new_test") ns.Restore() kvs, rev, err := ns.Range([]byte("a"), []byte("z"), 0, 0) diff --git a/storage/kvstore.go b/storage/kvstore.go index 4761b938e..bbc466508 100644 --- a/storage/kvstore.go +++ b/storage/kvstore.go @@ -65,7 +65,7 @@ type store struct { stopc chan struct{} } -func newStore(path string) *store { +func NewStore(path string, batchInterval time.Duration, batchLimit int) KV { s := &store{ b: backend.New(path, batchInterval, batchLimit), kvindex: newTreeIndex(), @@ -84,6 +84,10 @@ return s } +func newDefaultStore(path string) *store { + return (NewStore(path, batchInterval, batchLimit)).(*store) +} + func (s *store) Rev() int64 { s.mu.Lock() defer s.mu.Unlock() diff --git a/storage/kvstore_bench_test.go b/storage/kvstore_bench_test.go index 70874de5e..6d53dc5f7 100644 --- a/storage/kvstore_bench_test.go +++ b/storage/kvstore_bench_test.go @@ -20,7 +20,7 @@ import ( ) func BenchmarkStorePut(b *testing.B) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer os.Remove(tmpPath) // arbitrary number of bytes @@ -38,7 +38,7 @@ // with transaction begin and end, where transaction involves // some synchronization operations, such as mutex locking. 
func BenchmarkStoreTxnPut(b *testing.B) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) // arbitrary number of bytes diff --git a/storage/kvstore_compaction_test.go b/storage/kvstore_compaction_test.go index 957bd0021..c7e44210d 100644 --- a/storage/kvstore_compaction_test.go +++ b/storage/kvstore_compaction_test.go @@ -58,7 +58,7 @@ func TestScheduleCompaction(t *testing.T) { }, } for i, tt := range tests { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) tx := s.b.BatchTx() tx.Lock() diff --git a/storage/kvstore_test.go b/storage/kvstore_test.go index 1b50a3ce7..64fa2156e 100644 --- a/storage/kvstore_test.go +++ b/storage/kvstore_test.go @@ -29,7 +29,7 @@ import ( ) func TestStoreRev(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer os.Remove(tmpPath) for i := 0; i < 3; i++ { @@ -354,7 +354,7 @@ func TestStoreRestore(t *testing.T) { } func TestRestoreContinueUnfinishedCompaction(t *testing.T) { - s0 := newStore(tmpPath) + s0 := newDefaultStore(tmpPath) defer os.Remove(tmpPath) s0.Put([]byte("foo"), []byte("bar")) @@ -371,7 +371,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) { s0.Close() - s1 := newStore(tmpPath) + s1 := newDefaultStore(tmpPath) s1.Restore() // wait for scheduled compaction to be finished @@ -409,7 +409,7 @@ func TestTxnPut(t *testing.T) { keys := createBytesSlice(bytesN, sliceN) vals := createBytesSlice(bytesN, sliceN) - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer cleanup(s, tmpPath) for i := 0; i < sliceN; i++ { @@ -429,7 +429,7 @@ func TestTxnPut(t *testing.T) { } func TestTxnBlockBackendForceCommit(t *testing.T) { - s := newStore(tmpPath) + s := newDefaultStore(tmpPath) defer os.Remove(tmpPath) id := s.TxnBegin() diff --git a/storage/watchable_store.go b/storage/watchable_store.go index 1485b8468..e2aed0d7f 100644 --- a/storage/watchable_store.go +++ b/storage/watchable_store.go @@ -55,7 +55,7 @@ type watchableStore struct { func 
newWatchableStore(path string) *watchableStore { s := &watchableStore{ - store: newStore(path), + store: newDefaultStore(path), unsynced: make(map[*watching]struct{}), synced: make(map[string]map[*watching]struct{}), stopc: make(chan struct{}), diff --git a/storage/watchable_store_bench_test.go b/storage/watchable_store_bench_test.go index e247cd606..cad6a9d6e 100644 --- a/storage/watchable_store_bench_test.go +++ b/storage/watchable_store_bench_test.go @@ -33,7 +33,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) { // method to sync watchers in unsynced map. We want to keep watchers // in unsynced for this benchmark. s := &watchableStore{ - store: newStore(tmpPath), + store: newDefaultStore(tmpPath), unsynced: make(map[*watching]struct{}), // to make the test not crash from assigning to nil map. diff --git a/storage/watchable_store_test.go b/storage/watchable_store_test.go index e27a9b7da..03fc4f30b 100644 --- a/storage/watchable_store_test.go +++ b/storage/watchable_store_test.go @@ -66,7 +66,7 @@ func TestCancelUnsynced(t *testing.T) { // method to sync watchers in unsynced map. We want to keep watchers // in unsynced to test if syncWatchers works as expected. s := &watchableStore{ - store: newStore(tmpPath), + store: newDefaultStore(tmpPath), unsynced: make(map[*watching]struct{}), // to make the test not crash from assigning to nil map. @@ -120,7 +120,7 @@ func TestCancelUnsynced(t *testing.T) { // watchings to synced. func TestSyncWatchings(t *testing.T) { s := &watchableStore{ - store: newStore(tmpPath), + store: newDefaultStore(tmpPath), unsynced: make(map[*watching]struct{}), synced: make(map[string]map[*watching]struct{}), }