diff --git a/server/storage/mvcc/hash_test.go b/server/storage/mvcc/hash_test.go
index f0d516276..d906d41f1 100644
--- a/server/storage/mvcc/hash_test.go
+++ b/server/storage/mvcc/hash_test.go
@@ -32,9 +32,9 @@ import (
 // output which would have catastrophic consequences. Expected output is just
 // hardcoded, so please regenerate it every time you change input parameters.
 func TestHashByRevValue(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	var totalRevisions int64 = 1210
 	assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions)
@@ -73,9 +73,9 @@ func TestHashByRevValue(t *testing.T) {
 }
 
 func TestHashByRevValueLastRevision(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	var totalRevisions int64 = 1210
 	assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions)
@@ -133,9 +133,9 @@ func testHashByRev(t *testing.T, s *store, rev int64) KeyValueHash {
 // TestCompactionHash tests compaction hash
 // TODO: Change this to fuzz test
 func TestCompactionHash(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testutil.TestCompactionHash(context.Background(), t, hashTestCase{s}, s.cfg.CompactionBatchLimit)
 }
diff --git a/server/storage/mvcc/kv_test.go b/server/storage/mvcc/kv_test.go
index bc2081b02..ef5461035 100644
--- a/server/storage/mvcc/kv_test.go
+++ b/server/storage/mvcc/kv_test.go
@@ -79,9 +79,9 @@ func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) }
 func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
 
 func testKVRange(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	kvs := put3TestKVs(s)
 
@@ -145,9 +145,9 @@ func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) }
 func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
 
 func testKVRangeRev(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	kvs := put3TestKVs(s)
 
@@ -181,9 +181,9 @@ func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc)
 func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
 
 func testKVRangeBadRev(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	put3TestKVs(s)
 	if _, err := s.Compact(traceutil.TODO(), 4); err != nil {
@@ -214,9 +214,9 @@ func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) }
 func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
 
 func testKVRangeLimit(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	kvs := put3TestKVs(s)
 
@@ -260,9 +260,9 @@ func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalP
 func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) }
 
 func testKVPutMultipleTimes(t *testing.T, f putFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < 10; i++ {
 		base := int64(i + 1)
@@ -322,7 +322,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 	}
 
 	for i, tt := range tests {
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 		s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -334,7 +334,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 			t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev)
 		}
 
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 	}
 }
 
@@ -342,9 +342,9 @@ func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, n
 func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) }
 
 func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 
@@ -365,9 +365,9 @@ func TestKVPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, normalP
 func TestKVTxnPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, txnPutFunc) }
 
 func testKVPutWithSameLease(t *testing.T, f putFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 	leaseID := int64(1)
 
 	// put foo
@@ -398,9 +398,9 @@ func testKVPutWithSameLease(t *testing.T, f putFunc) {
 // TestKVOperationInSequence tests that range, put, delete on single key in
 // sequence repeatedly works correctly.
 func TestKVOperationInSequence(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < 10; i++ {
 		base := int64(i*2 + 1)
@@ -445,7 +445,7 @@ func TestKVOperationInSequence(t *testing.T) {
 }
 
 func TestKVTxnBlockWriteOperations(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 	tests := []func(){
@@ -475,13 +475,13 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
 	}
 
 	// only close backend when we know all the tx are finished
-	cleanup(s, b, tmpPath)
+	cleanup(s, b)
 }
 
 func TestKVTxnNonBlockRange(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	txn := s.Write(traceutil.TODO())
 	defer txn.End()
@@ -501,9 +501,9 @@ func TestKVTxnNonBlockRange(t *testing.T) {
 // TestKVTxnOperationInSequence tests that txn range, put, delete on single key
 // in sequence repeatedly works correctly.
 func TestKVTxnOperationInSequence(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < 10; i++ {
 		txn := s.Write(traceutil.TODO())
@@ -551,9 +551,9 @@ func TestKVTxnOperationInSequence(t *testing.T) {
 }
 
 func TestKVCompactReserveLastValue(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	s.Put([]byte("foo"), []byte("bar0"), 1)
 	s.Put([]byte("foo"), []byte("bar1"), 2)
@@ -605,9 +605,9 @@ func TestKVCompactReserveLastValue(t *testing.T) {
 }
 
 func TestKVCompactBad(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
 	s.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
@@ -638,7 +638,7 @@ func TestKVHash(t *testing.T) {
 
 	for i := 0; i < len(hashes); i++ {
 		var err error
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		kv := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 		kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
 		kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
@@ -646,7 +646,7 @@ func TestKVHash(t *testing.T) {
 		if err != nil {
 			t.Fatalf("failed to get hash: %v", err)
 		}
-		cleanup(kv, b, tmpPath)
+		cleanup(kv, b)
 	}
 
 	for i := 1; i < len(hashes); i++ {
@@ -676,7 +676,7 @@ func TestKVRestore(t *testing.T) {
 		},
 	}
 	for i, tt := range tests {
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 		tt(s)
 		var kvss [][]mvccpb.KeyValue
@@ -702,7 +702,7 @@ func TestKVRestore(t *testing.T) {
 			r, _ := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k})
 			nkvss = append(nkvss, r.KVs)
 		}
-		cleanup(ns, b, tmpPath)
+		cleanup(ns, b)
 
 		if !reflect.DeepEqual(nkvss, kvss) {
 			t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
@@ -720,9 +720,9 @@ func readGaugeInt(g prometheus.Gauge) int {
 }
 
 func TestKVSnapshot(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	wkvs := put3TestKVs(s)
 
@@ -756,9 +756,9 @@ func TestKVSnapshot(t *testing.T) {
 }
 
 func TestWatchableKVWatch(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -860,10 +860,9 @@ func TestWatchableKVWatch(t *testing.T) {
 	}
 }
 
-func cleanup(s KV, b backend.Backend, path string) {
+func cleanup(s KV, b backend.Backend) {
 	s.Close()
 	b.Close()
-	os.Remove(path)
 }
 
 func put3TestKVs(s KV) []mvccpb.KeyValue {
diff --git a/server/storage/mvcc/kvstore_bench_test.go b/server/storage/mvcc/kvstore_bench_test.go
index 8b9a1456a..eeb574a1c 100644
--- a/server/storage/mvcc/kvstore_bench_test.go
+++ b/server/storage/mvcc/kvstore_bench_test.go
@@ -29,9 +29,9 @@ import (
 )
 
 func BenchmarkStorePut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -48,9 +48,9 @@ func BenchmarkStoreRangeKey1(b *testing.B) { benchmarkStoreRange(b, 1) }
 func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
 
 func benchmarkStoreRange(b *testing.B, n int) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// 64 byte key/val
 	keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
@@ -97,9 +97,9 @@ func BenchmarkConsistentIndex(b *testing.B) {
 
 // BenchmarkStorePutUpdate is same as above, but instead updates single key
 func BenchmarkStorePutUpdate(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	keys := createBytesSlice(64, 1)
@@ -115,9 +115,9 @@ func BenchmarkStorePutUpdate(b *testing.B) {
 // with transaction begin and end, where transaction involves
 // some synchronization operations, such as mutex locking.
 func BenchmarkStoreTxnPut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -135,10 +135,10 @@ func BenchmarkStoreTxnPut(b *testing.B) {
 
 // benchmarkStoreRestore benchmarks the restore operation
 func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 	// use closure to capture 's' to pick up the reassignment
-	defer func() { cleanup(s, be, tmpPath) }()
+	defer func() { cleanup(s, be) }()
 
 	// arbitrary number of bytes
 	bytesN := 64
diff --git a/server/storage/mvcc/kvstore_compaction_test.go b/server/storage/mvcc/kvstore_compaction_test.go
index b9b782273..dd8837637 100644
--- a/server/storage/mvcc/kvstore_compaction_test.go
+++ b/server/storage/mvcc/kvstore_compaction_test.go
@@ -16,7 +16,6 @@ package mvcc
 
 import (
 	"context"
-	"os"
 	"reflect"
 	"testing"
 	"time"
@@ -68,7 +67,7 @@ func TestScheduleCompaction(t *testing.T) {
 		},
 	}
 	for i, tt := range tests {
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 		fi := newFakeIndex()
 		fi.indexCompactRespc <- tt.keep
@@ -103,17 +102,14 @@ func TestScheduleCompaction(t *testing.T) {
 		}
 		tx.Unlock()
 
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 	}
 }
 
 func TestCompactAllAndRestore(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer func() {
-		b.Close()
-		os.Remove(tmpPath)
-	}()
+	defer b.Close()
 
 	s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 	s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
diff --git a/server/storage/mvcc/kvstore_test.go b/server/storage/mvcc/kvstore_test.go
index 02ab63db2..a3bdaac77 100644
--- a/server/storage/mvcc/kvstore_test.go
+++ b/server/storage/mvcc/kvstore_test.go
@@ -485,7 +485,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 		test := test
 
 		t.Run(test, func(t *testing.T) {
-			b, tmpPath := betesting.NewDefaultTmpBackend(t)
+			b, _ := betesting.NewDefaultTmpBackend(t)
 			s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 			s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -512,7 +512,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 				s0.Restore(b)
 				s = s0
 			}
-			defer cleanup(s, b, tmpPath)
+			defer cleanup(s, b)
 
 			// wait for scheduled compaction to be finished
 			time.Sleep(100 * time.Millisecond)
@@ -549,9 +549,9 @@ type hashKVResult struct {
 
 // TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
 func TestHashKVWhenCompacting(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	rev := 10000
 	for i := 2; i <= rev; i++ {
@@ -629,9 +629,9 @@ func TestHashKVWhenCompacting(t *testing.T) {
 // TestHashKVWithCompactedAndFutureRevisions ensures that HashKV returns a correct hash when called
 // with a past revision (lower than compacted), a future revision, and the exact compacted revision
 func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	rev := 10000
 	compactRev := rev / 2
@@ -662,9 +662,9 @@ func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) {
 // TestHashKVZeroRevision ensures that "HashByRev(0)" computes
 // correct hash value with latest revision.
 func TestHashKVZeroRevision(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	rev := 10000
 	for i := 2; i <= rev; i++ {
@@ -695,9 +695,9 @@ func TestTxnPut(t *testing.T) {
 	keys := createBytesSlice(bytesN, sliceN)
 	vals := createBytesSlice(bytesN, sliceN)
 
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < sliceN; i++ {
 		txn := s.Write(traceutil.TODO())
@@ -711,9 +711,9 @@ func TestTxnPut(t *testing.T) {
 
 // TestConcurrentReadNotBlockingWrite ensures Read does not blocking Write after its creation
 func TestConcurrentReadNotBlockingWrite(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	// write something to read later
 	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -780,9 +780,9 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
 		committedKVs kvs        // committedKVs records the key-value pairs written by the finished Write Txns
 		mu           sync.Mutex // mu protects committedKVs
 	)
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	var wg sync.WaitGroup
 	wg.Add(numOfWrites)
diff --git a/server/storage/mvcc/watchable_store_bench_test.go b/server/storage/mvcc/watchable_store_bench_test.go
index 9329dce76..ba402a3e1 100644
--- a/server/storage/mvcc/watchable_store_bench_test.go
+++ b/server/storage/mvcc/watchable_store_bench_test.go
@@ -16,7 +16,6 @@ package mvcc
 
 import (
 	"math/rand"
-	"os"
 	"testing"
 
 	"go.uber.org/zap/zaptest"
@@ -27,9 +26,9 @@ import (
 )
 
 func BenchmarkWatchableStorePut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -47,9 +46,9 @@ func BenchmarkWatchableStorePut(b *testing.B) {
 // with transaction begin and end, where transaction involves
 // some synchronization operations, such as mutex locking.
 func BenchmarkWatchableStoreTxnPut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -78,9 +77,9 @@ func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
 }
 
 func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	k := []byte("testkey")
 	v := []byte("testval")
@@ -122,7 +121,7 @@ func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
 // TODO: k is an arbitrary constant. We need to figure out what factor
 // we should put to simulate the real-world use cases.
 func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 
 	// manually create watchableStore instead of newWatchableStore
@@ -136,12 +135,10 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 
 		// to make the test not crash from assigning to nil map.
 		// 'synced' doesn't get populated in this test.
 		synced: newWatcherGroup(),
+		stopc:  make(chan struct{}),
 	}
-	defer func() {
-		ws.store.Close()
-		os.Remove(tmpPath)
-	}()
+	defer cleanup(ws, be)
 
 	// Put a key so that we can spawn watchers on that key
 	// (testKey in this test). This increases the rev to 1,
@@ -152,6 +149,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 	s.Put(testKey, testValue, lease.NoLease)
 
 	w := ws.NewWatchStream()
+	defer w.Close()
 
 	const k int = 2
 	benchSampleN := b.N
@@ -179,13 +177,10 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 }
 
 func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 
-	defer func() {
-		s.store.Close()
-		os.Remove(tmpPath)
-	}()
+	defer cleanup(s, be)
 
 	// Put a key so that we can spawn watchers on that key
 	testKey := []byte("foo")
@@ -193,6 +188,7 @@ func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
 	s.Put(testKey, testValue, lease.NoLease)
 
 	w := s.NewWatchStream()
+	defer w.Close()
 
 	// put 1 million watchers on the same key
 	const watcherN = 1000000
diff --git a/server/storage/mvcc/watchable_store_test.go b/server/storage/mvcc/watchable_store_test.go
index b7d6868f8..a98106bca 100644
--- a/server/storage/mvcc/watchable_store_test.go
+++ b/server/storage/mvcc/watchable_store_test.go
@@ -31,9 +31,9 @@ import (
 )
 
 func TestWatch(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -50,9 +50,9 @@ func TestWatch(t *testing.T) {
 }
 
 func TestNewWatcherCancel(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -74,7 +74,7 @@ func TestNewWatcherCancel(t *testing.T) {
 
 // TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
 func TestCancelUnsynced(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 
 	// manually create watchableStore instead of newWatchableStore
 	// because newWatchableStore automatically calls syncWatchers
@@ -90,7 +90,7 @@ func TestCancelUnsynced(t *testing.T) {
 		stopc: make(chan struct{}),
 	}
 
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	// Put a key so that we can spawn watchers on that key.
 	// (testKey in this test). This increases the rev to 1,
@@ -132,7 +132,7 @@ func TestCancelUnsynced(t *testing.T) {
 // method to see if it correctly sends events to channel of unsynced watchers
 // and moves these watchers to synced.
 func TestSyncWatchers(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 
 	s := &watchableStore{
 		store:    NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
@@ -141,7 +141,7 @@ func TestSyncWatchers(t *testing.T) {
 		stopc: make(chan struct{}),
 	}
 
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -216,9 +216,9 @@ func TestSyncWatchers(t *testing.T) {
 
 // TestWatchCompacted tests a watcher that watches on a compacted revision.
 func TestWatchCompacted(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -251,9 +251,9 @@ func TestWatchCompacted(t *testing.T) {
 }
 
 func TestWatchFutureRev(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -290,17 +290,17 @@ func TestWatchFutureRev(t *testing.T) {
 func TestWatchRestore(t *testing.T) {
 	test := func(delay time.Duration) func(t *testing.T) {
 		return func(t *testing.T) {
-			b, tmpPath := betesting.NewDefaultTmpBackend(t)
+			b, _ := betesting.NewDefaultTmpBackend(t)
 			s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-			defer cleanup(s, b, tmpPath)
+			defer cleanup(s, b)
 
 			testKey := []byte("foo")
 			testValue := []byte("bar")
 			rev := s.Put(testKey, testValue, lease.NoLease)
 
-			newBackend, newPath := betesting.NewDefaultTmpBackend(t)
+			newBackend, _ := betesting.NewDefaultTmpBackend(t)
 			newStore := newWatchableStore(zaptest.NewLogger(t), newBackend, &lease.FakeLessor{}, StoreConfig{})
-			defer cleanup(newStore, newBackend, newPath)
+			defer cleanup(newStore, newBackend)
 
 			w := newStore.NewWatchStream()
 			defer w.Close()
@@ -338,13 +338,13 @@ func TestWatchRestore(t *testing.T) {
 // 4. restore operation moves "synced" to "unsynced" watcher group
 // 5. choose the watcher from step 1, without panic
 func TestWatchRestoreSyncedWatcher(t *testing.T) {
-	b1, b1Path := betesting.NewDefaultTmpBackend(t)
+	b1, _ := betesting.NewDefaultTmpBackend(t)
 	s1 := newWatchableStore(zaptest.NewLogger(t), b1, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s1, b1, b1Path)
+	defer cleanup(s1, b1)
 
-	b2, b2Path := betesting.NewDefaultTmpBackend(t)
+	b2, _ := betesting.NewDefaultTmpBackend(t)
 	s2 := newWatchableStore(zaptest.NewLogger(t), b2, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s2, b2, b2Path)
+	defer cleanup(s2, b2)
 
 	testKey, testValue := []byte("foo"), []byte("bar")
 	rev := s1.Put(testKey, testValue, lease.NoLease)
@@ -391,13 +391,13 @@ func TestWatchRestoreSyncedWatcher(t *testing.T) {
 
 // TestWatchBatchUnsynced tests batching on unsynced watchers
 func TestWatchBatchUnsynced(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 	oldMaxRevs := watchBatchMaxRevs
 	defer func() {
 		watchBatchMaxRevs = oldMaxRevs
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 	}()
 	batches := 3
 	watchBatchMaxRevs = 4
@@ -526,11 +526,11 @@ func TestNewMapwatcherToEventMap(t *testing.T) {
 func TestWatchVictims(t *testing.T) {
 	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
 
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
 	}()
 
@@ -603,9 +603,9 @@ func TestWatchVictims(t *testing.T) {
 // TestStressWatchCancelClose tests closing a watch stream while
 // canceling its watches.
 func TestStressWatchCancelClose(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey, testValue := []byte("foo"), []byte("bar")
 	var wg sync.WaitGroup
diff --git a/server/storage/mvcc/watcher_bench_test.go b/server/storage/mvcc/watcher_bench_test.go
index 264369d75..52a55d063 100644
--- a/server/storage/mvcc/watcher_bench_test.go
+++ b/server/storage/mvcc/watcher_bench_test.go
@@ -25,12 +25,13 @@ import (
 )
 
 func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	watchable := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 
-	defer cleanup(watchable, be, tmpPath)
+	defer cleanup(watchable, be)
 
 	w := watchable.NewWatchStream()
+	defer w.Close()
 
 	b.ReportAllocs()
 	b.StartTimer()
diff --git a/server/storage/mvcc/watcher_test.go b/server/storage/mvcc/watcher_test.go
index cbe394022..b86e31a55 100644
--- a/server/storage/mvcc/watcher_test.go
+++ b/server/storage/mvcc/watcher_test.go
@@ -32,9 +32,9 @@ import (
 // TestWatcherWatchID tests that each watcher provides unique watchID,
 // and the watched event attaches the correct watchID.
 func TestWatcherWatchID(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -82,9 +82,9 @@ func TestWatcherWatchID(t *testing.T) {
 }
 
 func TestWatcherRequestsCustomID(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -119,9 +119,9 @@ func TestWatcherRequestsCustomID(t *testing.T) {
 // TestWatcherWatchPrefix tests if Watch operation correctly watches
 // and returns events with matching prefixes.
 func TestWatcherWatchPrefix(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -193,9 +193,9 @@ func TestWatcherWatchPrefix(t *testing.T) {
 // TestWatcherWatchWrongRange ensures that watcher with wrong 'end' range
 // does not create watcher, which panics when canceling in range tree.
 func TestWatcherWatchWrongRange(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -253,9 +253,9 @@ func TestWatchDeleteRange(t *testing.T) {
 // TestWatchStreamCancelWatcherByID ensures cancel calls the cancel func of the watcher
 // with given id inside watchStream.
 func TestWatchStreamCancelWatcherByID(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -290,7 +290,7 @@ func TestWatchStreamCancelWatcherByID(t *testing.T) {
 // TestWatcherRequestProgress ensures synced watcher can correctly
 // report its correct progress.
 func TestWatcherRequestProgress(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 
 	// manually create watchableStore instead of newWatchableStore
 	// because newWatchableStore automatically calls syncWatchers
@@ -300,12 +300,10 @@ func TestWatcherRequestProgress(t *testing.T) {
 		store:    NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
 		unsynced: newWatcherGroup(),
 		synced:   newWatcherGroup(),
+		stopc:    make(chan struct{}),
 	}
 
-	defer func() {
-		s.store.Close()
-		os.Remove(tmpPath)
-	}()
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	notTestKey := []byte("bad")
@@ -345,9 +343,9 @@ func TestWatcherRequestProgress(t *testing.T) {
 }
 
 func TestWatcherWatchWithFilter(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
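
Note on the recurring pattern: every call site now discards the second return value of `betesting.NewDefaultTmpBackend`, and `cleanup` drops its `path` parameter together with the explicit `os.Remove(path)`. This is safe as long as the helper creates the backend file under the test's own temporary directory (`t.TempDir()`), which the `testing` package deletes automatically once the test or benchmark finishes. A minimal, self-contained sketch of that idea, using illustrative names rather than etcd's actual API:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

// newTmpFile stands in for a helper like betesting.NewDefaultTmpBackend:
// it creates a file under t.TempDir(), a directory the testing framework
// removes on its own when the test or benchmark completes.
func newTmpFile(t testing.TB) (*os.File, string) {
	path := filepath.Join(t.TempDir(), "database")
	f, err := os.Create(path)
	if err != nil {
		t.Fatal(err)
	}
	return f, path
}

func TestCleanupPattern(t *testing.T) {
	// The path can be discarded, mirroring the `b, _ := ...` form above:
	// nothing needs to call os.Remove, only close the open resources.
	f, _ := newTmpFile(t)
	defer f.Close()
}
```

Under this scheme the only job left for `cleanup` is releasing live resources (the store and backend handles), which is exactly what the two-argument `cleanup(s, b)` does.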