Mirror of https://github.com/etcd-io/etcd.git, synced 2024-09-27 06:25:44 +00:00
chore: refactor cleanup fn in mvcc test

The tmp path is cleaned up by Go's testing framework, so `cleanup` no longer needs to call `os.Remove`.

Signed-off-by: Wei Fu <fuweid89@gmail.com>
parent 830d9e9eaa
commit eb09e00541
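For context on why the explicit `os.Remove` is safe to drop: `t.TempDir()` registers the directory for automatic removal via `t.Cleanup` once the test and its subtests finish. A minimal sketch, assuming `betesting.NewDefaultTmpBackend` places its backend file under `t.TempDir()` as the commit message implies; the helper below is illustrative, not etcd's actual code:

package example

import (
	"os"
	"path/filepath"
	"testing"
)

// newTmpBackendPath is a hypothetical stand-in for what a helper like
// betesting.NewDefaultTmpBackend does with its temporary file: everything
// lives under t.TempDir(), which the testing package deletes automatically
// after the test ends, so callers never need os.Remove in their cleanup.
func newTmpBackendPath(t *testing.T) string {
	t.Helper()
	dir := t.TempDir() // removed via t.Cleanup when the test finishes
	path := filepath.Join(dir, "db")
	if err := os.WriteFile(path, nil, 0o600); err != nil {
		t.Fatal(err)
	}
	return path
}

With the path managed by the testing framework, `cleanup` can shrink to closing the store and the backend, which is exactly what the diff below does.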
@@ -32,9 +32,9 @@ import (
 // output which would have catastrophic consequences. Expected output is just
 // hardcoded, so please regenerate it every time you change input parameters.
 func TestHashByRevValue(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	var totalRevisions int64 = 1210
 	assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions)
@@ -73,9 +73,9 @@ func TestHashByRevValue(t *testing.T) {
 }
 
 func TestHashByRevValueLastRevision(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	var totalRevisions int64 = 1210
 	assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions)
@@ -133,9 +133,9 @@ func testHashByRev(t *testing.T, s *store, rev int64) KeyValueHash {
 // TestCompactionHash tests compaction hash
 // TODO: Change this to fuzz test
 func TestCompactionHash(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testutil.TestCompactionHash(context.Background(), t, hashTestCase{s}, s.cfg.CompactionBatchLimit)
 }
@@ -79,9 +79,9 @@ func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) }
 func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
 
 func testKVRange(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	kvs := put3TestKVs(s)
 
@@ -145,9 +145,9 @@ func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) }
 func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
 
 func testKVRangeRev(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	kvs := put3TestKVs(s)
 
@@ -181,9 +181,9 @@ func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc)
 func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
 
 func testKVRangeBadRev(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	put3TestKVs(s)
 	if _, err := s.Compact(traceutil.TODO(), 4); err != nil {
@@ -214,9 +214,9 @@ func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) }
 func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
 
 func testKVRangeLimit(t *testing.T, f rangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	kvs := put3TestKVs(s)
 
@@ -260,9 +260,9 @@ func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalP
 func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) }
 
 func testKVPutMultipleTimes(t *testing.T, f putFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < 10; i++ {
 		base := int64(i + 1)
@@ -322,7 +322,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 	}
 
 	for i, tt := range tests {
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 		s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -334,7 +334,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 			t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev)
 		}
 
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 	}
 }
 
@@ -342,9 +342,9 @@ func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, n
 func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) }
 
 func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 
@@ -365,9 +365,9 @@ func TestKVPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, normalP
 func TestKVTxnPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, txnPutFunc) }
 
 func testKVPutWithSameLease(t *testing.T, f putFunc) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 	leaseID := int64(1)
 
 	// put foo
@@ -398,9 +398,9 @@ func testKVPutWithSameLease(t *testing.T, f putFunc) {
 // TestKVOperationInSequence tests that range, put, delete on single key in
 // sequence repeatedly works correctly.
 func TestKVOperationInSequence(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < 10; i++ {
 		base := int64(i*2 + 1)
@@ -445,7 +445,7 @@ func TestKVOperationInSequence(t *testing.T) {
 }
 
 func TestKVTxnBlockWriteOperations(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 	tests := []func(){
@@ -475,13 +475,13 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
 	}
 
 	// only close backend when we know all the tx are finished
-	cleanup(s, b, tmpPath)
+	cleanup(s, b)
 }
 
 func TestKVTxnNonBlockRange(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	txn := s.Write(traceutil.TODO())
 	defer txn.End()
@@ -501,9 +501,9 @@ func TestKVTxnNonBlockRange(t *testing.T) {
 // TestKVTxnOperationInSequence tests that txn range, put, delete on single key
 // in sequence repeatedly works correctly.
 func TestKVTxnOperationInSequence(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < 10; i++ {
 		txn := s.Write(traceutil.TODO())
@@ -551,9 +551,9 @@ func TestKVTxnOperationInSequence(t *testing.T) {
 }
 
 func TestKVCompactReserveLastValue(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	s.Put([]byte("foo"), []byte("bar0"), 1)
 	s.Put([]byte("foo"), []byte("bar1"), 2)
@@ -605,9 +605,9 @@ func TestKVCompactReserveLastValue(t *testing.T) {
 }
 
 func TestKVCompactBad(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
 	s.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
@@ -638,7 +638,7 @@ func TestKVHash(t *testing.T) {
 
 	for i := 0; i < len(hashes); i++ {
 		var err error
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		kv := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 		kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
 		kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
@@ -646,7 +646,7 @@ func TestKVHash(t *testing.T) {
 		if err != nil {
 			t.Fatalf("failed to get hash: %v", err)
 		}
-		cleanup(kv, b, tmpPath)
+		cleanup(kv, b)
 	}
 
 	for i := 1; i < len(hashes); i++ {
@@ -676,7 +676,7 @@ func TestKVRestore(t *testing.T) {
 		},
 	}
 	for i, tt := range tests {
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 		tt(s)
 		var kvss [][]mvccpb.KeyValue
@@ -702,7 +702,7 @@ func TestKVRestore(t *testing.T) {
 			r, _ := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k})
 			nkvss = append(nkvss, r.KVs)
 		}
-		cleanup(ns, b, tmpPath)
+		cleanup(ns, b)
 
 		if !reflect.DeepEqual(nkvss, kvss) {
 			t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
@@ -720,9 +720,9 @@ func readGaugeInt(g prometheus.Gauge) int {
 }
 
 func TestKVSnapshot(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	wkvs := put3TestKVs(s)
 
@@ -756,9 +756,9 @@ func TestKVSnapshot(t *testing.T) {
 }
 
 func TestWatchableKVWatch(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -860,10 +860,9 @@ func TestWatchableKVWatch(t *testing.T) {
 	}
 }
 
-func cleanup(s KV, b backend.Backend, path string) {
+func cleanup(s KV, b backend.Backend) {
 	s.Close()
 	b.Close()
-	os.Remove(path)
 }
 
 func put3TestKVs(s KV) []mvccpb.KeyValue {
@@ -29,9 +29,9 @@ import (
 )
 
 func BenchmarkStorePut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -48,9 +48,9 @@ func BenchmarkStoreRangeKey1(b *testing.B) { benchmarkStoreRange(b, 1) }
 func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
 
 func benchmarkStoreRange(b *testing.B, n int) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// 64 byte key/val
 	keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
@@ -97,9 +97,9 @@ func BenchmarkConsistentIndex(b *testing.B) {
 
 // BenchmarkStorePutUpdate is same as above, but instead updates single key
 func BenchmarkStorePutUpdate(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	keys := createBytesSlice(64, 1)
@@ -115,9 +115,9 @@ func BenchmarkStorePutUpdate(b *testing.B) {
 // with transaction begin and end, where transaction involves
 // some synchronization operations, such as mutex locking.
 func BenchmarkStoreTxnPut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -135,10 +135,10 @@ func BenchmarkStoreTxnPut(b *testing.B) {
 
 // benchmarkStoreRestore benchmarks the restore operation
 func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 	// use closure to capture 's' to pick up the reassignment
-	defer func() { cleanup(s, be, tmpPath) }()
+	defer func() { cleanup(s, be) }()
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -16,7 +16,6 @@ package mvcc
 
 import (
 	"context"
-	"os"
 	"reflect"
 	"testing"
 	"time"
@@ -68,7 +67,7 @@ func TestScheduleCompaction(t *testing.T) {
 		},
 	}
 	for i, tt := range tests {
-		b, tmpPath := betesting.NewDefaultTmpBackend(t)
+		b, _ := betesting.NewDefaultTmpBackend(t)
 		s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 		fi := newFakeIndex()
 		fi.indexCompactRespc <- tt.keep
@@ -103,17 +102,14 @@ func TestScheduleCompaction(t *testing.T) {
 		}
 		tx.Unlock()
 
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 	}
 }
 
 func TestCompactAllAndRestore(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer func() {
-		b.Close()
-		os.Remove(tmpPath)
-	}()
+	defer b.Close()
 
 	s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 	s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
@@ -485,7 +485,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 		test := test
 
 		t.Run(test, func(t *testing.T) {
-			b, tmpPath := betesting.NewDefaultTmpBackend(t)
+			b, _ := betesting.NewDefaultTmpBackend(t)
 			s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 			s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -512,7 +512,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 				s0.Restore(b)
 				s = s0
 			}
-			defer cleanup(s, b, tmpPath)
+			defer cleanup(s, b)
 
 			// wait for scheduled compaction to be finished
 			time.Sleep(100 * time.Millisecond)
@@ -549,9 +549,9 @@ type hashKVResult struct {
 
 // TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
 func TestHashKVWhenCompacting(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	rev := 10000
 	for i := 2; i <= rev; i++ {
@@ -629,9 +629,9 @@ func TestHashKVWhenCompacting(t *testing.T) {
 // TestHashKVWithCompactedAndFutureRevisions ensures that HashKV returns a correct hash when called
 // with a past revision (lower than compacted), a future revision, and the exact compacted revision
 func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	rev := 10000
 	compactRev := rev / 2
@@ -662,9 +662,9 @@ func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) {
 // TestHashKVZeroRevision ensures that "HashByRev(0)" computes
 // correct hash value with latest revision.
 func TestHashKVZeroRevision(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	rev := 10000
 	for i := 2; i <= rev; i++ {
@@ -695,9 +695,9 @@ func TestTxnPut(t *testing.T) {
 	keys := createBytesSlice(bytesN, sliceN)
 	vals := createBytesSlice(bytesN, sliceN)
 
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	for i := 0; i < sliceN; i++ {
 		txn := s.Write(traceutil.TODO())
@@ -711,9 +711,9 @@ func TestTxnPut(t *testing.T) {
 
 // TestConcurrentReadNotBlockingWrite ensures Read does not blocking Write after its creation
 func TestConcurrentReadNotBlockingWrite(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	// write something to read later
 	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -780,9 +780,9 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
 		committedKVs kvs // committedKVs records the key-value pairs written by the finished Write Txns
 		mu sync.Mutex // mu protects committedKVs
 	)
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	var wg sync.WaitGroup
 	wg.Add(numOfWrites)
@@ -16,7 +16,6 @@ package mvcc
 
 import (
 	"math/rand"
-	"os"
 	"testing"
 
 	"go.uber.org/zap/zaptest"
@@ -27,9 +26,9 @@ import (
 )
 
 func BenchmarkWatchableStorePut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -47,9 +46,9 @@ func BenchmarkWatchableStorePut(b *testing.B) {
 // with transaction begin and end, where transaction involves
 // some synchronization operations, such as mutex locking.
 func BenchmarkWatchableStoreTxnPut(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	// arbitrary number of bytes
 	bytesN := 64
@@ -78,9 +77,9 @@ func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
 }
 
 func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, be, tmpPath)
+	defer cleanup(s, be)
 
 	k := []byte("testkey")
 	v := []byte("testval")
@@ -122,7 +121,7 @@ func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
 // TODO: k is an arbitrary constant. We need to figure out what factor
 // we should put to simulate the real-world use cases.
 func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 
 	// manually create watchableStore instead of newWatchableStore
@@ -136,12 +135,10 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 		// to make the test not crash from assigning to nil map.
 		// 'synced' doesn't get populated in this test.
 		synced: newWatcherGroup(),
+		stopc: make(chan struct{}),
 	}
 
-	defer func() {
-		ws.store.Close()
-		os.Remove(tmpPath)
-	}()
+	defer cleanup(ws, be)
 
 	// Put a key so that we can spawn watchers on that key
 	// (testKey in this test). This increases the rev to 1,
@@ -152,6 +149,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 	s.Put(testKey, testValue, lease.NoLease)
 
 	w := ws.NewWatchStream()
+	defer w.Close()
 
 	const k int = 2
 	benchSampleN := b.N
@@ -179,13 +177,10 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 }
 
 func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 
-	defer func() {
-		s.store.Close()
-		os.Remove(tmpPath)
-	}()
+	defer cleanup(s, be)
 
 	// Put a key so that we can spawn watchers on that key
 	testKey := []byte("foo")
@@ -193,6 +188,7 @@ func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
 	s.Put(testKey, testValue, lease.NoLease)
 
 	w := s.NewWatchStream()
+	defer w.Close()
 
 	// put 1 million watchers on the same key
 	const watcherN = 1000000
@@ -31,9 +31,9 @@ import (
 )
 
 func TestWatch(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -50,9 +50,9 @@ func TestWatch(t *testing.T) {
 }
 
 func TestNewWatcherCancel(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -74,7 +74,7 @@ func TestNewWatcherCancel(t *testing.T) {
 
 // TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
 func TestCancelUnsynced(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 
 	// manually create watchableStore instead of newWatchableStore
 	// because newWatchableStore automatically calls syncWatchers
@@ -90,7 +90,7 @@ func TestCancelUnsynced(t *testing.T) {
 		stopc: make(chan struct{}),
 	}
 
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	// Put a key so that we can spawn watchers on that key.
 	// (testKey in this test). This increases the rev to 1,
@@ -132,7 +132,7 @@ func TestCancelUnsynced(t *testing.T) {
 // method to see if it correctly sends events to channel of unsynced watchers
 // and moves these watchers to synced.
 func TestSyncWatchers(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 
 	s := &watchableStore{
 		store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
@@ -141,7 +141,7 @@ func TestSyncWatchers(t *testing.T) {
 		stopc: make(chan struct{}),
 	}
 
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -216,9 +216,9 @@ func TestSyncWatchers(t *testing.T) {
 
 // TestWatchCompacted tests a watcher that watches on a compacted revision.
 func TestWatchCompacted(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -251,9 +251,9 @@ func TestWatchCompacted(t *testing.T) {
 }
 
 func TestWatchFutureRev(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	testValue := []byte("bar")
@@ -290,17 +290,17 @@ func TestWatchFutureRev(t *testing.T) {
 func TestWatchRestore(t *testing.T) {
 	test := func(delay time.Duration) func(t *testing.T) {
 		return func(t *testing.T) {
-			b, tmpPath := betesting.NewDefaultTmpBackend(t)
+			b, _ := betesting.NewDefaultTmpBackend(t)
 			s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-			defer cleanup(s, b, tmpPath)
+			defer cleanup(s, b)
 
 			testKey := []byte("foo")
 			testValue := []byte("bar")
 			rev := s.Put(testKey, testValue, lease.NoLease)
 
-			newBackend, newPath := betesting.NewDefaultTmpBackend(t)
+			newBackend, _ := betesting.NewDefaultTmpBackend(t)
 			newStore := newWatchableStore(zaptest.NewLogger(t), newBackend, &lease.FakeLessor{}, StoreConfig{})
-			defer cleanup(newStore, newBackend, newPath)
+			defer cleanup(newStore, newBackend)
 
 			w := newStore.NewWatchStream()
 			defer w.Close()
@@ -338,13 +338,13 @@ func TestWatchRestore(t *testing.T) {
 // 4. restore operation moves "synced" to "unsynced" watcher group
 // 5. choose the watcher from step 1, without panic
 func TestWatchRestoreSyncedWatcher(t *testing.T) {
-	b1, b1Path := betesting.NewDefaultTmpBackend(t)
+	b1, _ := betesting.NewDefaultTmpBackend(t)
 	s1 := newWatchableStore(zaptest.NewLogger(t), b1, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s1, b1, b1Path)
+	defer cleanup(s1, b1)
 
-	b2, b2Path := betesting.NewDefaultTmpBackend(t)
+	b2, _ := betesting.NewDefaultTmpBackend(t)
 	s2 := newWatchableStore(zaptest.NewLogger(t), b2, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s2, b2, b2Path)
+	defer cleanup(s2, b2)
 
 	testKey, testValue := []byte("foo"), []byte("bar")
 	rev := s1.Put(testKey, testValue, lease.NoLease)
@@ -391,13 +391,13 @@ func TestWatchRestoreSyncedWatcher(t *testing.T) {
 
 // TestWatchBatchUnsynced tests batching on unsynced watchers
 func TestWatchBatchUnsynced(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 	oldMaxRevs := watchBatchMaxRevs
 	defer func() {
 		watchBatchMaxRevs = oldMaxRevs
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 	}()
 	batches := 3
 	watchBatchMaxRevs = 4
@@ -526,11 +526,11 @@ func TestNewMapwatcherToEventMap(t *testing.T) {
 func TestWatchVictims(t *testing.T) {
 	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
 
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
-		cleanup(s, b, tmpPath)
+		cleanup(s, b)
 		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
 	}()
 
@@ -603,9 +603,9 @@ func TestWatchVictims(t *testing.T) {
 // TestStressWatchCancelClose tests closing a watch stream while
 // canceling its watches.
 func TestStressWatchCancelClose(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	testKey, testValue := []byte("foo"), []byte("bar")
 	var wg sync.WaitGroup
@@ -25,12 +25,13 @@ import (
 )
 
 func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
-	be, tmpPath := betesting.NewDefaultTmpBackend(b)
+	be, _ := betesting.NewDefaultTmpBackend(b)
 	watchable := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
 
-	defer cleanup(watchable, be, tmpPath)
+	defer cleanup(watchable, be)
 
 	w := watchable.NewWatchStream()
+	defer w.Close()
 
 	b.ReportAllocs()
 	b.StartTimer()
@@ -32,9 +32,9 @@ import (
 // TestWatcherWatchID tests that each watcher provides unique watchID,
 // and the watched event attaches the correct watchID.
 func TestWatcherWatchID(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -82,9 +82,9 @@ func TestWatcherWatchID(t *testing.T) {
 }
 
 func TestWatcherRequestsCustomID(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -119,9 +119,9 @@ func TestWatcherRequestsCustomID(t *testing.T) {
 // TestWatcherWatchPrefix tests if Watch operation correctly watches
 // and returns events with matching prefixes.
 func TestWatcherWatchPrefix(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -193,9 +193,9 @@ func TestWatcherWatchPrefix(t *testing.T) {
 // TestWatcherWatchWrongRange ensures that watcher with wrong 'end' range
 // does not create watcher, which panics when canceling in range tree.
 func TestWatcherWatchWrongRange(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -253,9 +253,9 @@ func TestWatchDeleteRange(t *testing.T) {
 // TestWatchStreamCancelWatcherByID ensures cancel calls the cancel func of the watcher
 // with given id inside watchStream.
 func TestWatchStreamCancelWatcherByID(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()
@@ -290,7 +290,7 @@ func TestWatchStreamCancelWatcherByID(t *testing.T) {
 // TestWatcherRequestProgress ensures synced watcher can correctly
 // report its correct progress.
 func TestWatcherRequestProgress(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 
 	// manually create watchableStore instead of newWatchableStore
 	// because newWatchableStore automatically calls syncWatchers
@@ -300,12 +300,10 @@ func TestWatcherRequestProgress(t *testing.T) {
 		store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
 		unsynced: newWatcherGroup(),
 		synced: newWatcherGroup(),
+		stopc: make(chan struct{}),
 	}
 
-	defer func() {
-		s.store.Close()
-		os.Remove(tmpPath)
-	}()
+	defer cleanup(s, b)
 
 	testKey := []byte("foo")
 	notTestKey := []byte("bad")
@@ -345,9 +343,9 @@ func TestWatcherRequestProgress(t *testing.T) {
 }
 
 func TestWatcherWatchWithFilter(t *testing.T) {
-	b, tmpPath := betesting.NewDefaultTmpBackend(t)
+	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
-	defer cleanup(s, b, tmpPath)
+	defer cleanup(s, b)
 
 	w := s.NewWatchStream()
 	defer w.Close()