mirror of https://github.com/etcd-io/etcd.git
Simplify KVstore dependency on cindex.
This commit is contained in:
parent 2fb6f0a74b
commit a78d072b9a
@@ -322,9 +322,7 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, desired *desiredCl
 		tx.Lock()
 		defer tx.Unlock()
 		cindex.UnsafeCreateMetaBucket(tx)
-		ci := cindex.NewConsistentIndex(tx)
-		ci.SetConsistentIndex(idx)
-		ci.UnsafeSave(tx)
+		cindex.UnsafeUpdateConsistentIndex(tx, idx, false)
 	} else {
 		// Thanks to translateWAL not moving entries, but just replacing them with
 		// 'empty', there is no need to update the consistency index.

@@ -478,8 +478,7 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
 func (s *v3Manager) updateCIndex(commit uint64) error {
 	be := backend.NewDefaultBackend(s.outDbPath())
 	defer be.Close()
-	ci := cindex.NewConsistentIndex(be.BatchTx())
-	ci.SetConsistentIndex(commit)
-	ci.UnsafeSave(be.BatchTx())
+	cindex.UpdateConsistentIndex(be.BatchTx(), commit, false)
 	return nil
 }
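Both hunks above collapse the old three-step sequence (construct a ConsistentIndex over a transaction, set the value, save it) into a single cindex helper call. A minimal sketch of the new calling convention, assuming the package layout shown in this diff; the wrapper function is illustrative only, while the helper names and the trailing bool argument come from the hunks above:

package sketch

import (
	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
	"go.etcd.io/etcd/server/v3/mvcc/backend"
)

// persistIndex mirrors the new updateCIndex body: one call persists the
// consistent index where NewConsistentIndex + SetConsistentIndex +
// UnsafeSave were needed before.
func persistIndex(dbPath string, commit uint64) {
	be := backend.NewDefaultBackend(dbPath)
	defer be.Close()
	// Following etcd's naming convention, the Unsafe* variant used in
	// saveDB assumes the caller already holds tx.Lock(); this variant
	// is called without taking the lock first.
	cindex.UpdateConsistentIndex(be.BatchTx(), commit, false)
}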
@@ -79,7 +79,7 @@ func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
 
 func testKVRange(t *testing.T, f rangeFunc) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	kvs := put3TestKVs(s)
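The same mechanical change repeats across the mvcc tests below: the store constructors lose their ConsistentIndexer argument (nil in most tests, a real index in a few spots). Since the diff shows only call sites, the signature change is inferred; the parameter names here are assumptions:

// Before (inferred from the old call sites):
//   func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor,
//                 ci cindex.ConsistentIndexer, cfg StoreConfig) *store
// After (inferred):
//   func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor,
//                 cfg StoreConfig) *store
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})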
@@ -145,7 +145,7 @@ func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
 
 func testKVRangeRev(t *testing.T, f rangeFunc) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	kvs := put3TestKVs(s)

@@ -181,7 +181,7 @@ func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
 
 func testKVRangeBadRev(t *testing.T, f rangeFunc) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	put3TestKVs(s)

@@ -214,7 +214,7 @@ func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
 
 func testKVRangeLimit(t *testing.T, f rangeFunc) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	kvs := put3TestKVs(s)

@@ -259,7 +259,7 @@ func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutF
 
 func testKVPutMultipleTimes(t *testing.T, f putFunc) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < 10; i++ {

@@ -321,7 +321,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 
 	for i, tt := range tests {
 		b, tmpPath := betesting.NewDefaultTmpBackend(t)
-		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 		s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 		s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)

@@ -341,7 +341,7 @@ func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, t
 
 func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)

@@ -362,7 +362,7 @@ func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
 // test that range, put, delete on single key in sequence repeatedly works correctly.
 func TestKVOperationInSequence(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < 10; i++ {

@@ -409,7 +409,7 @@ func TestKVOperationInSequence(t *testing.T) {
 
 func TestKVTxnBlockWriteOperations(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	tests := []func(){
 		func() { s.Put([]byte("foo"), nil, lease.NoLease) },

@@ -443,7 +443,7 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
 
 func TestKVTxnNonBlockRange(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	txn := s.Write(traceutil.TODO())

@@ -464,7 +464,7 @@ func TestKVTxnNonBlockRange(t *testing.T) {
 // test that txn range, put, delete on single key in sequence repeatedly works correctly.
 func TestKVTxnOperationInSequence(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < 10; i++ {

@@ -514,7 +514,7 @@ func TestKVTxnOperationInSequence(t *testing.T) {
 
 func TestKVCompactReserveLastValue(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar0"), 1)

@@ -568,7 +568,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
 
 func TestKVCompactBad(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)

@@ -601,7 +601,7 @@ func TestKVHash(t *testing.T) {
 	for i := 0; i < len(hashes); i++ {
 		var err error
 		b, tmpPath := betesting.NewDefaultTmpBackend(t)
-		kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 		kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
 		kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
 		hashes[i], _, err = kv.Hash()

@@ -639,7 +639,7 @@ func TestKVRestore(t *testing.T) {
 	}
 	for i, tt := range tests {
 		b, tmpPath := betesting.NewDefaultTmpBackend(t)
-		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 		tt(s)
 		var kvss [][]mvccpb.KeyValue
 		for k := int64(0); k < 10; k++ {

@@ -651,7 +651,7 @@ func TestKVRestore(t *testing.T) {
 		s.Close()
 
 		// ns should recover the the previous state from backend.
-		ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 		if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
 			t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)

@@ -683,7 +683,7 @@ func readGaugeInt(g prometheus.Gauge) int {
 
 func TestKVSnapshot(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	wkvs := put3TestKVs(s)

@@ -703,7 +703,7 @@ func TestKVSnapshot(t *testing.T) {
 	}
 	f.Close()
 
-	ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer ns.Close()
 	r, err := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{})
 	if err != nil {

@@ -719,7 +719,7 @@ func TestKVSnapshot(t *testing.T) {
 
 func TestWatchableKVWatch(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()
@@ -67,7 +67,7 @@ func TestScheduleCompaction(t *testing.T) {
 	}
 	for i, tt := range tests {
 		b, tmpPath := betesting.NewDefaultTmpBackend(t)
-		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 		tx := s.b.BatchTx()
 
 		tx.Lock()

@@ -101,7 +101,7 @@ func TestScheduleCompaction(t *testing.T) {
 
 func TestCompactAllAndRestore(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer os.Remove(tmpPath)
 
 	s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)

@@ -127,7 +127,7 @@ func TestCompactAllAndRestore(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	if s1.Rev() != rev {
 		t.Errorf("rev = %v, want %v", s1.Rev(), rev)
 	}
@@ -43,7 +43,7 @@ import (
 
 func TestStoreRev(t *testing.T) {
 	b, _ := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer s.Close()
 
 	for i := 1; i <= 3; i++ {

@@ -426,7 +426,7 @@ func TestRestoreDelete(t *testing.T) {
 	defer func() { restoreChunkKeys = oldChunk }()
 
 	b, _ := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	keys := make(map[string]struct{})
 	for i := 0; i < 20; i++ {

@@ -451,7 +451,7 @@ func TestRestoreDelete(t *testing.T) {
 	}
 	s.Close()
 
-	s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer s.Close()
 	for i := 0; i < 20; i++ {
 		ks := fmt.Sprintf("foo-%d", i)

@@ -473,7 +473,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 	tests := []string{"recreate", "restore"}
 	for _, test := range tests {
 		b, _ := betesting.NewDefaultTmpBackend(t)
-		s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+		s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 		s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 		s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)

@@ -492,7 +492,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 		var s *store
 		switch test {
 		case "recreate":
-			s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+			s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 		case "restore":
 			s0.Restore(b)
 			s = s0

@@ -534,7 +534,7 @@ type hashKVResult struct {
 // TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
 func TestHashKVWhenCompacting(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer os.Remove(tmpPath)
 
 	rev := 10000

@@ -602,7 +602,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
 // correct hash value with latest revision.
 func TestHashKVZeroRevision(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer os.Remove(tmpPath)
 
 	rev := 10000

@@ -635,7 +635,7 @@ func TestTxnPut(t *testing.T) {
 	vals := createBytesSlice(bytesN, sliceN)
 
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < sliceN; i++ {

@@ -651,7 +651,7 @@ func TestTxnPut(t *testing.T) {
 // TestConcurrentReadNotBlockingWrite ensures Read does not blocking Write after its creation
 func TestConcurrentReadNotBlockingWrite(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer os.Remove(tmpPath)
 
 	// write something to read later

@@ -720,7 +720,7 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
 		mu sync.Mutex // mu protects committedKVs
 	)
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 	defer os.Remove(tmpPath)
 
 	var wg sync.WaitGroup
@@ -20,7 +20,6 @@ import (
 	"testing"
 
 	"go.etcd.io/etcd/pkg/v3/traceutil"
-	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
 	"go.etcd.io/etcd/server/v3/lease"
 	betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
 

@@ -29,7 +28,7 @@ import (
 
 func BenchmarkWatchableStorePut(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := New(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes

@@ -49,7 +48,7 @@ func BenchmarkWatchableStorePut(b *testing.B) {
 // some synchronization operations, such as mutex locking.
 func BenchmarkWatchableStoreTxnPut(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := New(zap.NewExample(), be, &lease.FakeLessor{}, cindex.NewConsistentIndex(be.BatchTx()), StoreConfig{})
+	s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes
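BenchmarkWatchableStoreTxnPut was the one benchmark wiring a live index, cindex.NewConsistentIndex(be.BatchTx()), into the store; it now uses the same four-argument call as the rest, which is what lets the cindex import in the first hunk of this group be dropped. The resulting setup, assembled from the hunk above with the elided benchmark body left as a comment:

func BenchmarkWatchableStoreTxnPut(b *testing.B) {
	be, tmpPath := betesting.NewDefaultTmpBackend(b)
	// No ConsistentIndexer argument anymore: the KV store no longer
	// owns consistent-index persistence.
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, be, tmpPath)
	// ... benchmark body unchanged ...
}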
@@ -80,7 +79,7 @@ func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
 
 func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s, be, tmpPath)
 
 	k := []byte("testkey")

@@ -123,7 +122,7 @@ func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
 // we should put to simulate the real-world use cases.
 func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 
 	// manually create watchableStore instead of newWatchableStore
 	// because newWatchableStore periodically calls syncWatchersLoop

@@ -180,7 +179,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 
 func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()
@@ -25,7 +25,6 @@ import (
 
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
-	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
 	"go.etcd.io/etcd/server/v3/lease"
 	betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
 	"go.uber.org/zap"

@@ -33,7 +32,7 @@ import (
 
 func TestWatch(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -55,7 +54,7 @@ func TestWatch(t *testing.T) {
 
 func TestNewWatcherCancel(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -87,7 +86,7 @@ func TestCancelUnsynced(t *testing.T) {
 	// method to sync watchers in unsynced map. We want to keep watchers
 	// in unsynced to test if syncWatchers works as expected.
 	s := &watchableStore{
-		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
+		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
 		unsynced: newWatcherGroup(),
 
 		// to make the test not crash from assigning to nil map.

@@ -142,7 +141,7 @@ func TestSyncWatchers(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
 
 	s := &watchableStore{
-		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
+		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
 		unsynced: newWatcherGroup(),
 		synced:   newWatcherGroup(),
 	}

@@ -225,7 +224,7 @@ func TestSyncWatchers(t *testing.T) {
 // TestWatchCompacted tests a watcher that watches on a compacted revision.
 func TestWatchCompacted(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -262,7 +261,7 @@ func TestWatchCompacted(t *testing.T) {
 
 func TestWatchFutureRev(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -303,7 +302,7 @@ func TestWatchRestore(t *testing.T) {
 	test := func(delay time.Duration) func(t *testing.T) {
 		return func(t *testing.T) {
 			b, tmpPath := betesting.NewDefaultTmpBackend(t)
-			s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, cindex.NewConsistentIndex(b.BatchTx()), StoreConfig{})
+			s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 			defer cleanup(s, b, tmpPath)
 
 			testKey := []byte("foo")

@@ -311,7 +310,7 @@ func TestWatchRestore(t *testing.T) {
 			rev := s.Put(testKey, testValue, lease.NoLease)
 
 			newBackend, newPath := betesting.NewDefaultTmpBackend(t)
-			newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, cindex.NewConsistentIndex(newBackend.BatchTx()), StoreConfig{})
+			newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, StoreConfig{})
 			defer cleanup(newStore, newBackend, newPath)
 
 			w := newStore.NewWatchStream()

@@ -349,11 +348,11 @@ func TestWatchRestore(t *testing.T) {
 // 5. choose the watcher from step 1, without panic
 func TestWatchRestoreSyncedWatcher(t *testing.T) {
 	b1, b1Path := betesting.NewDefaultTmpBackend(t)
-	s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, cindex.NewConsistentIndex(b1.BatchTx()), StoreConfig{})
+	s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s1, b1, b1Path)
 
 	b2, b2Path := betesting.NewDefaultTmpBackend(t)
-	s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, cindex.NewConsistentIndex(b2.BatchTx()), StoreConfig{})
+	s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, StoreConfig{})
 	defer cleanup(s2, b2, b2Path)
 
 	testKey, testValue := []byte("foo"), []byte("bar")
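TestWatchRestore and TestWatchRestoreSyncedWatcher show the simplification most clearly: each of their independent backends previously needed its own cindex.NewConsistentIndex(bN.BatchTx()). The two-store setup after the change, copied from the hunk above:

b1, b1Path := betesting.NewDefaultTmpBackend(t)
s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s1, b1, b1Path)

b2, b2Path := betesting.NewDefaultTmpBackend(t)
s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s2, b2, b2Path)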
@@ -400,7 +399,7 @@ func TestWatchRestoreSyncedWatcher(t *testing.T) {
 // TestWatchBatchUnsynced tests batching on unsynced watchers
 func TestWatchBatchUnsynced(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	oldMaxRevs := watchBatchMaxRevs
 	defer func() {

@@ -534,7 +533,7 @@ func TestWatchVictims(t *testing.T) {
 	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
 
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -612,7 +611,7 @@ func TestWatchVictims(t *testing.T) {
 // canceling its watches.
 func TestStressWatchCancelClose(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -26,7 +26,7 @@ import (
 
 func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
-	watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
+	watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
 
 	defer cleanup(watchable, be, tmpPath)
 

@@ -32,7 +32,7 @@ import (
 // and the watched event attaches the correct watchID.
 func TestWatcherWatchID(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()

@@ -82,7 +82,7 @@ func TestWatcherWatchID(t *testing.T) {
 
 func TestWatcherRequestsCustomID(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()

@@ -119,7 +119,7 @@ func TestWatcherRequestsCustomID(t *testing.T) {
 // and returns events with matching prefixes.
 func TestWatcherWatchPrefix(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()

@@ -193,7 +193,7 @@ func TestWatcherWatchPrefix(t *testing.T) {
 // does not create watcher, which panics when canceling in range tree.
 func TestWatcherWatchWrongRange(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()

@@ -213,7 +213,7 @@ func TestWatcherWatchWrongRange(t *testing.T) {
 
 func TestWatchDeleteRange(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
+	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
 
 	defer func() {
 		s.store.Close()

@@ -252,7 +252,7 @@ func TestWatchDeleteRange(t *testing.T) {
 // with given id inside watchStream.
 func TestWatchStreamCancelWatcherByID(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()

@@ -295,7 +295,7 @@ func TestWatcherRequestProgress(t *testing.T) {
 	// method to sync watchers in unsynced map. We want to keep watchers
 	// in unsynced to test if syncWatchers works as expected.
 	s := &watchableStore{
-		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
+		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
 		unsynced: newWatcherGroup(),
 		synced:   newWatcherGroup(),
 	}

@@ -344,7 +344,7 @@ func TestWatcherRequestProgress(t *testing.T) {
 
 func TestWatcherWatchWithFilter(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()
@@ -149,7 +149,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
 	clus.Members[0].Stop(t)
 	dpath := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
 	b := backend.NewDefaultBackend(dpath)
-	s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, nil, mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
+	s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
 	rev := 100000
 	for i := 2; i <= rev; i++ {
 		s.Put([]byte(fmt.Sprintf("%10d", i)), bytes.Repeat([]byte("a"), 1024), lease.NoLease)

@@ -25,7 +25,6 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
-	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
 	"go.etcd.io/etcd/server/v3/mvcc"
 	"go.etcd.io/etcd/server/v3/mvcc/backend"
 	"go.uber.org/zap/zaptest"

@@ -167,7 +166,7 @@ func TestV3CorruptAlarm(t *testing.T) {
 	clus.Members[0].Stop(t)
 	fp := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
 	be := backend.NewDefaultBackend(fp)
-	s := mvcc.NewStore(zaptest.NewLogger(t), be, nil, cindex.NewFakeConsistentIndex(13), mvcc.StoreConfig{})
+	s := mvcc.NewStore(zaptest.NewLogger(t), be, nil, mvcc.StoreConfig{})
 	// NOTE: cluster_proxy mode with namespacing won't set 'k', but namespace/'k'.
 	s.Put([]byte("abc"), []byte("def"), 0)
 	s.Put([]byte("xyz"), []byte("123"), 0)
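TestV3CorruptAlarm injected cindex.NewFakeConsistentIndex(13) purely to satisfy the old constructor; with the parameter gone, the fake index goes away along with the cindex import removed in the preceding hunk. The post-change construction from the hunk above (note the nil lessor, unlike the FakeLessor used elsewhere in this diff):

be := backend.NewDefaultBackend(fp)
s := mvcc.NewStore(zaptest.NewLogger(t), be, nil, mvcc.StoreConfig{})
s.Put([]byte("abc"), []byte("def"), 0)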
@@ -38,7 +38,7 @@ func initMVCC() {
 	bcfg := backend.DefaultBackendConfig()
 	bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = "mvcc-bench", time.Duration(batchInterval)*time.Millisecond, batchLimit
 	be := backend.New(bcfg)
-	s = mvcc.NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, mvcc.StoreConfig{})
+	s = mvcc.NewStore(zap.NewExample(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
 	os.Remove("mvcc-bench") // boltDB has an opened fd, so removing the file is ok
 }
 