Merge pull request #8475 from heyitsanthony/mvcc-100-range

mvcc: don't allocate keys when computing Revisions
Anthony Romano authored on 2017-08-31 16:42:16 -07:00, committed by GitHub
commit b61c7489e0
2 changed files with 41 additions and 25 deletions
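
In short: treeIndex.Revisions previously delegated to treeIndex.Range, which builds both a [][]byte of keys and a []revision, so the keys slice was allocated only to be discarded. The commit factors the B-tree walk into a shared visit helper; Revisions now appends only revisions, while Range keeps appending both. The change to Revisions, restated outside the diff:

    // before: allocates the keys slice just to drop it
    _, rev := ti.Range(key, end, atRev)
    return rev

    // after: collects revisions only
    ti.visit(key, end, func(ki *keyIndex) {
        if rev, _, _, err := ki.get(atRev); err == nil {
            revs = append(revs, rev)
        }
    })
    return revs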

mvcc/index.go

@@ -85,6 +85,21 @@ func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
     return nil
 }
 
+func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
+    keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
+
+    ti.RLock()
+    defer ti.RUnlock()
+
+    ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+        if len(endi.key) > 0 && !item.Less(endi) {
+            return false
+        }
+        f(item.(*keyIndex))
+        return true
+    })
+}
+
 func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
     if end == nil {
         rev, _, _, err := ti.Get(key, atRev)
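
The new visit helper walks the half-open interval [key, end): AscendGreaterOrEqual starts at the first item >= key, and the callback returns false as soon as the current item is not less than end, so end itself is excluded (and an empty end never stops the walk). A minimal, self-contained sketch of the same stop condition against the google/btree v1 Item API (illustrative only, not etcd code):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/google/btree"
    )

    // item stands in for keyIndex: ordered by its key bytes.
    type item struct{ key []byte }

    func (a *item) Less(b btree.Item) bool { return bytes.Compare(a.key, b.(*item).key) < 0 }

    func main() {
        t := btree.New(32)
        for _, k := range []string{"a", "b", "c", "d"} {
            t.ReplaceOrInsert(&item{key: []byte(k)})
        }
        begin, end := &item{key: []byte("b")}, &item{key: []byte("d")}
        t.AscendGreaterOrEqual(begin, func(it btree.Item) bool {
            if len(end.key) > 0 && !it.Less(end) {
                return false // reached end: [begin, end) excludes end
            }
            fmt.Printf("%s ", it.(*item).key) // prints: b c
            return true
        })
    }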
@@ -93,8 +108,12 @@ func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
         }
         return []revision{rev}
     }
-    _, rev := ti.Range(key, end, atRev)
-    return rev
+    ti.visit(key, end, func(ki *keyIndex) {
+        if rev, _, _, err := ki.get(atRev); err == nil {
+            revs = append(revs, rev)
+        }
+    })
+    return revs
 }
 
 func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
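
With the keys slice gone, Revisions is the cheaper call whenever only the matching revisions, or just their count, are needed. A hypothetical caller for illustration (countAt is not an etcd function):

    // countAt reports how many keys in [key, end) are live at atRev,
    // without materializing the key slices that Range would allocate.
    func countAt(ti *treeIndex, key, end []byte, atRev int64) int {
        return len(ti.Revisions(key, end, atRev))
    }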
@@ -105,27 +124,12 @@ func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
         }
         return [][]byte{key}, []revision{rev}
     }
-    keyi := &keyIndex{key: key}
-    endi := &keyIndex{key: end}
-
-    ti.RLock()
-    defer ti.RUnlock()
-
-    ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
-        if len(endi.key) > 0 && !item.Less(endi) {
-            return false
-        }
-        curKeyi := item.(*keyIndex)
-        rev, _, _, err := curKeyi.get(atRev)
-        if err != nil {
-            return true
-        }
-        revs = append(revs, rev)
-        keys = append(keys, curKeyi.key)
-        return true
+    ti.visit(key, end, func(ki *keyIndex) {
+        if rev, _, _, err := ki.get(atRev); err == nil {
+            revs = append(revs, rev)
+            keys = append(keys, ki.key)
+        }
     })
     return keys, revs
 }
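
Range and Revisions now funnel through visit and differ only in what their callbacks append. The callbacks write to the named result parameters (keys, revs) of the enclosing functions; Go closures capture those variables, so the appends are visible to the caller once the traversal returns. A standalone illustration of the same pattern (not etcd code):

    // evens filters a slice through a visit-style traversal helper,
    // appending to the named result parameter from inside the closure.
    func evens(nums []int) (out []int) {
        visit := func(f func(int)) {
            for _, n := range nums {
                f(n)
            }
        }
        visit(func(n int) {
            if n%2 == 0 {
                out = append(out, n)
            }
        })
        return out
    }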

mvcc/kvstore_bench_test.go

@@ -45,22 +45,34 @@ func BenchmarkStorePut(b *testing.B) {
     }
 }
 
-func BenchmarkStoreRangeOneKey(b *testing.B) {
+func BenchmarkStoreRangeKey1(b *testing.B)   { benchmarkStoreRange(b, 1) }
+func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
+
+func benchmarkStoreRange(b *testing.B, n int) {
     var i fakeConsistentIndex
     be, tmpPath := backend.NewDefaultTmpBackend()
     s := NewStore(be, &lease.FakeLessor{}, &i)
     defer cleanup(s, be, tmpPath)
 
     // 64 byte key/val
-    key, val := createBytesSlice(64, 1), createBytesSlice(64, 1)
-    s.Put(key[0], val[0], lease.NoLease)
+    keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
+    for i := range keys {
+        s.Put(keys[i], val[0], lease.NoLease)
+    }
 
     // Force into boltdb tx instead of backend read tx.
     s.Commit()
 
+    var begin, end []byte
+    if n == 1 {
+        begin, end = keys[0], nil
+    } else {
+        begin, end = []byte{}, []byte{}
+    }
+
+    b.ReportAllocs()
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
-        s.Range(key[0], nil, RangeOptions{})
+        s.Range(begin, end, RangeOptions{})
     }
 }
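
In the updated benchmark, n == 1 preserves the old single-key lookup (begin = keys[0], end = nil), while a larger n sets both begin and end to empty byte slices; given the len(endi.key) > 0 guard in visit, an empty end never stops the ascent, so the 100-key case ranges over the whole keyspace, presumably the point of adding it, since that is where the key allocations removed by this commit would show up. b.ReportAllocs() makes allocs/op appear in the benchmark output, and the benchmarks can be run with the usual tooling, e.g. go test -run '^$' -bench StoreRangeKey ./mvcc. createBytesSlice is a pre-existing test helper that is not part of this diff; judging from its call sites it returns sliceN random slices of bytesN bytes each, roughly like this hypothetical sketch:

    import "crypto/rand"

    // Hypothetical sketch of the pre-existing helper (the real one lives in the
    // benchmark's package and is not shown in this diff): sliceN byte slices,
    // each holding bytesN bytes of random data.
    func createBytesSlice(bytesN, sliceN int) [][]byte {
        rs := make([][]byte, 0, sliceN)
        for i := 0; i < sliceN; i++ {
            b := make([]byte, bytesN)
            if _, err := rand.Read(b); err != nil {
                panic(err)
            }
            rs = append(rs, b)
        }
        return rs
    }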