From 77775e8e92f39f5d4e5cd824beec058cd4c546f6 Mon Sep 17 00:00:00 2001
From: Gyu-Ho Lee
Date: Wed, 18 May 2016 09:42:37 -0700
Subject: [PATCH] mvcc: preallocate bytes buffer for saveIndex

---
 mvcc/kvstore.go            |  8 ++++++--
 mvcc/kvstore_bench_test.go | 13 +++++++++++--
 2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/mvcc/kvstore.go b/mvcc/kvstore.go
index 37900dc70..01aca0708 100644
--- a/mvcc/kvstore.go
+++ b/mvcc/kvstore.go
@@ -75,6 +75,10 @@ type store struct {
 	tx    backend.BatchTx
 	txnID int64 // tracks the current txnID to verify txn operations
 
+	// bytesBuf8 is a byte slice of length 8
+	// to avoid a repetitive allocation in saveIndex.
+	bytesBuf8 []byte
+
 	changes   []mvccpb.KeyValue
 	fifoSched schedule.Scheduler
 
@@ -94,6 +98,7 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto
 		currentRev:     revision{main: 1},
 		compactMainRev: -1,
 
+		bytesBuf8: make([]byte, 8, 8),
 		fifoSched: schedule.NewFIFOScheduler(),
 
 		stopc: make(chan struct{}),
@@ -595,8 +600,7 @@ func (s *store) saveIndex() {
 		return
 	}
 	tx := s.tx
-	// TODO: avoid this unnecessary allocation
-	bs := make([]byte, 8)
+	bs := s.bytesBuf8
 	binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex())
 	// put the index into the underlying backend
 	// tx has been locked in TxnBegin, so there is no need to lock it again
diff --git a/mvcc/kvstore_bench_test.go b/mvcc/kvstore_bench_test.go
index 457d2a3d6..e1d7ecdd4 100644
--- a/mvcc/kvstore_bench_test.go
+++ b/mvcc/kvstore_bench_test.go
@@ -16,15 +16,23 @@ package mvcc
 
 import (
 	"log"
+	"sync/atomic"
 	"testing"
 
 	"github.com/coreos/etcd/lease"
 	"github.com/coreos/etcd/mvcc/backend"
 )
 
+type fakeConsistentIndex uint64
+
+func (i *fakeConsistentIndex) ConsistentIndex() uint64 {
+	return atomic.LoadUint64((*uint64)(i))
+}
+
 func BenchmarkStorePut(b *testing.B) {
+	var i fakeConsistentIndex
 	be, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(be, &lease.FakeLessor{}, nil)
+	s := NewStore(be, &lease.FakeLessor{}, &i)
 	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes
@@ -42,8 +50,9 @@
 // with transaction begin and end, where transaction involves
 // some synchronization operations, such as mutex locking.
 func BenchmarkStoreTxnPut(b *testing.B) {
+	var i fakeConsistentIndex
 	be, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(be, &lease.FakeLessor{}, nil)
+	s := NewStore(be, &lease.FakeLessor{}, &i)
 	defer cleanup(s, be, tmpPath)
 
 	// arbitrary number of bytes
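
Note (not part of the patch): the core idea is to reuse a single preallocated
8-byte buffer for binary.BigEndian.PutUint64 instead of allocating a new slice
on every saveIndex call. A minimal standalone sketch of the pattern, using
hypothetical names rather than the actual etcd types, looks like this:

// Illustrative sketch only (hypothetical names, not the etcd code):
// reuse one preallocated 8-byte buffer for every index encode.
package main

import (
	"encoding/binary"
	"fmt"
)

type store struct {
	bytesBuf8 []byte // allocated once, reused on every encode
}

func (s *store) encodeIndex(index uint64) []byte {
	// PutUint64 requires len(bs) >= 8; the buffer was preallocated to 8.
	binary.BigEndian.PutUint64(s.bytesBuf8, index)
	return s.bytesBuf8
}

func main() {
	s := &store{bytesBuf8: make([]byte, 8)}
	fmt.Printf("%x\n", s.encodeIndex(42)) // 000000000000002a
}

Reusing a shared buffer like this is only safe when callers are serialized;
in the patched saveIndex the surrounding comment notes that tx is already
locked in TxnBegin, which is presumably what keeps the shared bytesBuf8 from
being written concurrently. The benchmark changes pass a fakeConsistentIndex
instead of nil so that saveIndex (and thus the buffer path) is actually
exercised during the benchmarks.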