pkg/report, tools/benchmark: refactor report out of tools/benchmark

Only tracks time series when requested. Can configure output precision.
Anthony Romano
2016-10-06 15:58:24 -07:00
parent cc04d80b09
commit 3d28faa3eb
14 changed files with 364 additions and 399 deletions
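A minimal sketch of how a caller might drive the refactored package (the import path and the "%4.4f" precision format are assumptions mirroring the benchmark tool's usage; the operation loop is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/report" // assumed import path for this tree
)

func main() {
	// NewReportSample also records the per-second time series; plain NewReport does not.
	r := report.NewReportSample("%4.4f")

	// Run consumes results until the Results channel is closed,
	// then delivers the rendered report on the returned channel.
	donec := r.Run()

	for i := 0; i < 10; i++ {
		start := time.Now()
		// ... perform one benchmark operation here ...
		r.Results() <- report.Result{Start: start, End: time.Now()}
	}
	close(r.Results())

	fmt.Println(<-donec)
}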

pkg/report/doc.go

@@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package report generates human-readable benchmark reports.
package report

pkg/report/report.go

@@ -0,0 +1,219 @@
// Copyright 2014 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// the file is borrowed from github.com/rakyll/boom/boomer/print.go
package report
import (
"fmt"
"math"
"sort"
"strings"
"time"
)
const (
barChar = "∎"
)
// Result describes the timings for an operation.
type Result struct {
Start time.Time
End time.Time
Err error
}
func (res *Result) Duration() time.Duration { return res.End.Sub(res.Start) }
type report struct {
results chan Result
precision string
avgTotal float64
fastest float64
slowest float64
average float64
stddev float64
rps float64
total time.Duration
errorDist map[string]int
lats []float64
sps *secondPoints
}
// Report processes a result stream until it is closed, then produces a
// string with information about the consumed result data.
type Report interface {
Results() chan<- Result
Run() <-chan string
String() string
}
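// NewReport returns a Report that formats its floating point output (latencies in
// seconds, requests/sec, histogram buckets) with the given printf verb, e.g. "%4.4f".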
func NewReport(precision string) Report {
return &report{
results: make(chan Result, 16),
precision: precision,
errorDist: make(map[string]int),
}
}
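// NewReportSample is like NewReport but also records a per-second time series of
// average latency and throughput, printed after the summary.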
func NewReportSample(precision string) Report {
r := NewReport(precision).(*report)
r.sps = newSecondPoints()
return r
}
func (r *report) Results() chan<- Result { return r.results }
func (r *report) Run() <-chan string {
donec := make(chan string, 1)
go func() {
defer close(donec)
r.processResults()
donec <- r.String()
}()
return donec
}
func (r *report) String() (s string) {
if len(r.lats) > 0 {
s += fmt.Sprintf("\nSummary:\n")
s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.total.Seconds()))
s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.slowest))
s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.fastest))
s += fmt.Sprintf(" Average:\t%s.\n", r.sec2str(r.average))
s += fmt.Sprintf(" Stddev:\t%s.\n", r.sec2str(r.stddev))
s += fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps)
s += r.histogram()
s += r.latencies()
if r.sps != nil {
s += fmt.Sprintf("%v\n", r.sps.getTimeSeries())
}
}
if len(r.errorDist) > 0 {
s += r.errors()
}
return s
}
func (r *report) sec2str(sec float64) string { return fmt.Sprintf(r.precision+" secs", sec) }
type reportRate struct{ *report }
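// NewReportRate is like NewReport but its String method prints only the requests/sec line.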
func NewReportRate(precision string) Report {
return &reportRate{NewReport(precision).(*report)}
}
func (r *reportRate) String() string {
return fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps)
}
func (r *report) processResult(res *Result) {
if res.Err != nil {
r.errorDist[res.Err.Error()]++
return
}
dur := res.Duration()
r.lats = append(r.lats, dur.Seconds())
r.avgTotal += dur.Seconds()
if r.sps != nil {
r.sps.Add(res.Start, dur)
}
}
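// processResults drains the results channel, then computes the total wall time,
// throughput, average, standard deviation, and the fastest/slowest latencies.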
func (r *report) processResults() {
st := time.Now()
for res := range r.results {
r.processResult(&res)
}
r.total = time.Since(st)
r.rps = float64(len(r.lats)) / r.total.Seconds()
r.average = r.avgTotal / float64(len(r.lats))
for i := range r.lats {
dev := r.lats[i] - r.average
r.stddev += dev * dev
}
r.stddev = math.Sqrt(r.stddev / float64(len(r.lats)))
sort.Float64s(r.lats)
if len(r.lats) > 0 {
r.fastest = r.lats[0]
r.slowest = r.lats[len(r.lats)-1]
}
}
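// latencies reports the latency observed at fixed percentiles; it relies on
// processResults having already sorted r.lats.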
func (r *report) latencies() string {
pctls := []int{10, 25, 50, 75, 90, 95, 99}
data := make([]float64, len(pctls))
j := 0
for i := 0; i < len(r.lats) && j < len(pctls); i++ {
current := i * 100 / len(r.lats)
if current >= pctls[j] {
data[j] = r.lats[i]
j++
}
}
s := fmt.Sprintf("\nLatency distribution:\n")
for i := 0; i < len(pctls); i++ {
if data[i] > 0 {
s += fmt.Sprintf(" %v%% in %s.\n", pctls[i], r.sec2str(data[i]))
}
}
return s
}
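// histogram splits the [fastest, slowest] range into ten equal-width buckets and
// draws a bar for the number of requests that fall into each.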
func (r *report) histogram() string {
bc := 10
buckets := make([]float64, bc+1)
counts := make([]int, bc+1)
bs := (r.slowest - r.fastest) / float64(bc)
for i := 0; i < bc; i++ {
buckets[i] = r.fastest + bs*float64(i)
}
buckets[bc] = r.slowest
var bi int
var max int
for i := 0; i < len(r.lats); {
if r.lats[i] <= buckets[bi] {
i++
counts[bi]++
if max < counts[bi] {
max = counts[bi]
}
} else if bi < len(buckets)-1 {
bi++
}
}
s := fmt.Sprintf("\nResponse time histogram:\n")
for i := 0; i < len(buckets); i++ {
// Normalize bar lengths.
var barLen int
if max > 0 {
barLen = counts[i] * 40 / max
}
s += fmt.Sprintf(" "+r.precision+" [%v]\t|%v\n", buckets[i], counts[i], strings.Repeat(barChar, barLen))
}
return s
}
func (r *report) errors() string {
s := fmt.Sprintf("\nError distribution:\n")
for err, num := range r.errorDist {
s += fmt.Sprintf(" [%d]\t%s\n", num, err)
}
return s
}

pkg/report/timeseries.go

@@ -0,0 +1,134 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package report
import (
"bytes"
"encoding/csv"
"fmt"
"log"
"math"
"sort"
"sync"
"time"
)
type timeSeries struct {
timestamp int64
avgLatency time.Duration
throughPut int64
}
type TimeSeries []timeSeries
func (t TimeSeries) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t TimeSeries) Len() int { return len(t) }
func (t TimeSeries) Less(i, j int) bool { return t[i].timestamp < t[j].timestamp }
type secondPoint struct {
totalLatency time.Duration
count int64
}
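// secondPoints accumulates request latencies keyed by the unix second in which
// each request started.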
type secondPoints struct {
mu sync.Mutex
tm map[int64]secondPoint
}
func newSecondPoints() *secondPoints {
return &secondPoints{tm: make(map[int64]secondPoint)}
}
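// Add records a single request latency under the unix second of its start time.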
func (sp *secondPoints) Add(ts time.Time, lat time.Duration) {
sp.mu.Lock()
defer sp.mu.Unlock()
tk := ts.Unix()
if v, ok := sp.tm[tk]; !ok {
sp.tm[tk] = secondPoint{totalLatency: lat, count: 1}
} else {
v.totalLatency += lat
v.count += 1
sp.tm[tk] = v
}
}
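// getTimeSeries fills in zero-valued points for any empty seconds between the first
// and last samples, then returns all points sorted by timestamp.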
func (sp *secondPoints) getTimeSeries() TimeSeries {
sp.mu.Lock()
defer sp.mu.Unlock()
var (
minTs int64 = math.MaxInt64
maxTs int64 = -1
)
for k := range sp.tm {
if minTs > k {
minTs = k
}
if maxTs < k {
maxTs = k
}
}
for ti := minTs; ti < maxTs; ti++ {
if _, ok := sp.tm[ti]; !ok { // fill-in empties
sp.tm[ti] = secondPoint{totalLatency: 0, count: 0}
}
}
var (
tslice = make(TimeSeries, len(sp.tm))
i int
)
for k, v := range sp.tm {
var lat time.Duration
if v.count > 0 {
lat = time.Duration(v.totalLatency) / time.Duration(v.count)
}
tslice[i] = timeSeries{
timestamp: k,
avgLatency: lat,
throughPut: v.count,
}
i++
}
sort.Sort(tslice)
return tslice
}
func (ts TimeSeries) String() string {
buf := new(bytes.Buffer)
wr := csv.NewWriter(buf)
if err := wr.Write([]string{"unix_ts", "avg_latency", "throughput"}); err != nil {
log.Fatal(err)
}
rows := [][]string{}
for i := range ts {
row := []string{
fmt.Sprintf("%d", ts[i].timestamp),
fmt.Sprintf("%s", ts[i].avgLatency),
fmt.Sprintf("%d", ts[i].throughPut),
}
rows = append(rows, row)
}
if err := wr.WriteAll(rows); err != nil {
log.Fatal(err)
}
wr.Flush()
if err := wr.Error(); err != nil {
log.Fatal(err)
}
return fmt.Sprintf("\nSample in one second (unix latency throughput):\n%s", buf.String())
}
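For reference, the sample section rendered by this String method looks roughly like the following (timestamps and values are invented for illustration):

Sample in one second (unix latency throughput):
unix_ts,avg_latency,throughput
1475794704,1.1ms,905
1475794705,1.3ms,878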


@@ -0,0 +1,31 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package report
import (
"testing"
"time"
)
func TestGetTimeseries(t *testing.T) {
sp := newSecondPoints()
now := time.Now()
sp.Add(now, time.Second)
sp.Add(now.Add(5*time.Second), time.Second)
n := sp.getTimeSeries().Len()
if n < 6 {
t.Fatalf("expected at least 6 points of time series, got %s", sp.getTimeSeries())
}
}