*: replace our own metrics with codahale/metrics

This commit is contained in:
Xiang Li 2015-01-28 16:27:28 -08:00 committed by Yicheng Qin
parent 33afbfead6
commit 9b6fcfffb6
23 changed files with 1769 additions and 279 deletions

8
Godeps/Godeps.json generated
View File

@ -9,6 +9,14 @@
"ImportPath": "code.google.com/p/gogoprotobuf/proto",
"Rev": "7fd1620f09261338b6b1ca1289ace83aee0ec946"
},
{
"ImportPath": "github.com/codahale/hdrhistogram",
"Rev": "9208b142303c12d8899bae836fd524ac9338b4fd"
},
{
"ImportPath": "github.com/codahale/metrics",
"Rev": "9d788a162cb186f9ced1922fc88d456a82cd16a0"
},
{
"ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-26-gf7ebb76",

View File

@ -0,0 +1,9 @@
language: go
go:
- 1.3.3
notifications:
# See http://about.travis-ci.org/docs/user/build-configuration/ to learn more
# about configuring notification recipients and more.
email:
recipients:
- coda.hale@gmail.com

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,15 @@
hdrhistogram
============
[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram)
A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram).
> A Histogram that supports recording and analyzing sampled data value counts
> across a configurable integer value range with configurable value precision
> within the range. Value precision is expressed as the number of significant
> digits in the value recording, and provides control over value quantization
> behavior across the value range and the subsequent value resolution at any
> given level.
For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram).

View File

@ -0,0 +1,521 @@
// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram
// data structure. The HDR Histogram allows for fast and accurate analysis of
// the extreme ranges of data with non-normal distributions, like latency.
package hdrhistogram
import (
"fmt"
"math"
)
// A Bracket is a part of a cumulative distribution.
type Bracket struct {
Quantile float64 // quantile boundary, expressed as a percentile (0..100)
Count, ValueAt int64 // cumulative count up to this bracket, and the value recorded at it
}
// A Snapshot is an exported view of a Histogram, useful for serializing them.
// A Histogram can be constructed from it by passing it to Import.
type Snapshot struct {
LowestTrackableValue int64 // minimum value the histogram was configured to track
HighestTrackableValue int64 // maximum value the histogram was configured to track
SignificantFigures int64 // number of significant figures of precision
Counts []int64 // raw per-bucket counts; shared with the source Histogram (see Export)
}
// A Histogram is a lossy data structure used to record the distribution of
// non-normally distributed data (like latency) with a high degree of accuracy
// and a bounded degree of precision.
//
// The value range is covered by exponentially-sized buckets, each of which is
// split into linearly-sized sub-buckets; together they bound the relative
// error of any recorded value.
type Histogram struct {
lowestTrackableValue int64 // smallest value that can be recorded
highestTrackableValue int64 // largest value that can be recorded
unitMagnitude int64 // power-of-two magnitude of the smallest distinguishable unit
significantFigures int64 // configured precision in significant figures
subBucketHalfCountMagnitude int32 // log2 of half the sub-bucket count
subBucketHalfCount int32 // subBucketCount / 2
subBucketMask int64 // mask covering the sub-bucket bits at unitMagnitude
subBucketCount int32 // number of linear sub-buckets per bucket
bucketCount int32 // number of exponential buckets
countsLen int32 // length of the counts slice
totalCount int64 // total number of recorded values
counts []int64 // per-(bucket, sub-bucket) occurrence counts
}
// New returns a new Histogram instance capable of tracking values in the given
// range and with the given amount of precision.
//
// sigfigs must be in [1,5]; any other value panics. minValue and maxValue
// bound the trackable range; values outside it are rejected by RecordValues.
func New(minValue, maxValue int64, sigfigs int) *Histogram {
if sigfigs < 1 || 5 < sigfigs {
panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs))
}
// Smallest range that must be resolvable with single-unit precision to
// honor the requested number of significant figures.
largestValueWithSingleUnitResolution := 2 * power(10, int64(sigfigs))
// we need to shove these down to float32 or the math is wrong
a := float32(math.Log(float64(largestValueWithSingleUnitResolution)))
b := float32(math.Log(2))
subBucketCountMagnitude := int32(math.Ceil(float64(a / b)))
subBucketHalfCountMagnitude := subBucketCountMagnitude
if subBucketHalfCountMagnitude < 1 {
subBucketHalfCountMagnitude = 1
}
subBucketHalfCountMagnitude--
// Values below minValue need no extra resolution; skip those low bits.
unitMagnitude := int32(math.Floor(math.Log(float64(minValue)) / math.Log(2)))
if unitMagnitude < 0 {
unitMagnitude = 0
}
subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1))
subBucketHalfCount := subBucketCount / 2
subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude)
// determine exponent range needed to support the trackable value with no
// overflow:
smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude)
bucketsNeeded := int32(1)
for smallestUntrackableValue < maxValue {
smallestUntrackableValue <<= 1
bucketsNeeded++
}
bucketCount := bucketsNeeded
// Only the upper half of each bucket past the first is distinct, hence
// the half-sized rows.
countsLen := (bucketCount + 1) * (subBucketCount / 2)
return &Histogram{
lowestTrackableValue: minValue,
highestTrackableValue: maxValue,
unitMagnitude: int64(unitMagnitude),
significantFigures: int64(sigfigs),
subBucketHalfCountMagnitude: subBucketHalfCountMagnitude,
subBucketHalfCount: subBucketHalfCount,
subBucketMask: subBucketMask,
subBucketCount: subBucketCount,
bucketCount: bucketCount,
countsLen: countsLen,
totalCount: 0,
counts: make([]int64, countsLen),
}
}
// ByteSize returns an estimate of the amount of memory allocated to the
// histogram in bytes.
//
// N.B.: This does not take into account the overhead for slices, which are
// small, constant, and specific to the compiler version.
func (h *Histogram) ByteSize() int {
	const (
		fixedInt64s = 6 // the six int64 fields of Histogram
		fixedInt32s = 5 // the five int32 fields of Histogram
	)
	return fixedInt64s*8 + fixedInt32s*4 + 8*len(h.counts)
}
// Merge merges the data stored in the given histogram with the receiver,
// returning the number of recorded values which had to be dropped.
//
// Values from buckets of `from` that fall outside the receiver's trackable
// range are counted in the returned dropped total.
func (h *Histogram) Merge(from *Histogram) (dropped int64) {
i := from.rIterator()
for i.next() {
v := i.valueFromIdx
c := i.countAtIdx
// RecordValues fails only for out-of-range values; count those as dropped.
if h.RecordValues(v, c) != nil {
dropped += c
}
}
return
}
// Max returns the approximate maximum recorded value.
//
// The result is quantized to the lowest value equivalent to the highest
// non-empty bucket, so it may understate the exact recorded maximum.
func (h *Histogram) Max() int64 {
var max int64
i := h.iterator()
// Walk every bucket; the last non-empty one seen holds the maximum.
for i.next() {
if i.countAtIdx != 0 {
max = i.highestEquivalentValue
}
}
return h.lowestEquivalentValue(max)
}
// Min returns the approximate minimum recorded value.
//
// Returns 0 when no values have been recorded.
func (h *Histogram) Min() int64 {
var min int64
i := h.iterator()
// The first non-empty bucket holds the minimum; stop there.
for i.next() {
if i.countAtIdx != 0 && min == 0 {
min = i.highestEquivalentValue
break
}
}
return h.lowestEquivalentValue(min)
}
// Mean returns the approximate arithmetic mean of the recorded values.
//
// Each bucket contributes its median-equivalent value weighted by its count.
// NOTE(review): with no recorded values this divides 0 by 0 and returns NaN.
func (h *Histogram) Mean() float64 {
var total int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx)
}
}
return float64(total) / float64(h.totalCount)
}
// StdDev returns the approximate standard deviation of the recorded values.
//
// Computed as the population standard deviation over bucket medians,
// weighted by bucket counts. NOTE(review): returns NaN when empty (see Mean).
func (h *Histogram) StdDev() float64 {
mean := h.Mean()
geometricDevTotal := 0.0
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean
geometricDevTotal += (dev * dev) * float64(i.countAtIdx)
}
}
return math.Sqrt(geometricDevTotal / float64(h.totalCount))
}
// Reset deletes all recorded values and restores the histogram to its original
// state, retaining the configured range/precision and the counts allocation.
func (h *Histogram) Reset() {
	h.totalCount = 0
	// Zero the backing array in place rather than reallocating it.
	for i := 0; i < len(h.counts); i++ {
		h.counts[i] = 0
	}
}
// RecordValue records the given value, returning an error if the value is out
// of range.
//
// It is a convenience wrapper around RecordValues with a count of one.
func (h *Histogram) RecordValue(v int64) error {
return h.RecordValues(v, 1)
}
// RecordCorrectedValue records the given value, correcting for stalls in the
// recording process. This only works for processes which are recording values
// at an expected interval (e.g., doing jitter analysis). Processes which are
// recording ad-hoc values (e.g., latency for incoming requests) can't take
// advantage of this.
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
if err := h.RecordValue(v); err != nil {
return err
}
// Nothing to back-fill when the value fits within one expected interval.
if expectedInterval <= 0 || v <= expectedInterval {
return nil
}
// Synthesize the samples that a stalled recorder would have missed, one
// per expected interval below v.
missingValue := v - expectedInterval
for missingValue >= expectedInterval {
if err := h.RecordValue(missingValue); err != nil {
return err
}
missingValue -= expectedInterval
}
return nil
}
// RecordValues records n occurrences of the given value, returning an error if
// the value is out of range.
//
// NOTE(review): the error text says "too large" even when the computed index
// is negative (value below the trackable range).
func (h *Histogram) RecordValues(v, n int64) error {
idx := h.countsIndexFor(v)
if idx < 0 || int(h.countsLen) <= idx {
return fmt.Errorf("value %d is too large to be recorded", v)
}
h.counts[idx] += n
h.totalCount += n
return nil
}
// ValueAtQuantile returns the recorded value at the given quantile (0..100).
//
// q is clamped to 100 from above; negative q is not clamped. Returns 0 when
// no bucket accumulates enough count (e.g., an empty histogram).
func (h *Histogram) ValueAtQuantile(q float64) int64 {
if q > 100 {
q = 100
}
total := int64(0)
// Round to the nearest count rather than truncating.
countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5)
i := h.iterator()
for i.next() {
total += i.countAtIdx
if total >= countAtPercentile {
return h.highestEquivalentValue(i.valueFromIdx)
}
}
return 0
}
// CumulativeDistribution returns an ordered list of brackets of the
// distribution of recorded values.
//
// Brackets are produced at percentile ticks that become exponentially denser
// toward 100 (one tick per half-distance), ending with the 100th percentile.
func (h *Histogram) CumulativeDistribution() []Bracket {
var result []Bracket
i := h.pIterator(1)
for i.next() {
result = append(result, Bracket{
Quantile: i.percentile,
Count: i.countToIdx,
ValueAt: i.highestEquivalentValue,
})
}
return result
}
// Equals returns true if the two Histograms are equivalent, false if not.
//
// Two histograms are equivalent when every configuration field, the total
// count, and every per-bucket count match.
func (h *Histogram) Equals(other *Histogram) bool {
switch {
case
h.lowestTrackableValue != other.lowestTrackableValue,
h.highestTrackableValue != other.highestTrackableValue,
h.unitMagnitude != other.unitMagnitude,
h.significantFigures != other.significantFigures,
h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude,
h.subBucketHalfCount != other.subBucketHalfCount,
h.subBucketMask != other.subBucketMask,
h.subBucketCount != other.subBucketCount,
h.bucketCount != other.bucketCount,
h.countsLen != other.countsLen,
h.totalCount != other.totalCount:
return false
default:
// countsLen equality above guarantees the slices are index-compatible.
for i, c := range h.counts {
if c != other.counts[i] {
return false
}
}
}
return true
}
// Export returns a snapshot view of the Histogram. This can be later passed to
// Import to construct a new Histogram with the same state.
//
// NOTE(review): the snapshot aliases the live counts slice (no copy is made),
// so later recordings mutate the exported Counts as well.
func (h *Histogram) Export() *Snapshot {
return &Snapshot{
LowestTrackableValue: h.lowestTrackableValue,
HighestTrackableValue: h.highestTrackableValue,
SignificantFigures: h.significantFigures,
Counts: h.counts,
}
}
// Import returns a new Histogram populated from the Snapshot data.
//
// The snapshot's Counts slice is adopted directly (not copied) and the total
// count is recomputed from it.
func Import(s *Snapshot) *Histogram {
h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures))
h.counts = s.Counts
totalCount := int64(0)
for i := int32(0); i < h.countsLen; i++ {
countAtIndex := h.counts[i]
if countAtIndex > 0 {
totalCount += countAtIndex
}
}
h.totalCount = totalCount
return h
}
// iterator returns a fresh bucket iterator positioned before the first
// sub-bucket (subBucketIdx == -1 signals "not started").
func (h *Histogram) iterator() *iterator {
return &iterator{
h: h,
subBucketIdx: -1,
}
}
// rIterator returns an iterator that yields only non-empty buckets
// (used by Merge to walk recorded values).
func (h *Histogram) rIterator() *rIterator {
return &rIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
}
}
// pIterator returns a percentile iterator that steps through the distribution
// with the given number of ticks per halving of the remaining distance to 100.
func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator {
return &pIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
ticksPerHalfDistance: ticksPerHalfDistance,
}
}
// sizeOfEquivalentValueRange returns the width of the range of values that
// are indistinguishable from v (i.e. share its bucket/sub-bucket).
func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
adjustedBucket := bucketIdx
// A sub-bucket index past the end spills into the next (wider) bucket.
if subBucketIdx >= h.subBucketCount {
adjustedBucket++
}
return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket))
}
// valueFromIndex reconstructs the lowest value represented by the given
// bucket/sub-bucket pair.
func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 {
return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude)
}
// lowestEquivalentValue returns the smallest value that falls in the same
// bucket/sub-bucket as v.
func (h *Histogram) lowestEquivalentValue(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return h.valueFromIndex(bucketIdx, subBucketIdx)
}
// nextNonEquivalentValue returns the smallest value larger than v that lands
// in a different bucket/sub-bucket.
func (h *Histogram) nextNonEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v)
}
// highestEquivalentValue returns the largest value that falls in the same
// bucket/sub-bucket as v.
func (h *Histogram) highestEquivalentValue(v int64) int64 {
return h.nextNonEquivalentValue(v) - 1
}
// medianEquivalentValue returns the value at the midpoint of v's
// bucket/sub-bucket range, used when estimating means.
func (h *Histogram) medianEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1)
}
// getCountAtIndex returns the recorded count for the given
// bucket/sub-bucket pair.
func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 {
return h.counts[h.countsIndex(bucketIdx, subBucketIdx)]
}
// countsIndex maps a bucket/sub-bucket pair to a flat index in h.counts.
// Buckets past the first only store their upper half of sub-buckets, hence
// the half-count offset.
func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 {
bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude)
offsetInBucket := subBucketIdx - h.subBucketHalfCount
return bucketBaseIdx + offsetInBucket
}
// getBucketIndex returns the exponential bucket that v belongs to, derived
// from the bit length of v (OR-ing with subBucketMask keeps small values in
// bucket 0).
func (h *Histogram) getBucketIndex(v int64) int32 {
pow2Ceiling := bitLen(v | h.subBucketMask)
return int32(pow2Ceiling - int64(h.unitMagnitude) -
int64(h.subBucketHalfCountMagnitude+1))
}
// getSubBucketIdx returns v's linear sub-bucket index within bucket idx.
func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 {
return int32(v >> uint(int64(idx)+int64(h.unitMagnitude)))
}
// countsIndexFor returns the flat counts index for value v; callers must
// bounds-check the result (it may be negative or past countsLen for
// out-of-range values).
func (h *Histogram) countsIndexFor(v int64) int {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return int(h.countsIndex(bucketIdx, subBucketIdx))
}
// iterator walks a histogram bucket-by-bucket in value order.
type iterator struct {
h *Histogram
bucketIdx, subBucketIdx int32 // current position; subBucketIdx starts at -1
countAtIdx, countToIdx, valueFromIdx int64 // count at, cumulative count through, and value of the current position
highestEquivalentValue int64 // largest value equivalent to the current position
}
// next advances to the following bucket position, returning false once all
// recorded values have been visited or the buckets are exhausted.
func (i *iterator) next() bool {
if i.countToIdx >= i.h.totalCount {
return false
}
// increment bucket
i.subBucketIdx++
if i.subBucketIdx >= i.h.subBucketCount {
// Wrap into the next bucket; only its upper half is distinct.
i.subBucketIdx = i.h.subBucketHalfCount
i.bucketIdx++
}
if i.bucketIdx >= i.h.bucketCount {
return false
}
i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx)
i.countToIdx += i.countAtIdx
i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx)
i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx)
return true
}
// rIterator is an iterator that skips empty buckets, yielding only positions
// with recorded values.
type rIterator struct {
iterator
countAddedThisStep int64 // count contributed by the bucket yielded by the last next()
}
// next advances to the next non-empty bucket, returning false when none
// remain.
func (r *rIterator) next() bool {
for r.iterator.next() {
if r.countAtIdx != 0 {
r.countAddedThisStep = r.countAtIdx
return true
}
}
return false
}
// pIterator iterates the distribution by percentile ticks rather than by
// bucket, emitting exponentially denser ticks approaching 100.
type pIterator struct {
iterator
seenLastValue bool // true once the final 100th-percentile tick has been emitted
ticksPerHalfDistance int32 // tick density per halving of the remaining distance to 100
percentileToIteratorTo float64 // next percentile tick to report
percentile float64 // percentile of the last reported tick
}
// next advances to the next percentile tick. After all recorded values are
// consumed it emits one final tick at exactly 100 before returning false.
func (p *pIterator) next() bool {
if !(p.countToIdx < p.h.totalCount) {
if p.seenLastValue {
return false
}
// Emit the terminal 100th-percentile tick exactly once.
p.seenLastValue = true
p.percentile = 100
return true
}
if p.subBucketIdx == -1 && !p.iterator.next() {
return false
}
var done = false
for !done {
currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount)
if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile {
p.percentile = p.percentileToIteratorTo
// Halve the remaining distance to 100 per tick group, so ticks
// get denser in the tail of the distribution.
halfDistance := math.Pow(2, (math.Log(100.0/(100.0-(p.percentileToIteratorTo)))/math.Log(2))+1)
percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance
p.percentileToIteratorTo += 100.0 / percentileReportingTicks
return true
}
done = !p.iterator.next()
}
return true
}
// bitLen returns the minimum number of bits needed to represent x,
// i.e. the position of its highest set bit plus one (0 when x <= 0).
// It is used to find the power-of-two ceiling of a value when locating
// its bucket.
func bitLen(x int64) (n int64) {
	for x > 0 {
		x >>= 1
		n++
	}
	return n
}
// power returns base raised to the non-negative exponent exp by repeated
// multiplication; any exp < 1 yields 1. Overflow is not checked.
func power(base, exp int64) int64 {
	result := int64(1)
	for ; exp > 0; exp-- {
		result *= base
	}
	return result
}

View File

@ -0,0 +1,320 @@
package hdrhistogram_test
import (
"reflect"
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/hdrhistogram"
)
// TestHighSigFig exercises maximum (5 significant figure) precision and pins
// the quantized median of a fixed sample.
func TestHighSigFig(t *testing.T) {
input := []int64{
459876, 669187, 711612, 816326, 931423, 1033197, 1131895, 2477317,
3964974, 12718782,
}
hist := hdrhistogram.New(459876, 12718782, 5)
for _, sample := range input {
hist.RecordValue(sample)
}
if v, want := hist.ValueAtQuantile(50), int64(1048575); v != want {
t.Errorf("Median was %v, but expected %v", v, want)
}
}
// TestValueAtQuantile records a uniform ramp of a million values and checks
// the quantized value at several quantiles.
func TestValueAtQuantile(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
// Expected values are the highest-equivalent (bucket upper bound) values,
// hence the small offsets above the exact quantile.
data := []struct {
q float64
v int64
}{
{q: 50, v: 500223},
{q: 75, v: 750079},
{q: 90, v: 900095},
{q: 95, v: 950271},
{q: 99, v: 990207},
{q: 99.9, v: 999423},
{q: 99.99, v: 999935},
}
for _, d := range data {
if v := h.ValueAtQuantile(d.q); v != d.v {
t.Errorf("P%v was %v, but expected %v", d.q, v, d.v)
}
}
}
// TestMean checks the bucket-quantized mean of a uniform ramp; it sits just
// above the exact mean of 499999.5 due to value quantization.
func TestMean(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.Mean(), 500000.013312; v != want {
t.Errorf("Mean was %v, but expected %v", v, want)
}
}
// TestStdDev checks the quantized standard deviation of a uniform ramp
// (exact value for uniform [0,n) is n/sqrt(12) ~ 288675).
func TestStdDev(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.StdDev(), 288675.1403682715; v != want {
t.Errorf("StdDev was %v, but expected %v", v, want)
}
}
// TestMax verifies the quantized maximum of a uniform ramp (999936 is the
// lowest value equivalent to the bucket holding 999999).
func TestMax(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.Max(), int64(999936); v != want {
t.Errorf("Max was %v, but expected %v", v, want)
}
}
// TestReset verifies that Reset clears all recorded data (Max of an empty
// histogram is 0).
func TestReset(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
h.Reset()
if v, want := h.Max(), int64(0); v != want {
t.Errorf("Max was %v, but expected %v", v, want)
}
}
// TestMerge merges two disjoint ranges (0..99 and 100..199) and checks the
// combined median.
func TestMerge(t *testing.T) {
h1 := hdrhistogram.New(1, 1000, 3)
h2 := hdrhistogram.New(1, 1000, 3)
for i := 0; i < 100; i++ {
if err := h1.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
for i := 100; i < 200; i++ {
if err := h2.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
h1.Merge(h2)
if v, want := h1.ValueAtQuantile(50), int64(99); v != want {
t.Errorf("Median was %v, but expected %v", v, want)
}
}
// TestMin verifies the quantized minimum of a ramp starting at 0.
func TestMin(t *testing.T) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
if v, want := h.Min(), int64(0); v != want {
t.Errorf("Min was %v, but expected %v", v, want)
}
}
// TestByteSize pins the estimated memory footprint for a known configuration.
func TestByteSize(t *testing.T) {
h := hdrhistogram.New(1, 100000, 3)
if v, want := h.ByteSize(), 65604; v != want {
t.Errorf("ByteSize was %v, but expected %d", v, want)
}
}
// TestRecordCorrectedValue: a value below the expected interval needs no
// correction, so only the value itself is recorded.
func TestRecordCorrectedValue(t *testing.T) {
h := hdrhistogram.New(1, 100000, 3)
if err := h.RecordCorrectedValue(10, 100); err != nil {
t.Fatal(err)
}
if v, want := h.ValueAtQuantile(75), int64(10); v != want {
t.Errorf("Corrected value was %v, but expected %v", v, want)
}
}
// TestRecordCorrectedValueStall: a 1000 value with a 100 interval back-fills
// synthetic samples at 900, 800, ..., 100, shifting the 75th percentile.
func TestRecordCorrectedValueStall(t *testing.T) {
h := hdrhistogram.New(1, 100000, 3)
if err := h.RecordCorrectedValue(1000, 100); err != nil {
t.Fatal(err)
}
if v, want := h.ValueAtQuantile(75), int64(800); v != want {
t.Errorf("Corrected value was %v, but expected %v", v, want)
}
}
// TestCumulativeDistribution pins the full bracket list produced for a
// uniform ramp, including the doubling quantile ticks and the final 100
// bracket.
func TestCumulativeDistribution(t *testing.T) {
h := hdrhistogram.New(1, 100000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
actual := h.CumulativeDistribution()
expected := []hdrhistogram.Bracket{
hdrhistogram.Bracket{Quantile: 0, Count: 1, ValueAt: 0},
hdrhistogram.Bracket{Quantile: 50, Count: 500224, ValueAt: 500223},
hdrhistogram.Bracket{Quantile: 75, Count: 750080, ValueAt: 750079},
hdrhistogram.Bracket{Quantile: 87.5, Count: 875008, ValueAt: 875007},
hdrhistogram.Bracket{Quantile: 93.75, Count: 937984, ValueAt: 937983},
hdrhistogram.Bracket{Quantile: 96.875, Count: 969216, ValueAt: 969215},
hdrhistogram.Bracket{Quantile: 98.4375, Count: 984576, ValueAt: 984575},
hdrhistogram.Bracket{Quantile: 99.21875, Count: 992256, ValueAt: 992255},
hdrhistogram.Bracket{Quantile: 99.609375, Count: 996352, ValueAt: 996351},
hdrhistogram.Bracket{Quantile: 99.8046875, Count: 998400, ValueAt: 998399},
hdrhistogram.Bracket{Quantile: 99.90234375, Count: 999424, ValueAt: 999423},
hdrhistogram.Bracket{Quantile: 99.951171875, Count: 999936, ValueAt: 999935},
hdrhistogram.Bracket{Quantile: 99.9755859375, Count: 999936, ValueAt: 999935},
hdrhistogram.Bracket{Quantile: 99.98779296875, Count: 999936, ValueAt: 999935},
hdrhistogram.Bracket{Quantile: 99.993896484375, Count: 1000000, ValueAt: 1000447},
hdrhistogram.Bracket{Quantile: 100, Count: 1000000, ValueAt: 1000447},
}
if !reflect.DeepEqual(actual, expected) {
t.Errorf("CF was %#v, but expected %#v", actual, expected)
}
}
// BenchmarkHistogramRecordValue measures the cost of recording one value into
// a pre-populated histogram; setup is excluded via ResetTimer.
func BenchmarkHistogramRecordValue(b *testing.B) {
h := hdrhistogram.New(1, 10000000, 3)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
h.RecordValue(100)
}
}
// BenchmarkNew measures histogram construction cost (allocation dominated).
func BenchmarkNew(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min
}
}
// TestUnitMagnitudeOverflow ensures a minValue of 0 (whose log would be -Inf)
// does not break index computation.
func TestUnitMagnitudeOverflow(t *testing.T) {
h := hdrhistogram.New(0, 200, 4)
if err := h.RecordValue(11); err != nil {
t.Fatal(err)
}
}
// TestSubBucketMaskOverflow exercises a large minValue whose sub-bucket mask
// shift could overflow, checking quantiles across the two occupied buckets.
func TestSubBucketMaskOverflow(t *testing.T) {
hist := hdrhistogram.New(2e7, 1e8, 5)
for _, sample := range [...]int64{1e8, 2e7, 3e7} {
hist.RecordValue(sample)
}
for q, want := range map[float64]int64{
50: 33554431,
83.33: 33554431,
83.34: 100663295,
99: 100663295,
} {
if got := hist.ValueAtQuantile(q); got != want {
t.Errorf("got %d for %fth percentile. want: %d", got, q, want)
}
}
}
// TestExportImport round-trips a histogram through Export/Import and checks
// the snapshot fields plus full equivalence of the reconstructed histogram.
func TestExportImport(t *testing.T) {
min := int64(1)
max := int64(10000000)
sigfigs := 3
h := hdrhistogram.New(min, max, sigfigs)
for i := 0; i < 1000000; i++ {
if err := h.RecordValue(int64(i)); err != nil {
t.Fatal(err)
}
}
s := h.Export()
if v := s.LowestTrackableValue; v != min {
t.Errorf("LowestTrackableValue was %v, but expected %v", v, min)
}
if v := s.HighestTrackableValue; v != max {
t.Errorf("HighestTrackableValue was %v, but expected %v", v, max)
}
if v := int(s.SignificantFigures); v != sigfigs {
t.Errorf("SignificantFigures was %v, but expected %v", v, sigfigs)
}
if imported := hdrhistogram.Import(s); !imported.Equals(h) {
t.Error("Expected Histograms to be equivalent")
}
}
// TestEquals verifies that Equals distinguishes histograms holding different
// data and that two reset histograms compare as equivalent.
func TestEquals(t *testing.T) {
	h1 := hdrhistogram.New(1, 10000000, 3)
	for i := 0; i < 1000000; i++ {
		if err := h1.RecordValue(int64(i)); err != nil {
			t.Fatal(err)
		}
	}
	h2 := hdrhistogram.New(1, 10000000, 3)
	for i := 0; i < 10000; i++ {
		// BUG FIX: this loop previously recorded into h1, leaving h2 empty
		// so the inequality below was only trivially true.
		if err := h2.RecordValue(int64(i)); err != nil {
			t.Fatal(err)
		}
	}
	if h1.Equals(h2) {
		t.Error("Expected Histograms to not be equivalent")
	}
	h1.Reset()
	h2.Reset()
	if !h1.Equals(h2) {
		t.Error("Expected Histograms to be equivalent")
	}
}

View File

@ -0,0 +1,45 @@
package hdrhistogram
// A WindowedHistogram combines histograms to provide windowed statistics.
type WindowedHistogram struct {
idx int // rotation counter; Current is h[idx % len(h)]
h []Histogram // the ring of per-window histograms
m *Histogram // scratch histogram reused by Merge
Current *Histogram // the histogram receiving new recordings
}
// NewWindowed creates a new WindowedHistogram with N underlying histograms with
// the given parameters.
func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram {
w := WindowedHistogram{
idx: -1, // so the initial Rotate selects index 0
h: make([]Histogram, n),
m: New(minValue, maxValue, sigfigs),
}
for i := range w.h {
w.h[i] = *New(minValue, maxValue, sigfigs)
}
w.Rotate()
return &w
}
// Merge returns a histogram which includes the recorded values from all the
// sections of the window.
//
// The returned histogram is owned by the WindowedHistogram and is reset and
// refilled on every call.
func (w *WindowedHistogram) Merge() *Histogram {
	w.m.Reset()
	for i := range w.h {
		// Merge via the slice element itself rather than ranging by value:
		// the original copied each Histogram struct per iteration and merged
		// the address of that loop-local copy.
		w.m.Merge(&w.h[i])
	}
	return w.m
}
// Rotate resets the oldest histogram and rotates it to be used as the current
// histogram.
func (w *WindowedHistogram) Rotate() {
w.idx++
// The ring wraps via modulo; the slot being reused is cleared first.
w.Current = &w.h[w.idx%len(w.h)]
w.Current.Reset()
}

View File

@ -0,0 +1,64 @@
package hdrhistogram_test
import (
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/hdrhistogram"
)
// TestWindowedHistogram records three 100-value batches into a 2-slot window;
// the first batch is rotated out, so the merged median covers 100..299.
func TestWindowedHistogram(t *testing.T) {
w := hdrhistogram.NewWindowed(2, 1, 1000, 3)
for i := 0; i < 100; i++ {
w.Current.RecordValue(int64(i))
}
w.Rotate()
for i := 100; i < 200; i++ {
w.Current.RecordValue(int64(i))
}
w.Rotate()
for i := 200; i < 300; i++ {
w.Current.RecordValue(int64(i))
}
if v, want := w.Merge().ValueAtQuantile(50), int64(199); v != want {
t.Errorf("Median was %v, but expected %v", v, want)
}
}
// BenchmarkWindowedHistogramRecordAndRotate measures recording with a
// periodic rotation folded into the loop.
func BenchmarkWindowedHistogramRecordAndRotate(b *testing.B) {
w := hdrhistogram.NewWindowed(3, 1, 10000000, 3)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := w.Current.RecordValue(100); err != nil {
b.Fatal(err)
}
if i%100000 == 1 {
w.Rotate()
}
}
}
// BenchmarkWindowedHistogramMerge measures Merge over a well-populated
// window; population happens before the timer is reset.
func BenchmarkWindowedHistogramMerge(b *testing.B) {
w := hdrhistogram.NewWindowed(3, 1, 10000000, 3)
for i := 0; i < 10000000; i++ {
if err := w.Current.RecordValue(100); err != nil {
b.Fatal(err)
}
if i%100000 == 1 {
w.Rotate()
}
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
w.Merge()
}
}

View File

@ -0,0 +1,9 @@
language: go
go:
- 1.3.3
notifications:
# See http://about.travis-ci.org/docs/user/build-configuration/ to learn more
# about configuring notification recipients and more.
email:
recipients:
- coda.hale@gmail.com

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,8 @@
metrics
=======
[![Build Status](https://travis-ci.org/codahale/metrics.png?branch=master)](https://travis-ci.org/codahale/metrics)
A Go library which provides light-weight instrumentation for your application.
For documentation, check [godoc](http://godoc.org/github.com/codahale/metrics).

View File

@ -0,0 +1,324 @@
// Package metrics provides minimalist instrumentation for your applications in
// the form of counters and gauges.
//
// Counters
//
// A counter is a monotonically-increasing, unsigned, 64-bit integer used to
// represent the number of times an event has occurred. By tracking the deltas
// between measurements of a counter over intervals of time, an aggregation
// layer can derive rates, acceleration, etc.
//
// Gauges
//
// A gauge returns instantaneous measurements of something using signed, 64-bit
// integers. This value does not need to be monotonic.
//
// Histograms
//
// A histogram tracks the distribution of a stream of values (e.g. the number of
// milliseconds it takes to handle requests), adding gauges for the values at
// meaningful quantiles: 50th, 75th, 90th, 95th, 99th, 99.9th.
//
// Reporting
//
// Measurements from counters and gauges are available as expvars. Your service
// should return its expvars from an HTTP endpoint (i.e., /debug/vars) as a JSON
// object.
package metrics
import (
"expvar"
"sync"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/hdrhistogram"
)
// A Counter is a monotonically increasing unsigned integer.
//
// Use a counter to derive rates (e.g., record total number of requests, derive
// requests per second).
//
// The string value is the counter's registry key: two Counters with the same
// name address the same underlying value.
type Counter string
// Add increments the counter by one.
func (c Counter) Add() {
c.AddN(1)
}
// AddN increments the counter by N. It is safe for concurrent use; the
// counters map is guarded by cm.
func (c Counter) AddN(delta uint64) {
cm.Lock()
counters[string(c)] += delta
cm.Unlock()
}
// SetFunc sets the counter's value to the lazily-called return value of the
// given function. The function is invoked on every Snapshot.
func (c Counter) SetFunc(f func() uint64) {
cm.Lock()
defer cm.Unlock()
counterFuncs[string(c)] = f
}
// SetBatchFunc sets the counter's value to the lazily-called return value of
// the given function, with an additional initializer function for a related
// batch of counters, all of which are keyed by an arbitrary value.
//
// The init func runs once per Snapshot for the whole batch; only the first
// registration for a given key wins.
func (c Counter) SetBatchFunc(key interface{}, init func(), f func() uint64) {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
counterFuncs[string(c)] = f
if _, ok := inits[key]; !ok {
inits[key] = init
}
}
// Remove removes the given counter.
//
// NOTE(review): only a batch init keyed by the counter's own name (a string)
// is removed; inits registered under other key types are untouched.
func (c Counter) Remove() {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
delete(counters, string(c))
delete(counterFuncs, string(c))
delete(inits, string(c))
}
// A Gauge is an instantaneous measurement of a value.
//
// Use a gauge to track metrics which increase and decrease (e.g., amount of
// free memory).
//
// Like Counter, the string value is the gauge's registry key.
type Gauge string
// Set the gauge's value to the given value.
//
// Implemented by installing a closure that captures the value, so Set and
// SetFunc share one code path at snapshot time.
func (g Gauge) Set(value int64) {
gm.Lock()
defer gm.Unlock()
gauges[string(g)] = func() int64 {
return value
}
}
// SetFunc sets the gauge's value to the lazily-called return value of the given
// function. The function is invoked on every Snapshot.
func (g Gauge) SetFunc(f func() int64) {
gm.Lock()
defer gm.Unlock()
gauges[string(g)] = f
}
// SetBatchFunc sets the gauge's value to the lazily-called return value of the
// given function, with an additional initializer function for a related batch
// of gauges, all of which are keyed by an arbitrary value.
//
// Only the first init registered for a given key is retained.
func (g Gauge) SetBatchFunc(key interface{}, init func(), f func() int64) {
gm.Lock()
defer gm.Unlock()
gauges[string(g)] = f
if _, ok := inits[key]; !ok {
inits[key] = init
}
}
// Remove removes the given gauge, along with any batch init keyed by the
// gauge's own name.
func (g Gauge) Remove() {
gm.Lock()
defer gm.Unlock()
delete(gauges, string(g))
delete(inits, string(g))
}
// Reset removes all existing counters and gauges.
//
// Histograms are also dropped from the registry. All three registry locks are
// held for the duration so no registration can interleave.
func Reset() {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
hm.Lock()
defer hm.Unlock()
counters = make(map[string]uint64)
counterFuncs = make(map[string]func() uint64)
gauges = make(map[string]func() int64)
histograms = make(map[string]*Histogram)
inits = make(map[interface{}]func())
}
// Snapshot returns a copy of the values of all registered counters and gauges.
//
// All batch init funcs run first (e.g., merging histogram windows), then the
// lazily-registered counter/gauge funcs are evaluated. All registry locks are
// held for the duration, so registered funcs must not re-enter this package.
func Snapshot() (c map[string]uint64, g map[string]int64) {
cm.Lock()
defer cm.Unlock()
gm.Lock()
defer gm.Unlock()
hm.Lock()
defer hm.Unlock()
for _, init := range inits {
init()
}
c = make(map[string]uint64, len(counters)+len(counterFuncs))
for n, v := range counters {
c[n] = v
}
// Lazily-computed counters override any static value of the same name.
for n, f := range counterFuncs {
c[n] = f()
}
g = make(map[string]int64, len(gauges))
for n, f := range gauges {
g[n] = f()
}
return
}
// NewHistogram returns a windowed HDR histogram which drops data older than
// five minutes. The returned histogram is safe to use from multiple goroutines.
//
// Use a histogram to track the distribution of a stream of values (e.g., the
// latency associated with HTTP requests).
//
// Registering the same name twice panics. Six quantile gauges (P50..P999) are
// registered as a batch keyed by the histogram's name; the batch init merges
// the window before the gauges read from it.
func NewHistogram(name string, minValue, maxValue int64, sigfigs int) *Histogram {
hm.Lock()
defer hm.Unlock()
if _, ok := histograms[name]; ok {
panic(name + " already exists")
}
hist := &Histogram{
name: name,
hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs),
}
histograms[name] = hist
Gauge(name+".P50").SetBatchFunc(hname(name), hist.merge, hist.valueAt(50))
Gauge(name+".P75").SetBatchFunc(hname(name), hist.merge, hist.valueAt(75))
Gauge(name+".P90").SetBatchFunc(hname(name), hist.merge, hist.valueAt(90))
Gauge(name+".P95").SetBatchFunc(hname(name), hist.merge, hist.valueAt(95))
Gauge(name+".P99").SetBatchFunc(hname(name), hist.merge, hist.valueAt(99))
Gauge(name+".P999").SetBatchFunc(hname(name), hist.merge, hist.valueAt(99.9))
return hist
}
// Remove removes the given histogram, unregistering its six quantile gauges
// and deleting it from the registry.
func (h *Histogram) Remove() {
hm.Lock()
defer hm.Unlock()
Gauge(h.name + ".P50").Remove()
Gauge(h.name + ".P75").Remove()
Gauge(h.name + ".P90").Remove()
Gauge(h.name + ".P95").Remove()
Gauge(h.name + ".P99").Remove()
Gauge(h.name + ".P999").Remove()
delete(histograms, h.name)
}
// hname wraps a histogram's name for use as a batch-function key.
type hname string // unexported to prevent collisions

// A Histogram measures the distribution of a stream of values.
type Histogram struct {
	name string // registry key; also the prefix of the derived percentile gauges
	hist *hdrhistogram.WindowedHistogram // five-window HDR histogram receiving raw samples
	m    *hdrhistogram.Histogram // latest merged view; nil until merge() has run
	rw   sync.RWMutex // guards hist and m
}
// RecordValue records the given value, or returns an error if the value is out
// of range.
// Returned error values are of type Error.
func (h *Histogram) RecordValue(v int64) error {
	h.rw.Lock()
	defer h.rw.Unlock()

	// Wrap any out-of-range failure with the metric's name for context.
	if err := h.hist.Current.RecordValue(v); err != nil {
		return Error{h.name, err}
	}
	return nil
}
// rotate advances the histogram's sliding window, discarding the oldest of
// its five buckets. Invoked once per minute by the package's ticker goroutine.
func (h *Histogram) rotate() {
	h.rw.Lock()
	defer h.rw.Unlock()
	h.hist.Rotate()
}
// merge caches a merged view of all windows in h.m. It is registered as the
// shared batch function for the percentile gauges, so it runs at most once
// per snapshot regardless of how many quantiles are read.
func (h *Histogram) merge() {
	h.rw.Lock()
	defer h.rw.Unlock()
	h.m = h.hist.Merge()
}
// valueAt returns a gauge function reporting the q'th percentile of the most
// recently merged view, or 0 if merge has never run.
func (h *Histogram) valueAt(q float64) func() int64 {
	return func() int64 {
		h.rw.RLock()
		defer h.rw.RUnlock()

		merged := h.m
		if merged == nil {
			// No snapshot has triggered a merge yet.
			return 0
		}
		return merged.ValueAtQuantile(q)
	}
}
// Error describes an error and the name of the metric where it occurred.
type Error struct {
Metric string
Err error
}
func (e Error) Error() string {
return e.Metric + ": " + e.Err.Error()
}
// Package-level metric registries. cm, gm, and hm guard the counter, gauge,
// and histogram state respectively; when taking more than one, acquire them
// in the order cm, gm, hm (as Reset and Snapshot do) to avoid deadlock.
var (
	counters     = make(map[string]uint64)
	counterFuncs = make(map[string]func() uint64)
	gauges       = make(map[string]func() int64)
	inits        = make(map[interface{}]func()) // batch initializers, deduplicated by caller-supplied key
	histograms   = make(map[string]*Histogram)
	cm, gm, hm   sync.Mutex
)
// init exposes all registered metrics through the expvar package under the
// "metrics" key, and starts a background goroutine that rotates every
// histogram's sliding window once per minute.
func init() {
	expvar.Publish("metrics", expvar.Func(func() interface{} {
		counters, gauges := Snapshot()
		return map[string]interface{}{
			"Counters": counters,
			"Gauges":   gauges,
		}
	}))
	go func() {
		// The ticker is intentionally never stopped: this goroutine lives
		// for the life of the process. `for range` (no loop variable) is
		// the idiomatic form; `for _ = range` is flagged by gofmt -s.
		for range time.NewTicker(1 * time.Minute).C {
			hm.Lock()
			for _, h := range histograms {
				h.rotate()
			}
			hm.Unlock()
		}
	}()
}

View File

@ -0,0 +1,217 @@
package metrics_test
import (
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
)
// TestCounter verifies that Add and AddN accumulate into one counter.
func TestCounter(t *testing.T) {
	metrics.Reset()

	metrics.Counter("whee").Add()
	metrics.Counter("whee").AddN(10)

	counters, _ := metrics.Snapshot()
	got, want := counters["whee"], uint64(11)
	if got != want {
		t.Errorf("Counter was %v, but expected %v", got, want)
	}
}
// TestCounterFunc verifies that a function-backed counter reports the
// function's return value at snapshot time.
func TestCounterFunc(t *testing.T) {
	metrics.Reset()

	count := func() uint64 { return 100 }
	metrics.Counter("whee").SetFunc(count)

	counters, _ := metrics.Snapshot()
	if got, want := counters["whee"], uint64(100); got != want {
		t.Errorf("Counter was %v, but expected %v", got, want)
	}
}
// TestCounterBatchFunc verifies that two counters sharing one batch key are
// populated by a single initializer run.
func TestCounterBatchFunc(t *testing.T) {
	metrics.Reset()

	var a, b uint64
	populate := func() {
		a, b = 1, 2
	}
	metrics.Counter("whee").SetBatchFunc("yay", populate, func() uint64 { return a })
	metrics.Counter("woo").SetBatchFunc("yay", populate, func() uint64 { return b })

	counters, _ := metrics.Snapshot()
	if got, want := counters["whee"], uint64(1); got != want {
		t.Errorf("Counter was %v, but expected %v", got, want)
	}
	if got, want := counters["woo"], uint64(2); got != want {
		t.Errorf("Counter was %v, but expected %v", got, want)
	}
}
// TestCounterRemove verifies that a removed counter no longer appears in
// snapshots.
func TestCounterRemove(t *testing.T) {
	metrics.Reset()

	metrics.Counter("whee").Add()
	metrics.Counter("whee").Remove()

	counters, _ := metrics.Snapshot()
	if got, present := counters["whee"]; present {
		t.Errorf("Counter was %v, but expected nothing", got)
	}
}
// TestGaugeValue verifies that Set stores a (possibly negative) value.
func TestGaugeValue(t *testing.T) {
	metrics.Reset()

	metrics.Gauge("whee").Set(-100)

	_, gauges := metrics.Snapshot()
	if got, want := gauges["whee"], int64(-100); got != want {
		t.Errorf("Gauge was %v, but expected %v", got, want)
	}
}
// TestGaugeFunc verifies that a function-backed gauge reports the function's
// return value at snapshot time.
func TestGaugeFunc(t *testing.T) {
	metrics.Reset()

	value := func() int64 { return -100 }
	metrics.Gauge("whee").SetFunc(value)

	_, gauges := metrics.Snapshot()
	if got, want := gauges["whee"], int64(-100); got != want {
		t.Errorf("Gauge was %v, but expected %v", got, want)
	}
}
// TestGaugeRemove verifies that a removed gauge no longer appears in
// snapshots.
func TestGaugeRemove(t *testing.T) {
	metrics.Reset()

	metrics.Gauge("whee").Set(1)
	metrics.Gauge("whee").Remove()

	_, gauges := metrics.Snapshot()
	if got, present := gauges["whee"]; present {
		t.Errorf("Gauge was %v, but expected nothing", got)
	}
}
// TestHistogram records a known distribution (each value i in [1,100] occurs
// i times) and checks the derived percentile gauges.
func TestHistogram(t *testing.T) {
	metrics.Reset()
	h := metrics.NewHistogram("heyo", 1, 1000, 3)
	for i := 100; i > 0; i-- {
		for j := 0; j < i; j++ {
			// RecordValue fails for out-of-range values; the original test
			// silently discarded the error, which would hide such failures.
			if err := h.RecordValue(int64(i)); err != nil {
				t.Fatalf("RecordValue(%d): %v", i, err)
			}
		}
	}
	_, gauges := metrics.Snapshot()
	if v, want := gauges["heyo.P50"], int64(71); v != want {
		t.Errorf("P50 was %v, but expected %v", v, want)
	}
	if v, want := gauges["heyo.P75"], int64(87); v != want {
		t.Errorf("P75 was %v, but expected %v", v, want)
	}
	if v, want := gauges["heyo.P90"], int64(95); v != want {
		t.Errorf("P90 was %v, but expected %v", v, want)
	}
	if v, want := gauges["heyo.P95"], int64(98); v != want {
		t.Errorf("P95 was %v, but expected %v", v, want)
	}
	if v, want := gauges["heyo.P99"], int64(100); v != want {
		t.Errorf("P99 was %v, but expected %v", v, want)
	}
	if v, want := gauges["heyo.P999"], int64(100); v != want {
		t.Errorf("P999 was %v, but expected %v", v, want)
	}
}
// TestHistogramRemove verifies that removing a histogram also removes its
// derived percentile gauges.
func TestHistogramRemove(t *testing.T) {
	metrics.Reset()

	metrics.NewHistogram("heyo", 1, 1000, 3).Remove()

	_, gauges := metrics.Snapshot()
	if got, present := gauges["heyo.P50"]; present {
		t.Errorf("Gauge was %v, but expected nothing", got)
	}
}
// BenchmarkCounterAdd measures single-increment cost on one counter under
// parallel load (includes the per-iteration name lookup).
func BenchmarkCounterAdd(b *testing.B) {
	metrics.Reset()
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			metrics.Counter("test1").Add()
		}
	})
}
// BenchmarkCounterAddN measures bulk-increment (AddN) cost on one counter
// under parallel load.
func BenchmarkCounterAddN(b *testing.B) {
	metrics.Reset()
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			metrics.Counter("test2").AddN(100)
		}
	})
}
// BenchmarkGaugeSet measures gauge assignment cost under parallel load.
// (The name "test2" does not collide with the counter benchmark above:
// counters and gauges live in separate registries, and Reset clears both.)
func BenchmarkGaugeSet(b *testing.B) {
	metrics.Reset()
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			metrics.Gauge("test2").Set(100)
		}
	})
}
// BenchmarkHistogramRecordValue measures the cost of recording one in-range
// sample under parallel load. The error return is deliberately ignored:
// 100 is always within the [1,1000] range configured above.
func BenchmarkHistogramRecordValue(b *testing.B) {
	metrics.Reset()
	h := metrics.NewHistogram("hist", 1, 1000, 3)
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			h.RecordValue(100)
		}
	})
}

View File

@ -0,0 +1,17 @@
// Package runtime registers gauges and counters for various operationally
// important aspects of the Go runtime.
//
// To use, import this package:
//
// import _ "github.com/codahale/metrics/runtime"
//
// This registers the following gauges:
//
// FileDescriptors.Max
// FileDescriptors.Used
// Mem.NumGC
// Mem.PauseTotalNs
// Mem.LastGC
// Mem.Alloc
// Mem.HeapObjects
package runtime

View File

@ -0,0 +1,42 @@
package runtime
import (
"io/ioutil"
"syscall"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
)
// getFDLimit returns the process's soft limit on open file descriptors
// (RLIMIT_NOFILE).
func getFDLimit() (uint64, error) {
	var rl syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl)
	if err != nil {
		return 0, err
	}
	return rl.Cur, nil
}
// getFDUsage returns the number of file descriptors currently open by this
// process, counted via /proc/self/fd (Linux-specific; errors elsewhere).
func getFDUsage() (uint64, error) {
	entries, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		return 0, err
	}
	open := len(entries)
	return uint64(open), nil
}
// init registers gauges for the process's file-descriptor limit and usage.
func init() {
	// Gauge functions cannot return an error, so failures are deliberately
	// reported as 0, standing in for "unknown".
	metrics.Gauge("FileDescriptors.Max").SetFunc(func() int64 {
		v, err := getFDLimit()
		if err != nil {
			return 0
		}
		return int64(v)
	})
	metrics.Gauge("FileDescriptors.Used").SetFunc(func() int64 {
		v, err := getFDUsage()
		if err != nil {
			return 0
		}
		return int64(v)
	})
}

View File

@ -0,0 +1,22 @@
package runtime
import (
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
)
// TestFdStats verifies that the file-descriptor gauges are registered at
// package init time.
func TestFdStats(t *testing.T) {
	_, gauges := metrics.Snapshot()

	for _, name := range []string{
		"FileDescriptors.Max",
		"FileDescriptors.Used",
	} {
		if _, ok := gauges[name]; !ok {
			t.Errorf("Missing gauge %q", name)
		}
	}
}

View File

@ -0,0 +1,48 @@
package runtime
import (
"runtime"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
)
// init registers counters and gauges for Go runtime memory statistics.
// All five share the key{} batch key, so msg.init (runtime.ReadMemStats)
// runs at most once per metrics snapshot rather than once per metric.
func init() {
	msg := &memStatGauges{}
	metrics.Counter("Mem.NumGC").SetBatchFunc(key{}, msg.init, msg.numGC)
	metrics.Counter("Mem.PauseTotalNs").SetBatchFunc(key{}, msg.init, msg.totalPause)
	metrics.Gauge("Mem.LastGC").SetBatchFunc(key{}, msg.init, msg.lastPause)
	metrics.Gauge("Mem.Alloc").SetBatchFunc(key{}, msg.init, msg.alloc)
	metrics.Gauge("Mem.HeapObjects").SetBatchFunc(key{}, msg.init, msg.objects)
}
// key is the shared batch key for the Mem.* metrics registered in init.
type key struct{} // unexported to prevent collision

// memStatGauges snapshots runtime memory statistics once per batch (init)
// and serves individual fields to the registered counters and gauges.
type memStatGauges struct {
	stats runtime.MemStats
}

// init refreshes the cached MemStats; registered as the batch function.
func (msg *memStatGauges) init() {
	runtime.ReadMemStats(&msg.stats)
}

// numGC reports the cumulative number of completed GC cycles.
func (msg *memStatGauges) numGC() uint64 {
	return uint64(msg.stats.NumGC)
}

// totalPause reports the cumulative GC stop-the-world pause time, in ns.
func (msg *memStatGauges) totalPause() uint64 {
	return msg.stats.PauseTotalNs
}

// lastPause reports when the last GC finished (ns since the Unix epoch).
func (msg *memStatGauges) lastPause() int64 {
	return int64(msg.stats.LastGC)
}

// alloc reports bytes of allocated heap objects.
func (msg *memStatGauges) alloc() int64 {
	return int64(msg.stats.Alloc)
}

// objects reports the number of live heap objects.
func (msg *memStatGauges) objects() int64 {
	return int64(msg.stats.HeapObjects)
}

View File

@ -0,0 +1,34 @@
package runtime
import (
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
)
// TestMemStats verifies that the expected Mem.* counters and gauges are
// registered at package init time.
func TestMemStats(t *testing.T) {
	counters, gauges := metrics.Snapshot()
	expectedCounters := []string{
		"Mem.NumGC",
		"Mem.PauseTotalNs",
	}
	expectedGauges := []string{
		"Mem.LastGC",
		"Mem.Alloc",
		"Mem.HeapObjects",
	}
	for _, name := range expectedCounters {
		if _, ok := counters[name]; !ok {
			// Was "Missing counters %q"; singular, matching the gauge
			// message below.
			t.Errorf("Missing counter %q", name)
		}
	}
	for _, name := range expectedGauges {
		if _, ok := gauges[name]; !ok {
			t.Errorf("Missing gauge %q", name)
		}
	}
}

View File

@ -18,7 +18,6 @@ import (
"os"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/etcdctl/command"
"github.com/coreos/etcd/version"
)

View File

@ -1,165 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metrics provides metrics view of variables which is exposed through
// expvar package.
//
// Naming conventions:
// 1. volatile path components should be kept as deep into the hierarchy as possible
// 2. each path component should have a clear and well-defined purpose
// 3. components.separated.with.dot, and put package prefix at the head
// 4. words_separated_with_underscore, and put clarifiers last, e.g., requests_total
package metrics
import (
"bytes"
"expvar"
"fmt"
"sort"
"sync"
)
// Counter is a number that increases over time monotonically.
type Counter struct{ i *expvar.Int }

// Add increments the counter by one.
func (c *Counter) Add() { c.i.Add(1) }

// AddBy increments the counter by delta.
func (c *Counter) AddBy(delta int64) { c.i.Add(delta) }

// String returns the counter's value as a decimal string.
func (c *Counter) String() string { return c.i.String() }
// Gauge returns instantaneous value that is expected to fluctuate over time.
type Gauge struct{ i *expvar.Int }

// Set replaces the gauge's current value.
func (g *Gauge) Set(value int64) { g.i.Set(value) }

// String returns the gauge's value as a decimal string.
func (g *Gauge) String() string { return g.i.String() }
// nilVar is a placeholder expvar.Var used by Map.Delete to hide a key;
// Map.String skips entries whose value renders as "nil".
type nilVar struct{}

func (v *nilVar) String() string { return "nil" }
// Map aggregates Counters and Gauges.
type Map struct{ *expvar.Map }

// NewCounter creates a Counter, registers it under key, and returns it.
func (m *Map) NewCounter(key string) *Counter {
	c := &Counter{i: new(expvar.Int)}
	m.Set(key, c)
	return c
}
// NewGauge creates a Gauge, registers it under key, and returns it.
func (m *Map) NewGauge(key string) *Gauge {
	g := &Gauge{i: new(expvar.Int)}
	m.Set(key, g)
	return g
}
// Delete hides key from the map's String output by overwriting it with a
// nilVar; the underlying expvar.Map entry is not actually removed.
// TODO: remove the var from the map to avoid memory boom
func (m *Map) Delete(key string) { m.Set(key, &nilVar{}) }
// String returns JSON format string that represents the group.
// It does not print out nilVar.
func (m *Map) String() string {
	var buf bytes.Buffer
	buf.WriteString("{")
	first := true
	m.Do(func(kv expvar.KeyValue) {
		rendered := kv.Value.String()
		// Entries deleted via Delete render as "nil" and are skipped.
		if rendered == "nil" {
			return
		}
		if !first {
			buf.WriteString(", ")
		}
		fmt.Fprintf(&buf, "%q: %v", kv.Key, rendered)
		first = false
	})
	buf.WriteString("}")
	return buf.String()
}
// All published variables.
var (
	mutex   sync.RWMutex
	vars    = make(map[string]expvar.Var)
	varKeys []string // sorted, so Do visits variables in deterministic key order
)
// Publish declares a named exported variable.
// If the name is already registered then this will overwrite the old one.
func Publish(name string, v expvar.Var) {
	mutex.Lock()
	defer mutex.Unlock()
	// varKeys is kept sorted so Do iterates deterministically. Re-sort only
	// when a new key is actually appended; the original sorted on every
	// call (wasted O(n log n) work) and ended with a redundant bare return.
	if _, existing := vars[name]; !existing {
		varKeys = append(varKeys, name)
		sort.Strings(varKeys)
	}
	vars[name] = v
}
// Get retrieves a named exported variable, or nil if none is registered
// under name.
func Get(name string) expvar.Var {
	mutex.RLock()
	defer mutex.RUnlock()
	return vars[name]
}
// Convenience functions for creating new exported variables.

// NewCounter creates a Counter and publishes it under name.
func NewCounter(name string) *Counter {
	c := &Counter{i: new(expvar.Int)}
	Publish(name, c)
	return c
}
// NewGauge creates a Gauge and publishes it under name.
func NewGauge(name string) *Gauge {
	g := &Gauge{i: new(expvar.Int)}
	Publish(name, g)
	return g
}
// NewMap creates an initialized Map and publishes it under name.
func NewMap(name string) *Map {
	m := &Map{Map: new(expvar.Map).Init()}
	Publish(name, m)
	return m
}
// GetMap returns the map if it exists, or inits the given name map if it does
// not exist.
//
// NOTE(review): the Get/NewMap pair is not atomic — two concurrent callers
// may each create a Map, and only the later registration survives; the type
// assertion also panics if name was published as a non-Map. Presumably
// callers only use well-known names during setup — confirm before relying
// on this concurrently.
func GetMap(name string) *Map {
	v := Get(name)
	if v == nil {
		return NewMap(name)
	}
	return v.(*Map)
}
// Do calls f for each exported variable.
// The global variable map is locked during the iteration,
// but existing entries may be concurrently updated.
func Do(f func(expvar.KeyValue)) {
	mutex.RLock()
	defer mutex.RUnlock()
	// varKeys is kept sorted by Publish, so iteration order is stable.
	for _, k := range varKeys {
		// Keyed fields: the original unkeyed expvar.KeyValue literal is
		// flagged by `go vet` (composites) and breaks if the struct changes.
		f(expvar.KeyValue{Key: k, Value: vars[k]})
	}
}
// reset drops every published variable and its key.
// for test only
func reset() {
	mutex.Lock()
	defer mutex.Unlock()
	vars = make(map[string]expvar.Var)
	varKeys = nil
}

View File

@ -1,77 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"expvar"
"testing"
)
// TestPublish tests function Publish and related creation functions.
func TestPublish(t *testing.T) {
	defer reset()
	Publish("string", new(expvar.String))
	NewCounter("counter")
	NewGauge("gauge")
	NewMap("map")
	// Do must visit variables in sorted key order regardless of the
	// order they were published in.
	keys := []string{"counter", "gauge", "map", "string"}
	i := 0
	Do(func(kv expvar.KeyValue) {
		if kv.Key != keys[i] {
			t.Errorf("#%d: key = %s, want %s", i, kv.Key, keys[i])
		}
		i++
	})
}
// TestDuplicatePublish verifies that re-publishing a name overwrites the
// previously registered variable.
func TestDuplicatePublish(t *testing.T) {
	defer reset()
	num1 := new(expvar.Int)
	num1.Set(10)
	Publish("number", num1)
	num2 := new(expvar.Int)
	num2.Set(20)
	Publish("number", num2)
	if g := Get("number").String(); g != "20" {
		t.Errorf("number str = %s, want %s", g, "20")
	}
}
// TestMap tests the basic usage of Map.
func TestMap(t *testing.T) {
	m := &Map{Map: new(expvar.Map).Init()}
	c := m.NewCounter("number")
	c.Add()
	c.AddBy(10)
	if w := "11"; c.String() != w {
		t.Errorf("counter = %s, want %s", c, w)
	}
	g := m.NewGauge("price")
	g.Set(100)
	if w := "100"; g.String() != w {
		t.Errorf("gauge = %s, want %s", g, w)
	}
	// Keys appear sorted in the JSON rendering.
	if w := `{"number": 11, "price": 100}`; m.String() != w {
		t.Errorf("map = %s, want %s", m, w)
	}
	// Delete only hides the key from String; see Map.Delete.
	m.Delete("price")
	if w := `{"number": 11}`; m.String() != w {
		t.Errorf("map after deletion = %s, want %s", m, w)
	}
}

View File

@ -18,26 +18,20 @@ import (
"encoding/binary"
"io"
"github.com/coreos/etcd/pkg/metrics"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
)
type entryReader struct {
r io.Reader
id types.ID
ents *metrics.Counter
bytes *metrics.Counter
lastIndex *metrics.Gauge
r io.Reader
id types.ID
}
func newEntryReader(r io.Reader, id types.ID) *entryReader {
return &entryReader{
r: r,
id: id,
ents: metrics.GetMap("rafthttp.stream.entries_received").NewCounter(id.String()),
bytes: metrics.GetMap("rafthttp.stream.bytes_received").NewCounter(id.String()),
lastIndex: metrics.GetMap("rafthttp.stream.last_index_received").NewGauge(id.String()),
r: r,
id: id,
}
}
@ -46,16 +40,16 @@ func (er *entryReader) readEntries() ([]raftpb.Entry, error) {
if err := binary.Read(er.r, binary.BigEndian, &l); err != nil {
return nil, err
}
er.bytes.AddBy(8)
metrics.Counter("rafthttp.stream.bytes_received." + er.id.String()).AddN(8)
ents := make([]raftpb.Entry, int(l))
for i := 0; i < int(l); i++ {
if err := er.readEntry(&ents[i]); err != nil {
return nil, err
}
er.ents.Add()
metrics.Counter("rafthttp.stream.entries_received." + er.id.String()).AddN(8)
}
if l > 0 {
er.lastIndex.Set(int64(ents[l-1].Index))
metrics.Gauge("rafthttp.stream.last_index_received." + er.id.String()).Set(int64(ents[l-1].Index))
}
return ents, nil
}
@ -69,12 +63,12 @@ func (er *entryReader) readEntry(ent *raftpb.Entry) error {
if _, err := io.ReadFull(er.r, buf); err != nil {
return err
}
er.bytes.AddBy(8 + int64(l))
metrics.Counter("rafthttp.stream.bytes_received." + er.id.String()).AddN(8 + uint64(l))
return ent.Unmarshal(buf)
}
func (er *entryReader) stop() {
metrics.GetMap("rafthttp.stream.entries_received").Delete(er.id.String())
metrics.GetMap("rafthttp.stream.bytes_received").Delete(er.id.String())
metrics.GetMap("rafthttp.stream.last_index_received").Delete(er.id.String())
metrics.Counter("rafthttp.stream.bytes_received." + er.id.String()).Remove()
metrics.Counter("rafthttp.stream.entries_received." + er.id.String()).Remove()
metrics.Gauge("rafthttp.stream.last_index_received." + er.id.String()).Remove()
}

View File

@ -18,26 +18,20 @@ import (
"encoding/binary"
"io"
"github.com/coreos/etcd/pkg/metrics"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codahale/metrics"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
)
type entryWriter struct {
w io.Writer
id types.ID
ents *metrics.Counter
bytes *metrics.Counter
lastIndex *metrics.Gauge
w io.Writer
id types.ID
}
func newEntryWriter(w io.Writer, id types.ID) *entryWriter {
ew := &entryWriter{
w: w,
id: id,
ents: metrics.GetMap("rafthttp.stream.entries_sent").NewCounter(id.String()),
bytes: metrics.GetMap("rafthttp.stream.bytes_sent").NewCounter(id.String()),
lastIndex: metrics.GetMap("rafthttp.stream.last_index_sent").NewGauge(id.String()),
w: w,
id: id,
}
return ew
}
@ -50,14 +44,14 @@ func (ew *entryWriter) writeEntries(ents []raftpb.Entry) error {
if err := binary.Write(ew.w, binary.BigEndian, uint64(l)); err != nil {
return err
}
ew.bytes.AddBy(8)
metrics.Counter("rafthttp.stream.bytes_sent." + ew.id.String()).AddN(8)
for i := 0; i < l; i++ {
if err := ew.writeEntry(&ents[i]); err != nil {
return err
}
ew.ents.Add()
metrics.Counter("rafthttp.stream.entries_sent." + ew.id.String()).Add()
}
ew.lastIndex.Set(int64(ents[l-1].Index))
metrics.Gauge("rafthttp.stream.last_index_sent." + ew.id.String()).Set(int64(ents[l-1].Index))
return nil
}
@ -71,12 +65,12 @@ func (ew *entryWriter) writeEntry(ent *raftpb.Entry) error {
return err
}
_, err = ew.w.Write(b)
ew.bytes.AddBy(8 + int64(size))
metrics.Counter("rafthttp.stream.bytes_sent." + ew.id.String()).AddN(8 + uint64(size))
return err
}
func (ew *entryWriter) stop() {
metrics.GetMap("rafthttp.stream.entries_sent").Delete(ew.id.String())
metrics.GetMap("rafthttp.stream.bytes_sent").Delete(ew.id.String())
metrics.GetMap("rafthttp.stream.last_index_sent").Delete(ew.id.String())
metrics.Counter("rafthttp.stream.bytes_sent." + ew.id.String()).Remove()
metrics.Counter("rafthttp.stream.entries_sent." + ew.id.String()).Remove()
metrics.Gauge("rafthttp.stream.last_index_sent." + ew.id.String()).Remove()
}