feat(instrumentation): added meter, histogram, and a new timer; the timer now reports p25, p75, p90, and p99 percentiles from a 1000-sample exponentially decaying sample
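For orientation, here is a minimal usage sketch of the timer this commit describes. It assumes the RegTimer helper and the Timer.AddTiming method that appear later in this diff; the calling package, function, and millisecond unit are illustrative, not part of the commit.

package dataproxy

import (
    "time"

    "github.com/grafana/grafana/pkg/metrics"
)

// Hypothetical caller; RegTimer and AddTiming are taken from this diff.
var proxyTimer = metrics.RegTimer("api.dataproxy.request.all")

func timedProxyRequest(do func()) {
    start := time.Now()
    do()
    // Each observation lands in the timer's exponentially decaying sample;
    // publishers later read count/min/max/mean and the p25/p75/p90/p99 percentiles.
    proxyTimer.AddTiming(time.Since(start).Nanoseconds() / int64(time.Millisecond))
}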
@@ -12,8 +12,12 @@ import (
)

var (
-    NotFound = ApiError(404, "Not found", nil)
-    ServerError = ApiError(500, "Server error", nil)
+    NotFound = func() Response {
+        return ApiError(404, "Not found", nil)
+    }
+    ServerError = func() Response {
+        return ApiError(500, "Server error", nil)
+    }
)

type Response interface {

@@ -34,7 +38,7 @@ func wrap(action interface{}) macaron.Handler {
        if err == nil && val != nil && len(val) > 0 {
            res = val[0].Interface().(Response)
        } else {
-            res = ServerError
+            res = ServerError()
        }

        res.WriteTo(c.Resp)
@@ -58,13 +58,13 @@ func main() {
    flag.Parse()
    writePIDFile()
    initRuntime()
-    metrics.Init()

    search.Init()
    login.Init()
    social.NewOAuthService()
    eventpublisher.Init()
    plugins.Init()
+    metrics.Init()

    if err := notifications.Init(); err != nil {
        log.Fatal(3, "Notification service failed to initialize", err)

@@ -87,6 +87,7 @@ func initRuntime() {

    log.Info("Starting Grafana")
    log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))

    setting.LogConfigurationInfo()

    sqlstore.NewEngine()
pkg/metrics/EMWA.go (new file, 122 lines)
@@ -0,0 +1,122 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

import (
    "math"
    "sync"
    "sync/atomic"
)

// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
    Rate() float64
    Snapshot() EWMA
    Tick()
    Update(int64)
}

// NewEWMA constructs a new EWMA with the given alpha.
func NewEWMA(alpha float64) EWMA {
    if UseNilMetrics {
        return NilEWMA{}
    }
    return &StandardEWMA{alpha: alpha}
}

// NewEWMA1 constructs a new EWMA for a one-minute moving average.
func NewEWMA1() EWMA {
    return NewEWMA(1 - math.Exp(-5.0/60.0/1))
}

// NewEWMA5 constructs a new EWMA for a five-minute moving average.
func NewEWMA5() EWMA {
    return NewEWMA(1 - math.Exp(-5.0/60.0/5))
}

// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
func NewEWMA15() EWMA {
    return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}

// EWMASnapshot is a read-only copy of another EWMA.
type EWMASnapshot float64

// Rate returns the rate of events per second at the time the snapshot was
// taken.
func (a EWMASnapshot) Rate() float64 { return float64(a) }

// Snapshot returns the snapshot.
func (a EWMASnapshot) Snapshot() EWMA { return a }

// Tick panics.
func (EWMASnapshot) Tick() {
    panic("Tick called on an EWMASnapshot")
}

// Update panics.
func (EWMASnapshot) Update(int64) {
    panic("Update called on an EWMASnapshot")
}

// NilEWMA is a no-op EWMA.
type NilEWMA struct{}

// Rate is a no-op.
func (NilEWMA) Rate() float64 { return 0.0 }

// Snapshot is a no-op.
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }

// Tick is a no-op.
func (NilEWMA) Tick() {}

// Update is a no-op.
func (NilEWMA) Update(n int64) {}

// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
    uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
    alpha     float64
    rate      float64
    init      bool
    mutex     sync.Mutex
}

// Rate returns the moving average rate of events per second.
func (a *StandardEWMA) Rate() float64 {
    a.mutex.Lock()
    defer a.mutex.Unlock()
    return a.rate * float64(1e9)
}

// Snapshot returns a read-only copy of the EWMA.
func (a *StandardEWMA) Snapshot() EWMA {
    return EWMASnapshot(a.Rate())
}

// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
    count := atomic.LoadInt64(&a.uncounted)
    atomic.AddInt64(&a.uncounted, -count)
    instantRate := float64(count) / float64(5e9)
    a.mutex.Lock()
    defer a.mutex.Unlock()
    if a.init {
        a.rate += a.alpha * (instantRate - a.rate)
    } else {
        a.init = true
        a.rate = instantRate
    }
}

// Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) {
    atomic.AddInt64(&a.uncounted, n)
}
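The alpha constants above follow the usual EWMA parameterisation: with a five-second tick and an m-minute horizon, alpha = 1 - e^(-5 / (60 * m)), and each Tick folds the interval's instantaneous rate into the running rate. A small sketch of driving an EWMA by hand (in this package the meter arbiter normally does the ticking); note that UseNilMetrics defaults to true, so it must be switched off or the constructor returns the no-op implementation:

package main

import (
    "fmt"

    "github.com/grafana/grafana/pkg/metrics"
)

func main() {
    metrics.UseNilMetrics = false // the default (true) makes NewEWMA1 return NilEWMA

    a := metrics.NewEWMA1() // one-minute moving average
    a.Update(300)           // 300 events observed in the current 5s window
    a.Tick()                // instantaneous rate = 300 / 5s = 60 events/s
    a.Update(150)
    a.Tick()
    fmt.Printf("one-minute rate: %.1f events/s\n", a.Rate())
}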
pkg/metrics/combos.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package metrics

// type comboCounterRef struct {
// 	*MetricMeta
// 	usageCounter Counter
// 	metricCounter Counter
// }
//
// func RegComboCounter(name string, tagStrings ...string) Counter {
// 	meta := NewMetricMeta(name, tagStrings)
// 	cr := &comboCounterRef{
// 		MetricMeta: meta,
// 		usageCounter: NewCounter(meta),
// 		metricCounter: NewCounter(meta),
// 	}
//
// 	UsageStats.Register(cr.usageCounter)
// 	MetricStats.Register(cr.metricCounter)
//
// 	return cr
// }
//
// func (c comboCounterRef) Clear() {
// 	c.usageCounter.Clear()
// 	c.metricCounter.Clear()
// }
//
// func (c comboCounterRef) Count() int64 {
// 	panic("Count called on a combocounter ref")
// }
//
// // Dec panics.
// func (c comboCounterRef) Dec(i int64) {
// 	c.usageCounter.Dec(i)
// 	c.metricCounter.Dec(i)
// }
//
// // Inc panics.
// func (c comboCounterRef) Inc(i int64) {
// 	c.usageCounter.Inc(i)
// 	c.metricCounter.Inc(i)
// }
//
// func (c comboCounterRef) Snapshot() Metric {
// 	return c.metricCounter.Snapshot()
// }
@@ -58,5 +58,4 @@ type Metric interface {
    GetTagsCopy() map[string]string
    StringifyTags() string
    Snapshot() Metric
-    Clear()
}
@@ -6,6 +6,7 @@ import "sync/atomic"
type Counter interface {
    Metric

    Clear()
    Count() int64
    Dec(int64)
    Inc(int64)

@@ -19,6 +20,12 @@ func NewCounter(meta *MetricMeta) Counter {
    }
}

+func RegCounter(name string, tagStrings ...string) Counter {
+    cr := NewCounter(NewMetricMeta(name, tagStrings))
+    MetricStats.Register(cr)
+    return cr
+}

// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
@@ -4,6 +4,7 @@ import (
    "bytes"
    "fmt"
    "net"
+    "reflect"
    "time"

    "github.com/grafana/grafana/pkg/log"

@@ -40,23 +41,38 @@ func (this *GraphitePublisher) Publish(metrics []Metric) {

    buf := bytes.NewBufferString("")
    now := time.Now().Unix()
-    addToBuf := func(metric string, value int64) {
+    addIntToBuf := func(metric string, value int64) {
        buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
    }
+    addFloatToBuf := func(metric string, value float64) {
+        buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
+    }

    for _, m := range metrics {
+        log.Info("metric: %v, %v", m, reflect.TypeOf(m))
        metricName := this.Prefix + m.Name() + m.StringifyTags()
+        log.Info(metricName)

        switch metric := m.(type) {
        case Counter:
-            addToBuf(metricName+".count", metric.Count())
+            addIntToBuf(metricName+".count", metric.Count())
+        case SimpleTimer:
+            addIntToBuf(metricName+".count", metric.Count())
+            addIntToBuf(metricName+".max", metric.Max())
+            addIntToBuf(metricName+".min", metric.Min())
+            addFloatToBuf(metricName+".mean", metric.Mean())
        case Timer:
-            addToBuf(metricName+".count", metric.Count())
-            addToBuf(metricName+".max", metric.Max())
-            addToBuf(metricName+".min", metric.Min())
-            addToBuf(metricName+".avg", metric.Avg())
+            percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
+            addIntToBuf(metricName+".count", metric.Count())
+            addIntToBuf(metricName+".max", metric.Max())
+            addIntToBuf(metricName+".min", metric.Min())
+            addFloatToBuf(metricName+".mean", metric.Mean())
+            addFloatToBuf(metricName+".std", metric.StdDev())
+            addFloatToBuf(metricName+".p25", percentiles[0])
+            addFloatToBuf(metricName+".p75", percentiles[1])
+            addFloatToBuf(metricName+".p90", percentiles[2])
+            addFloatToBuf(metricName+".p99", percentiles[3])
        }
    }

    log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf)
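The buffered lines follow the Graphite plaintext protocol, "path value unix-timestamp". With a hypothetical publisher prefix of "prod.grafana." and the "api.dataproxy.request.all" timer registered elsewhere in this diff, one publish cycle would append lines roughly like the following (values and timestamp are made up; floats carry six decimals because of %f):

prod.grafana.api.dataproxy.request.all.count 42 1432828800
prod.grafana.api.dataproxy.request.all.max 918 1432828800
prod.grafana.api.dataproxy.request.all.min 3 1432828800
prod.grafana.api.dataproxy.request.all.mean 127.400000 1432828800
prod.grafana.api.dataproxy.request.all.std 88.200000 1432828800
prod.grafana.api.dataproxy.request.all.p25 61.000000 1432828800
prod.grafana.api.dataproxy.request.all.p75 171.000000 1432828800
prod.grafana.api.dataproxy.request.all.p90 260.000000 1432828800
prod.grafana.api.dataproxy.request.all.p99 640.000000 1432828800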
pkg/metrics/histogram.go (new file, 189 lines)
@@ -0,0 +1,189 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
    Metric

    Clear()
    Count() int64
    Max() int64
    Mean() float64
    Min() int64
    Percentile(float64) float64
    Percentiles([]float64) []float64
    StdDev() float64
    Sum() int64
    Update(int64)
    Variance() float64
}

func NewHistogram(meta *MetricMeta, s Sample) Histogram {
    return &StandardHistogram{
        MetricMeta: meta,
        sample:     s,
    }
}

// HistogramSnapshot is a read-only copy of another Histogram.
type HistogramSnapshot struct {
    *MetricMeta
    sample *SampleSnapshot
}

// Clear panics.
func (*HistogramSnapshot) Clear() {
    panic("Clear called on a HistogramSnapshot")
}

// Count returns the number of samples recorded at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }

// Max returns the maximum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }

// Mean returns the mean of the values in the sample at the time the snapshot
// was taken.
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }

// Min returns the minimum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }

// Percentile returns an arbitrary percentile of values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) Percentile(p float64) float64 {
    return h.sample.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of values in the sample
// at the time the snapshot was taken.
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
    return h.sample.Percentiles(ps)
}

// Sample returns the Sample underlying the histogram.
func (h *HistogramSnapshot) Sample() Sample { return h.sample }

// Snapshot returns the snapshot.
func (h *HistogramSnapshot) Snapshot() Metric { return h }

// StdDev returns the standard deviation of the values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }

// Sum returns the sum in the sample at the time the snapshot was taken.
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }

// Update panics.
func (*HistogramSnapshot) Update(int64) {
    panic("Update called on a HistogramSnapshot")
}

// Variance returns the variance of inputs at the time the snapshot was taken.
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }

// NilHistogram is a no-op Histogram.
type NilHistogram struct {
    *MetricMeta
}

// Clear is a no-op.
func (NilHistogram) Clear() {}

// Count is a no-op.
func (NilHistogram) Count() int64 { return 0 }

// Max is a no-op.
func (NilHistogram) Max() int64 { return 0 }

// Mean is a no-op.
func (NilHistogram) Mean() float64 { return 0.0 }

// Min is a no-op.
func (NilHistogram) Min() int64 { return 0 }

// Percentile is a no-op.
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }

// Percentiles is a no-op.
func (NilHistogram) Percentiles(ps []float64) []float64 {
    return make([]float64, len(ps))
}

// Sample is a no-op.
func (NilHistogram) Sample() Sample { return NilSample{} }

// Snapshot is a no-op.
func (n NilHistogram) Snapshot() Metric { return n }

// StdDev is a no-op.
func (NilHistogram) StdDev() float64 { return 0.0 }

// Sum is a no-op.
func (NilHistogram) Sum() int64 { return 0 }

// Update is a no-op.
func (NilHistogram) Update(v int64) {}

// Variance is a no-op.
func (NilHistogram) Variance() float64 { return 0.0 }

// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
type StandardHistogram struct {
    *MetricMeta
    sample Sample
}

// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }

// Count returns the number of samples recorded since the histogram was last
// cleared.
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }

// Max returns the maximum value in the sample.
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }

// Mean returns the mean of the values in the sample.
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }

// Min returns the minimum value in the sample.
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }

// Percentile returns an arbitrary percentile of the values in the sample.
func (h *StandardHistogram) Percentile(p float64) float64 {
    return h.sample.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
    return h.sample.Percentiles(ps)
}

// Sample returns the Sample underlying the histogram.
func (h *StandardHistogram) Sample() Sample { return h.sample }

// Snapshot returns a read-only copy of the histogram.
func (h *StandardHistogram) Snapshot() Metric {
    return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
}

// StdDev returns the standard deviation of the values in the sample.
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }

// Sum returns the sum in the sample.
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }

// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }

// Variance returns the variance of the values in the sample.
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
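A short sketch of pairing StandardHistogram with an exponentially decaying sample, in the spirit of the commit message; the 1028/0.015 reservoir parameters are the common go-metrics defaults and are only an assumption here, not necessarily what Grafana ends up wiring in:

package main

import (
    "fmt"

    "github.com/grafana/grafana/pkg/metrics"
)

func main() {
    // NewHistogram takes an optional *MetricMeta and the Sample that bounds memory use.
    h := metrics.NewHistogram(nil, metrics.NewExpDecaySample(1028, 0.015))

    for i := int64(1); i <= 10000; i++ {
        h.Update(i)
    }

    ps := h.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
    fmt.Println(h.Count(), h.Min(), h.Max(), h.Mean(), ps)
}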
pkg/metrics/histogram_test.go (new file, 90 lines)
@@ -0,0 +1,90 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

import "testing"

func BenchmarkHistogram(b *testing.B) {
    h := NewHistogram(nil, NewUniformSample(100))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        h.Update(int64(i))
    }
}

func TestHistogram10000(t *testing.T) {
    h := NewHistogram(nil, NewUniformSample(100000))
    for i := 1; i <= 10000; i++ {
        h.Update(int64(i))
    }
    testHistogram10000(t, h)
}

func TestHistogramEmpty(t *testing.T) {
    h := NewHistogram(nil, NewUniformSample(100))
    if count := h.Count(); 0 != count {
        t.Errorf("h.Count(): 0 != %v\n", count)
    }
    if min := h.Min(); 0 != min {
        t.Errorf("h.Min(): 0 != %v\n", min)
    }
    if max := h.Max(); 0 != max {
        t.Errorf("h.Max(): 0 != %v\n", max)
    }
    if mean := h.Mean(); 0.0 != mean {
        t.Errorf("h.Mean(): 0.0 != %v\n", mean)
    }
    if stdDev := h.StdDev(); 0.0 != stdDev {
        t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
    }
    ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
    if 0.0 != ps[0] {
        t.Errorf("median: 0.0 != %v\n", ps[0])
    }
    if 0.0 != ps[1] {
        t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
    }
    if 0.0 != ps[2] {
        t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
    }
}

func TestHistogramSnapshot(t *testing.T) {
    h := NewHistogram(nil, NewUniformSample(100000))
    for i := 1; i <= 10000; i++ {
        h.Update(int64(i))
    }
    snapshot := h.Snapshot().(Histogram)
    h.Update(0)
    testHistogram10000(t, snapshot)
}

func testHistogram10000(t *testing.T, h Histogram) {
    if count := h.Count(); 10000 != count {
        t.Errorf("h.Count(): 10000 != %v\n", count)
    }
    if min := h.Min(); 1 != min {
        t.Errorf("h.Min(): 1 != %v\n", min)
    }
    if max := h.Max(); 10000 != max {
        t.Errorf("h.Max(): 10000 != %v\n", max)
    }
    if mean := h.Mean(); 5000.5 != mean {
        t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
    }
    if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
        t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
    }
    ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
    if 5000.5 != ps[0] {
        t.Errorf("median: 5000.5 != %v\n", ps[0])
    }
    if 7500.75 != ps[1] {
        t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
    }
    if 9900.99 != ps[2] {
        t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
    }
}
@@ -76,7 +76,7 @@ func (this *InfluxPublisher) Publish(metrics []Metric) {

    for _, m := range metrics {
        tags := m.GetTagsCopy()
-        addPoint := func(name string, value int64) {
+        addPoint := func(name string, value interface{}) {
            bp.Points = append(bp.Points, client.Point{
                Measurement: name,
                Tags:        tags,

@@ -87,11 +87,11 @@ func (this *InfluxPublisher) Publish(metrics []Metric) {
        switch metric := m.(type) {
        case Counter:
            addPoint(metric.Name()+".count", metric.Count())
-        case Timer:
+        case SimpleTimer:
            addPoint(metric.Name()+".count", metric.Count())
            addPoint(metric.Name()+".max", metric.Max())
            addPoint(metric.Name()+".min", metric.Min())
-            addPoint(metric.Name()+".avg", metric.Avg())
+            addPoint(metric.Name()+".avg", metric.Mean())
        }
    }
pkg/metrics/meter.go (new file, 221 lines)
@@ -0,0 +1,221 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

import (
    "sync"
    "time"
)

// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter interface {
    Metric

    Count() int64
    Mark(int64)
    Rate1() float64
    Rate5() float64
    Rate15() float64
    RateMean() float64
}

// NewMeter constructs a new StandardMeter and launches a goroutine.
func NewMeter(meta *MetricMeta) Meter {
    if UseNilMetrics {
        return NilMeter{}
    }

    m := newStandardMeter(meta)
    arbiter.Lock()
    defer arbiter.Unlock()
    arbiter.meters = append(arbiter.meters, m)
    if !arbiter.started {
        arbiter.started = true
        go arbiter.tick()
    }
    return m
}

type MeterSnapshot struct {
    *MetricMeta
    count                          int64
    rate1, rate5, rate15, rateMean float64
}

// Count returns the count of events at the time the snapshot was taken.
func (m *MeterSnapshot) Count() int64 { return m.count }

// Mark panics.
func (*MeterSnapshot) Mark(n int64) {
    panic("Mark called on a MeterSnapshot")
}

// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }

// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }

// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }

// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }

// Snapshot returns the snapshot.
func (m *MeterSnapshot) Snapshot() Metric { return m }

// NilMeter is a no-op Meter.
type NilMeter struct{ *MetricMeta }

// Count is a no-op.
func (NilMeter) Count() int64 { return 0 }

// Mark is a no-op.
func (NilMeter) Mark(n int64) {}

// Rate1 is a no-op.
func (NilMeter) Rate1() float64 { return 0.0 }

// Rate5 is a no-op.
func (NilMeter) Rate5() float64 { return 0.0 }

// Rate15 is a no-op.
func (NilMeter) Rate15() float64 { return 0.0 }

// RateMean is a no-op.
func (NilMeter) RateMean() float64 { return 0.0 }

// Snapshot is a no-op.
func (NilMeter) Snapshot() Metric { return NilMeter{} }

// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
    *MetricMeta
    lock        sync.RWMutex
    snapshot    *MeterSnapshot
    a1, a5, a15 EWMA
    startTime   time.Time
}

func newStandardMeter(meta *MetricMeta) *StandardMeter {
    return &StandardMeter{
        MetricMeta: meta,
        snapshot:   &MeterSnapshot{MetricMeta: meta},
        a1:         NewEWMA1(),
        a5:         NewEWMA5(),
        a15:        NewEWMA15(),
        startTime:  time.Now(),
    }
}

// Count returns the number of events recorded.
func (m *StandardMeter) Count() int64 {
    m.lock.RLock()
    count := m.snapshot.count
    m.lock.RUnlock()
    return count
}

// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
    m.lock.Lock()
    defer m.lock.Unlock()
    m.snapshot.count += n
    m.a1.Update(n)
    m.a5.Update(n)
    m.a15.Update(n)
    m.updateSnapshot()
}

// Rate1 returns the one-minute moving average rate of events per second.
func (m *StandardMeter) Rate1() float64 {
    m.lock.RLock()
    rate1 := m.snapshot.rate1
    m.lock.RUnlock()
    return rate1
}

// Rate5 returns the five-minute moving average rate of events per second.
func (m *StandardMeter) Rate5() float64 {
    m.lock.RLock()
    rate5 := m.snapshot.rate5
    m.lock.RUnlock()
    return rate5
}

// Rate15 returns the fifteen-minute moving average rate of events per second.
func (m *StandardMeter) Rate15() float64 {
    m.lock.RLock()
    rate15 := m.snapshot.rate15
    m.lock.RUnlock()
    return rate15
}

// RateMean returns the meter's mean rate of events per second.
func (m *StandardMeter) RateMean() float64 {
    m.lock.RLock()
    rateMean := m.snapshot.rateMean
    m.lock.RUnlock()
    return rateMean
}

// Snapshot returns a read-only copy of the meter.
func (m *StandardMeter) Snapshot() Metric {
    m.lock.RLock()
    snapshot := *m.snapshot
    m.lock.RUnlock()
    return &snapshot
}

func (m *StandardMeter) updateSnapshot() {
    // should run with write lock held on m.lock
    snapshot := m.snapshot
    snapshot.rate1 = m.a1.Rate()
    snapshot.rate5 = m.a5.Rate()
    snapshot.rate15 = m.a15.Rate()
    snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
}

func (m *StandardMeter) tick() {
    m.lock.Lock()
    defer m.lock.Unlock()
    m.a1.Tick()
    m.a5.Tick()
    m.a15.Tick()
    m.updateSnapshot()
}

type meterArbiter struct {
    sync.RWMutex
    started bool
    meters  []*StandardMeter
    ticker  *time.Ticker
}

var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}

// Ticks meters on the scheduled interval
func (ma *meterArbiter) tick() {
    for {
        select {
        case <-ma.ticker.C:
            ma.tickMeters()
        }
    }
}

func (ma *meterArbiter) tickMeters() {
    ma.RLock()
    defer ma.RUnlock()
    for _, meter := range ma.meters {
        meter.tick()
    }
}
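Sketch of a meter used on its own, assuming metrics are enabled; NewMeter registers the meter with the package-level arbiter above, which drives every registered meter's EWMAs from a shared five-second ticker. The metric name is made up, and NewMetricMeta is called the way it is used elsewhere in this diff (name plus a tag-string slice):

package main

import (
    "fmt"
    "time"

    "github.com/grafana/grafana/pkg/metrics"
)

func main() {
    metrics.UseNilMetrics = false // otherwise NewMeter returns the no-op NilMeter

    m := metrics.NewMeter(metrics.NewMetricMeta("proxy.requests", []string{}))
    for i := 0; i < 3; i++ {
        m.Mark(100) // record 100 events
        time.Sleep(time.Second)
    }
    fmt.Println(m.Count(), m.RateMean(), m.Rate1())
}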
@@ -1,101 +0,0 @@
package metrics

type comboCounterRef struct {
    *MetricMeta
    usageCounter  Counter
    metricCounter Counter
}

type comboTimerRef struct {
    *MetricMeta
    usageTimer  Timer
    metricTimer Timer
}

func RegComboCounter(name string, tagStrings ...string) Counter {
    meta := NewMetricMeta(name, tagStrings)
    cr := &comboCounterRef{
        MetricMeta:    meta,
        usageCounter:  NewCounter(meta),
        metricCounter: NewCounter(meta),
    }

    UsageStats.Register(cr.usageCounter)
    MetricStats.Register(cr.metricCounter)

    return cr
}

func RegComboTimer(name string, tagStrings ...string) Timer {
    meta := NewMetricMeta(name, tagStrings)
    tr := &comboTimerRef{
        MetricMeta:  meta,
        usageTimer:  NewTimer(meta),
        metricTimer: NewTimer(meta),
    }

    UsageStats.Register(tr.usageTimer)
    MetricStats.Register(tr.metricTimer)
    return tr
}

func RegTimer(name string, tagStrings ...string) Timer {
    tr := NewTimer(NewMetricMeta(name, tagStrings))
    MetricStats.Register(tr)
    return tr
}

func (t comboTimerRef) Clear() {
    t.metricTimer.Clear()
    t.usageTimer.Clear()
}

func (t comboTimerRef) Avg() int64 {
    panic("Avg called on combotimer ref")
}

func (t comboTimerRef) Min() int64 {
    panic("Avg called on combotimer ref")
}

func (t comboTimerRef) Max() int64 {
    panic("Avg called on combotimer ref")
}

func (t comboTimerRef) Count() int64 {
    panic("Avg called on combotimer ref")
}

func (t comboTimerRef) Snapshot() Metric {
    panic("Snapshot called on combotimer ref")
}

func (t comboTimerRef) AddTiming(timing int64) {
    t.metricTimer.AddTiming(timing)
    t.usageTimer.AddTiming(timing)
}

func (c comboCounterRef) Clear() {
    c.usageCounter.Clear()
    c.metricCounter.Clear()
}

func (c comboCounterRef) Count() int64 {
    panic("Count called on a combocounter ref")
}

// Dec panics.
func (c comboCounterRef) Dec(i int64) {
    c.usageCounter.Dec(i)
    c.metricCounter.Dec(i)
}

// Inc panics.
func (c comboCounterRef) Inc(i int64) {
    c.usageCounter.Inc(i)
    c.metricCounter.Inc(i)
}

func (c comboCounterRef) Snapshot() Metric {
    return c.metricCounter.Snapshot()
}
@@ -1,35 +1,65 @@
package metrics

-var UsageStats = NewRegistry()
+import "github.com/Unknwon/log"

-var MetricStats = NewRegistry()
+var UseNilMetrics bool = true

var (
-    M_Instance_Start = RegComboCounter("instance_start")
-
-    M_Page_Status_200 = RegComboCounter("page.resp_status", "code", "200")
-    M_Page_Status_500 = RegComboCounter("page.resp_status", "code", "500")
-    M_Page_Status_404 = RegComboCounter("page.resp_status", "code", "404")
-
-    M_Api_Status_500 = RegComboCounter("api.resp_status", "code", "500")
-    M_Api_Status_404 = RegComboCounter("api.resp_status", "code", "404")
-
-    M_Api_User_SignUpStarted = RegComboCounter("api.user.signup_started")
-    M_Api_User_SignUpCompleted = RegComboCounter("api.user.signup_completed")
-    M_Api_User_SignUpInvite = RegComboCounter("api.user.signup_invite")
-    M_Api_Dashboard_Get = RegComboCounter("api.dashboard.get")
-
-    M_Api_Dashboard_Post = RegComboCounter("api.dashboard.post")
-    M_Api_Admin_User_Create = RegComboCounter("api.admin.user_create")
-    M_Api_Login_Post = RegComboCounter("api.login.post")
-    M_Api_Login_OAuth = RegComboCounter("api.login.oauth")
-    M_Api_Org_Create = RegComboCounter("api.org.create")
-
-    M_Api_Dashboard_Snapshot_Create = RegComboCounter("api.dashboard_snapshot.create")
-    M_Api_Dashboard_Snapshot_External = RegComboCounter("api.dashboard_snapshot.external")
-    M_Api_Dashboard_Snapshot_Get = RegComboCounter("api.dashboard_snapshot.get")
-
-    M_Models_Dashboard_Insert = RegComboCounter("models.dashboard.insert")
+    M_Instance_Start Counter
+    M_Page_Status_200 Counter
+    M_Page_Status_500 Counter
+    M_Page_Status_404 Counter
+    M_Api_Status_500 Counter
+    M_Api_Status_404 Counter
+    M_Api_User_SignUpStarted Counter
+    M_Api_User_SignUpCompleted Counter
+    M_Api_User_SignUpInvite Counter
+    M_Api_Dashboard_Get Counter
+    M_Api_Dashboard_Post Counter
+    M_Api_Admin_User_Create Counter
+    M_Api_Login_Post Counter
+    M_Api_Login_OAuth Counter
+    M_Api_Org_Create Counter
+    M_Api_Dashboard_Snapshot_Create Counter
+    M_Api_Dashboard_Snapshot_External Counter
+    M_Api_Dashboard_Snapshot_Get Counter
+    M_Models_Dashboard_Insert Counter

    // Timers
-    M_DataSource_ProxyReq_Timer = RegComboTimer("api.dataproxy.request.all")
+    M_DataSource_ProxyReq_Timer Timer
)

+func initMetricVars(settings *MetricSettings) {
+    log.Info("Init metric vars")
+    UseNilMetrics = settings.Enabled == false
+
+    M_Instance_Start = RegCounter("instance_start")
+
+    M_Page_Status_200 = RegCounter("page.resp_status", "code", "200")
+    M_Page_Status_500 = RegCounter("page.resp_status", "code", "500")
+    M_Page_Status_404 = RegCounter("page.resp_status", "code", "404")
+
+    M_Api_Status_500 = RegCounter("api.resp_status", "code", "500")
+    M_Api_Status_404 = RegCounter("api.resp_status", "code", "404")
+
+    M_Api_User_SignUpStarted = RegCounter("api.user.signup_started")
+    M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed")
+    M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite")
+    M_Api_Dashboard_Get = RegCounter("api.dashboard.get")
+
+    M_Api_Dashboard_Post = RegCounter("api.dashboard.post")
+    M_Api_Admin_User_Create = RegCounter("api.admin.user_create")
+    M_Api_Login_Post = RegCounter("api.login.post")
+    M_Api_Login_OAuth = RegCounter("api.login.oauth")
+    M_Api_Org_Create = RegCounter("api.org.create")
+
+    M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create")
+    M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external")
+    M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get")
+
+    M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert")
+
+    // Timers
+    M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all")
+}
@@ -15,14 +15,14 @@ import (
)

func Init() {
-    go instrumentationLoop()
+    settings := readSettings()
+    initMetricVars(settings)
+    go instrumentationLoop(settings)
}

-func instrumentationLoop() chan struct{} {
+func instrumentationLoop(settings *MetricSettings) chan struct{} {
    M_Instance_Start.Inc(1)

-    settings := readSettings()

    onceEveryDayTick := time.NewTicker(time.Hour * 24)
    secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))

@@ -61,16 +61,6 @@ func sendUsageStats() {
        "metrics": metrics,
    }

-    snapshots := UsageStats.GetSnapshots()
-    for _, m := range snapshots {
-        switch metric := m.(type) {
-        case Counter:
-            if metric.Count() > 0 {
-                metrics[metric.Name()+".count"] = metric.Count()
-            }
-        }
-    }

    statsQuery := m.GetSystemStatsQuery{}
    if err := bus.Dispatch(&statsQuery); err != nil {
        log.Error(3, "Failed to get system stats", err)
@@ -32,7 +32,12 @@ func (r *StandardRegistry) GetSnapshots() []Metric {
    metrics := make([]Metric, len(r.metrics))
    for i, metric := range r.metrics {
        metrics[i] = metric.Snapshot()
-        metric.Clear()
+        switch typedMetric := metric.(type) {
+        case Histogram:
+            // do not clear histograms
+        case Counter:
+            typedMetric.Clear()
+        }
    }
    return metrics
}
pkg/metrics/sample.go (new file, 607 lines)
@@ -0,0 +1,607 @@
|
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const rescaleThreshold = time.Hour
|
||||
|
||||
// Samples maintain a statistically-significant selection of values from
|
||||
// a stream.
|
||||
type Sample interface {
|
||||
Clear()
|
||||
Count() int64
|
||||
Max() int64
|
||||
Mean() float64
|
||||
Min() int64
|
||||
Percentile(float64) float64
|
||||
Percentiles([]float64) []float64
|
||||
Size() int
|
||||
Snapshot() Sample
|
||||
StdDev() float64
|
||||
Sum() int64
|
||||
Update(int64)
|
||||
Values() []int64
|
||||
Variance() float64
|
||||
}
|
||||
|
||||
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
|
||||
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
|
||||
// Decay Model for Streaming Systems".
|
||||
//
|
||||
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
|
||||
type ExpDecaySample struct {
|
||||
alpha float64
|
||||
count int64
|
||||
mutex sync.Mutex
|
||||
reservoirSize int
|
||||
t0, t1 time.Time
|
||||
values *expDecaySampleHeap
|
||||
}
|
||||
|
||||
// NewExpDecaySample constructs a new exponentially-decaying sample with the
|
||||
// given reservoir size and alpha.
|
||||
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
|
||||
s := &ExpDecaySample{
|
||||
alpha: alpha,
|
||||
reservoirSize: reservoirSize,
|
||||
t0: time.Now(),
|
||||
values: newExpDecaySampleHeap(reservoirSize),
|
||||
}
|
||||
s.t1 = s.t0.Add(rescaleThreshold)
|
||||
return s
|
||||
}
|
||||
|
||||
// Clear clears all samples.
|
||||
func (s *ExpDecaySample) Clear() {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count = 0
|
||||
s.t0 = time.Now()
|
||||
s.t1 = s.t0.Add(rescaleThreshold)
|
||||
s.values.Clear()
|
||||
}
|
||||
|
||||
// Count returns the number of samples recorded, which may exceed the
|
||||
// reservoir size.
|
||||
func (s *ExpDecaySample) Count() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return s.count
|
||||
}
|
||||
|
||||
// Max returns the maximum value in the sample, which may not be the maximum
|
||||
// value ever to be part of the sample.
|
||||
func (s *ExpDecaySample) Max() int64 {
|
||||
return SampleMax(s.Values())
|
||||
}
|
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (s *ExpDecaySample) Mean() float64 {
|
||||
return SampleMean(s.Values())
|
||||
}
|
||||
|
||||
// Min returns the minimum value in the sample, which may not be the minimum
|
||||
// value ever to be part of the sample.
|
||||
func (s *ExpDecaySample) Min() int64 {
|
||||
return SampleMin(s.Values())
|
||||
}
|
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample.
|
||||
func (s *ExpDecaySample) Percentile(p float64) float64 {
|
||||
return SamplePercentile(s.Values(), p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||
// sample.
|
||||
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
|
||||
return SamplePercentiles(s.Values(), ps)
|
||||
}
|
||||
|
||||
// Size returns the size of the sample, which is at most the reservoir size.
|
||||
func (s *ExpDecaySample) Size() int {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return s.values.Size()
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the sample.
|
||||
func (s *ExpDecaySample) Snapshot() Sample {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
vals := s.values.Values()
|
||||
values := make([]int64, len(vals))
|
||||
for i, v := range vals {
|
||||
values[i] = v.v
|
||||
}
|
||||
return &SampleSnapshot{
|
||||
count: s.count,
|
||||
values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (s *ExpDecaySample) StdDev() float64 {
|
||||
return SampleStdDev(s.Values())
|
||||
}
|
||||
|
||||
// Sum returns the sum of the values in the sample.
|
||||
func (s *ExpDecaySample) Sum() int64 {
|
||||
return SampleSum(s.Values())
|
||||
}
|
||||
|
||||
// Update samples a new value.
|
||||
func (s *ExpDecaySample) Update(v int64) {
|
||||
s.update(time.Now(), v)
|
||||
}
|
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *ExpDecaySample) Values() []int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
vals := s.values.Values()
|
||||
values := make([]int64, len(vals))
|
||||
for i, v := range vals {
|
||||
values[i] = v.v
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (s *ExpDecaySample) Variance() float64 {
|
||||
return SampleVariance(s.Values())
|
||||
}
|
||||
|
||||
// update samples a new value at a particular timestamp. This is a method all
|
||||
// its own to facilitate testing.
|
||||
func (s *ExpDecaySample) update(t time.Time, v int64) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count++
|
||||
if s.values.Size() == s.reservoirSize {
|
||||
s.values.Pop()
|
||||
}
|
||||
s.values.Push(expDecaySample{
|
||||
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
|
||||
v: v,
|
||||
})
|
||||
if t.After(s.t1) {
|
||||
values := s.values.Values()
|
||||
t0 := s.t0
|
||||
s.values.Clear()
|
||||
s.t0 = t
|
||||
s.t1 = s.t0.Add(rescaleThreshold)
|
||||
for _, v := range values {
|
||||
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
|
||||
s.values.Push(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NilSample is a no-op Sample.
|
||||
type NilSample struct{}
|
||||
|
||||
// Clear is a no-op.
|
||||
func (NilSample) Clear() {}
|
||||
|
||||
// Count is a no-op.
|
||||
func (NilSample) Count() int64 { return 0 }
|
||||
|
||||
// Max is a no-op.
|
||||
func (NilSample) Max() int64 { return 0 }
|
||||
|
||||
// Mean is a no-op.
|
||||
func (NilSample) Mean() float64 { return 0.0 }
|
||||
|
||||
// Min is a no-op.
|
||||
func (NilSample) Min() int64 { return 0 }
|
||||
|
||||
// Percentile is a no-op.
|
||||
func (NilSample) Percentile(p float64) float64 { return 0.0 }
|
||||
|
||||
// Percentiles is a no-op.
|
||||
func (NilSample) Percentiles(ps []float64) []float64 {
|
||||
return make([]float64, len(ps))
|
||||
}
|
||||
|
||||
// Size is a no-op.
|
||||
func (NilSample) Size() int { return 0 }
|
||||
|
||||
// Sample is a no-op.
|
||||
func (NilSample) Snapshot() Sample { return NilSample{} }
|
||||
|
||||
// StdDev is a no-op.
|
||||
func (NilSample) StdDev() float64 { return 0.0 }
|
||||
|
||||
// Sum is a no-op.
|
||||
func (NilSample) Sum() int64 { return 0 }
|
||||
|
||||
// Update is a no-op.
|
||||
func (NilSample) Update(v int64) {}
|
||||
|
||||
// Values is a no-op.
|
||||
func (NilSample) Values() []int64 { return []int64{} }
|
||||
|
||||
// Variance is a no-op.
|
||||
func (NilSample) Variance() float64 { return 0.0 }
|
||||
|
||||
// SampleMax returns the maximum value of the slice of int64.
|
||||
func SampleMax(values []int64) int64 {
|
||||
if 0 == len(values) {
|
||||
return 0
|
||||
}
|
||||
var max int64 = math.MinInt64
|
||||
for _, v := range values {
|
||||
if max < v {
|
||||
max = v
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
// SampleMean returns the mean value of the slice of int64.
|
||||
func SampleMean(values []int64) float64 {
|
||||
if 0 == len(values) {
|
||||
return 0.0
|
||||
}
|
||||
return float64(SampleSum(values)) / float64(len(values))
|
||||
}
|
||||
|
||||
// SampleMin returns the minimum value of the slice of int64.
|
||||
func SampleMin(values []int64) int64 {
|
||||
if 0 == len(values) {
|
||||
return 0
|
||||
}
|
||||
var min int64 = math.MaxInt64
|
||||
for _, v := range values {
|
||||
if min > v {
|
||||
min = v
|
||||
}
|
||||
}
|
||||
return min
|
||||
}
|
||||
|
||||
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
|
||||
func SamplePercentile(values int64Slice, p float64) float64 {
|
||||
return SamplePercentiles(values, []float64{p})[0]
|
||||
}
|
||||
|
||||
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
|
||||
// int64.
|
||||
func SamplePercentiles(values int64Slice, ps []float64) []float64 {
|
||||
scores := make([]float64, len(ps))
|
||||
size := len(values)
|
||||
if size > 0 {
|
||||
sort.Sort(values)
|
||||
for i, p := range ps {
|
||||
pos := p * float64(size+1)
|
||||
if pos < 1.0 {
|
||||
scores[i] = float64(values[0])
|
||||
} else if pos >= float64(size) {
|
||||
scores[i] = float64(values[size-1])
|
||||
} else {
|
||||
lower := float64(values[int(pos)-1])
|
||||
upper := float64(values[int(pos)])
|
||||
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
|
||||
}
|
||||
}
|
||||
}
|
||||
return scores
|
||||
}
|
||||
|
||||
// SampleSnapshot is a read-only copy of another Sample.
|
||||
type SampleSnapshot struct {
|
||||
count int64
|
||||
values []int64
|
||||
}
|
||||
|
||||
// Clear panics.
|
||||
func (*SampleSnapshot) Clear() {
|
||||
panic("Clear called on a SampleSnapshot")
|
||||
}
|
||||
|
||||
// Count returns the count of inputs at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Count() int64 { return s.count }
|
||||
|
||||
// Max returns the maximal value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
|
||||
|
||||
// Mean returns the mean value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
|
||||
|
||||
// Min returns the minimal value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
|
||||
|
||||
// Percentile returns an arbitrary percentile of values at the time the
|
||||
// snapshot was taken.
|
||||
func (s *SampleSnapshot) Percentile(p float64) float64 {
|
||||
return SamplePercentile(s.values, p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values at the time
|
||||
// the snapshot was taken.
|
||||
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
|
||||
return SamplePercentiles(s.values, ps)
|
||||
}
|
||||
|
||||
// Size returns the size of the sample at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Size() int { return len(s.values) }
|
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (s *SampleSnapshot) Snapshot() Sample { return s }
|
||||
|
||||
// StdDev returns the standard deviation of values at the time the snapshot was
|
||||
// taken.
|
||||
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
|
||||
|
||||
// Sum returns the sum of values at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
|
||||
|
||||
// Update panics.
|
||||
func (*SampleSnapshot) Update(int64) {
|
||||
panic("Update called on a SampleSnapshot")
|
||||
}
|
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *SampleSnapshot) Values() []int64 {
|
||||
values := make([]int64, len(s.values))
|
||||
copy(values, s.values)
|
||||
return values
|
||||
}
|
||||
|
||||
// Variance returns the variance of values at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
|
||||
|
||||
// SampleStdDev returns the standard deviation of the slice of int64.
|
||||
func SampleStdDev(values []int64) float64 {
|
||||
return math.Sqrt(SampleVariance(values))
|
||||
}
|
||||
|
||||
// SampleSum returns the sum of the slice of int64.
|
||||
func SampleSum(values []int64) int64 {
|
||||
var sum int64
|
||||
for _, v := range values {
|
||||
sum += v
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// SampleVariance returns the variance of the slice of int64.
|
||||
func SampleVariance(values []int64) float64 {
|
||||
if 0 == len(values) {
|
||||
return 0.0
|
||||
}
|
||||
m := SampleMean(values)
|
||||
var sum float64
|
||||
for _, v := range values {
|
||||
d := float64(v) - m
|
||||
sum += d * d
|
||||
}
|
||||
return sum / float64(len(values))
|
||||
}
|
||||
|
||||
// A uniform sample using Vitter's Algorithm R.
|
||||
//
|
||||
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
|
||||
type UniformSample struct {
|
||||
count int64
|
||||
mutex sync.Mutex
|
||||
reservoirSize int
|
||||
values []int64
|
||||
}
|
||||
|
||||
// NewUniformSample constructs a new uniform sample with the given reservoir
|
||||
// size.
|
||||
func NewUniformSample(reservoirSize int) Sample {
|
||||
return &UniformSample{
|
||||
reservoirSize: reservoirSize,
|
||||
values: make([]int64, 0, reservoirSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Clear clears all samples.
|
||||
func (s *UniformSample) Clear() {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count = 0
|
||||
s.values = make([]int64, 0, s.reservoirSize)
|
||||
}
|
||||
|
||||
// Count returns the number of samples recorded, which may exceed the
|
||||
// reservoir size.
|
||||
func (s *UniformSample) Count() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return s.count
|
||||
}
|
||||
|
||||
// Max returns the maximum value in the sample, which may not be the maximum
|
||||
// value ever to be part of the sample.
|
||||
func (s *UniformSample) Max() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleMax(s.values)
|
||||
}
|
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (s *UniformSample) Mean() float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleMean(s.values)
|
||||
}
|
||||
|
||||
// Min returns the minimum value in the sample, which may not be the minimum
|
||||
// value ever to be part of the sample.
|
||||
func (s *UniformSample) Min() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleMin(s.values)
|
||||
}
|
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample.
|
||||
func (s *UniformSample) Percentile(p float64) float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SamplePercentile(s.values, p)
|
||||
}
|
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||
// sample.
|
||||
func (s *UniformSample) Percentiles(ps []float64) []float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SamplePercentiles(s.values, ps)
|
||||
}
|
||||
|
||||
// Size returns the size of the sample, which is at most the reservoir size.
|
||||
func (s *UniformSample) Size() int {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Snapshot returns a read-only copy of the sample.
|
||||
func (s *UniformSample) Snapshot() Sample {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
values := make([]int64, len(s.values))
|
||||
copy(values, s.values)
|
||||
return &SampleSnapshot{
|
||||
count: s.count,
|
||||
values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (s *UniformSample) StdDev() float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleStdDev(s.values)
|
||||
}
|
||||
|
||||
// Sum returns the sum of the values in the sample.
|
||||
func (s *UniformSample) Sum() int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleSum(s.values)
|
||||
}
|
||||
|
||||
// Update samples a new value.
|
||||
func (s *UniformSample) Update(v int64) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.count++
|
||||
if len(s.values) < s.reservoirSize {
|
||||
s.values = append(s.values, v)
|
||||
} else {
|
||||
r := rand.Int63n(s.count)
|
||||
if r < int64(len(s.values)) {
|
||||
s.values[int(r)] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *UniformSample) Values() []int64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
values := make([]int64, len(s.values))
|
||||
copy(values, s.values)
|
||||
return values
|
||||
}
|
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (s *UniformSample) Variance() float64 {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
return SampleVariance(s.values)
|
||||
}
|
||||
|
||||
// expDecaySample represents an individual sample in a heap.
|
||||
type expDecaySample struct {
|
||||
k float64
|
||||
v int64
|
||||
}
|
||||
|
||||
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
|
||||
return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
|
||||
}
|
||||
|
||||
// expDecaySampleHeap is a min-heap of expDecaySamples.
|
||||
// The internal implementation is copied from the standard library's container/heap
|
||||
type expDecaySampleHeap struct {
|
||||
s []expDecaySample
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Clear() {
|
||||
h.s = h.s[:0]
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Push(s expDecaySample) {
|
||||
n := len(h.s)
|
||||
h.s = h.s[0 : n+1]
|
||||
h.s[n] = s
|
||||
h.up(n)
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Pop() expDecaySample {
|
||||
n := len(h.s) - 1
|
||||
h.s[0], h.s[n] = h.s[n], h.s[0]
|
||||
h.down(0, n)
|
||||
|
||||
n = len(h.s)
|
||||
s := h.s[n-1]
|
||||
h.s = h.s[0 : n-1]
|
||||
return s
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Size() int {
|
||||
return len(h.s)
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) Values() []expDecaySample {
|
||||
return h.s
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) up(j int) {
|
||||
for {
|
||||
i := (j - 1) / 2 // parent
|
||||
if i == j || !(h.s[j].k < h.s[i].k) {
|
||||
break
|
||||
}
|
||||
h.s[i], h.s[j] = h.s[j], h.s[i]
|
||||
j = i
|
||||
}
|
||||
}
|
||||
|
||||
func (h *expDecaySampleHeap) down(i, n int) {
|
||||
for {
|
||||
j1 := 2*i + 1
|
||||
if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
|
||||
break
|
||||
}
|
||||
j := j1 // left child
|
||||
if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
|
||||
j = j2 // = 2*i + 2 // right child
|
||||
}
|
||||
if !(h.s[j].k < h.s[i].k) {
|
||||
break
|
||||
}
|
||||
h.s[i], h.s[j] = h.s[j], h.s[i]
|
||||
i = j
|
||||
}
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (p int64Slice) Len() int { return len(p) }
|
||||
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
pkg/metrics/sample_test.go (new file, 367 lines)
@@ -0,0 +1,367 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

import (
	"math/rand"
	"runtime"
	"testing"
	"time"
)

// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}
func BenchmarkCompute1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}
func BenchmarkCopy1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}
func BenchmarkCopy1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}

func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}

func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}

func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}

func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}

func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}

func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}

func TestExpDecaySample10(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 10; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 10 != size {
		t.Errorf("s.Count(): 10 != %v\n", size)
	}
	if size := s.Size(); 10 != size {
		t.Errorf("s.Size(): 10 != %v\n", size)
	}
	if l := len(s.Values()); 10 != l {
		t.Errorf("len(s.Values()): 10 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 10 || v < 0 {
			t.Errorf("out of range [0, 10): %v\n", v)
		}
	}
}

func TestExpDecaySample100(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(1000, 0.01)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 100 != size {
		t.Errorf("s.Count(): 100 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 100 || v < 0 {
			t.Errorf("out of range [0, 100): %v\n", v)
		}
	}
}

func TestExpDecaySample1000(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 100; i++ {
		s.Update(10)
	}
	time.Sleep(1 * time.Millisecond)
	for i := 0; i < 100; i++ {
		s.Update(20)
	}
	v := s.Values()
	avg := float64(0)
	for i := 0; i < len(v); i++ {
		avg += float64(v[i])
	}
	avg /= float64(len(v))
	if avg > 16 || avg < 14 {
		t.Errorf("out of range [14, 16]: %v\n", avg)
	}
}

func TestExpDecaySampleRescale(t *testing.T) {
	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
	s.update(time.Now(), 1)
	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
	for _, v := range s.values.Values() {
		if v.k == 0.0 {
			t.Fatal("v.k == 0.0")
		}
	}
}

func TestExpDecaySampleSnapshot(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testExpDecaySampleStatistics(t, snapshot)
}

func TestExpDecaySampleStatistics(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	testExpDecaySampleStatistics(t, s)
}

func TestUniformSample(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 100): %v\n", v)
		}
	}
}

func TestUniformSampleIncludesTail(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	max := 100
	for i := 0; i < max; i++ {
		s.Update(int64(i))
	}
	v := s.Values()
	sum := 0
	exp := (max - 1) * max / 2
	for i := 0; i < len(v); i++ {
		sum += int(v[i])
	}
	if exp != sum {
		t.Errorf("sum: %v != %v\n", exp, sum)
	}
}

func TestUniformSampleSnapshot(t *testing.T) {
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testUniformSampleStatistics(t, snapshot)
}

func TestUniformSampleStatistics(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	testUniformSampleStatistics(t, s)
}

func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}

func testExpDecaySampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 107 != min {
		t.Errorf("s.Min(): 107 != %v\n", min)
	}
	if max := s.Max(); 10000 != max {
		t.Errorf("s.Max(): 10000 != %v\n", max)
	}
	if mean := s.Mean(); 4965.98 != mean {
		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4615 != ps[0] {
		t.Errorf("median: 4615 != %v\n", ps[0])
	}
	if 7672 != ps[1] {
		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
	}
	if 9998.99 != ps[2] {
		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
	}
}

func testUniformSampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 37 != min {
		t.Errorf("s.Min(): 37 != %v\n", min)
	}
	if max := s.Max(); 9989 != max {
		t.Errorf("s.Max(): 9989 != %v\n", max)
	}
	if mean := s.Mean(); 4748.14 != mean {
		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4599 != ps[0] {
		t.Errorf("median: 4599 != %v\n", ps[0])
	}
	if 7380.5 != ps[1] {
		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
	}
	if 9986.429999999998 != ps[2] {
		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
	}
}

// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on Sample when test is called with -race
// argument
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		t := time.NewTicker(10 * time.Millisecond)
		for {
			select {
			case <-t.C:
				s.Update(rand.Int63())
			case <-quit:
				t.Stop()
				return
			}
		}
	}()
	for i := 0; i < 1000; i++ {
		s.Count()
		time.Sleep(5 * time.Millisecond)
	}
	quit <- struct{}{}
}
pkg/metrics/simple_timer.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package metrics

//import "sync/atomic"

type SimpleTimer interface {
	Metric

	AddTiming(int64)
	Mean() float64
	Min() int64
	Max() int64
	Count() int64
}

type StandardSimpleTimer struct {
	*MetricMeta

	total int64
	count int64
	mean  float64
	min   int64
	max   int64
}

func NewSimpleTimer(meta *MetricMeta) SimpleTimer {
	return &StandardSimpleTimer{
		MetricMeta: meta,
		mean:       0,
		min:        0,
		max:        0,
		total:      0,
		count:      0,
	}
}

func RegSimpleTimer(name string, tagStrings ...string) SimpleTimer {
	tr := NewSimpleTimer(NewMetricMeta(name, tagStrings))
	MetricStats.Register(tr)
	return tr
}

func (this *StandardSimpleTimer) AddTiming(time int64) {
	if this.min > time {
		this.min = time
	}

	if this.max < time {
		this.max = time
	}

	this.total += time
	this.count++
	this.mean = float64(this.total) / float64(this.count)
}

func (this *StandardSimpleTimer) Clear() {
	this.mean = 0
	this.min = 0
	this.max = 0
	this.total = 0
	this.count = 0
}

func (this *StandardSimpleTimer) Mean() float64 {
	return this.mean
}

func (this *StandardSimpleTimer) Min() int64 {
	return this.min
}

func (this *StandardSimpleTimer) Max() int64 {
	return this.max
}

func (this *StandardSimpleTimer) Count() int64 {
	return this.count
}

func (this *StandardSimpleTimer) Snapshot() Metric {
	return &StandardSimpleTimer{
		MetricMeta: this.MetricMeta,
		mean:       this.mean,
		min:        this.min,
		max:        this.max,
		total:      this.total,
		count:      this.count,
	}
}
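SimpleTimer keeps only running aggregates (count, total, mean, min, max), so it is cheaper than the full Timer in the next hunk but exposes no percentiles or rates. A hedged usage sketch follows; the metric name and import path are illustrative, and it assumes the MetricStats registry has already been set up by the application:

	package main

	import (
		"fmt"

		"github.com/grafana/grafana/pkg/metrics" // assumed import path
	)

	func main() {
		// Illustrative metric name; RegSimpleTimer registers the timer with MetricStats.
		pageTimer := metrics.RegSimpleTimer("page.load_time")
		pageTimer.AddTiming(42) // record one timing, e.g. in milliseconds
		pageTimer.AddTiming(87)
		fmt.Printf("count=%d mean=%.1f min=%d max=%d\n",
			pageTimer.Count(), pageTimer.Mean(), pageTimer.Min(), pageTimer.Max())
	}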
@@ -1,84 +1,309 @@
// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

//import "sync/atomic"
import (
	"sync"
	"time"
)

// Timers capture the duration and rate of events.
type Timer interface {
	Metric

	AddTiming(int64)
	Avg() int64
	Min() int64
	Max() int64
	Count() int64
	Max() int64
	Mean() float64
	Min() int64
	Percentile(float64) float64
	Percentiles([]float64) []float64
	Rate1() float64
	Rate5() float64
	Rate15() float64
	RateMean() float64
	StdDev() float64
	Sum() int64
	Time(func())
	Update(time.Duration)
	UpdateSince(time.Time)
	Variance() float64
}

type StandardTimer struct {
	*MetricMeta

	total int64
	count int64
	avg   int64
	min   int64
	max   int64
}

func NewTimer(meta *MetricMeta) Timer {
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer {
	if UseNilMetrics {
		return NilTimer{}
	}
	return &StandardTimer{
		MetricMeta: meta,
		avg:        0,
		min:        0,
		max:        0,
		total:      0,
		count:      0,
		histogram:  h,
		meter:      m,
	}
}

func (this *StandardTimer) AddTiming(time int64) {
	if this.min > time {
		this.min = time
// NewTimer constructs a new StandardTimer using an exponentially-decaying
// sample with the same reservoir size and alpha as UNIX load averages.
func NewTimer(meta *MetricMeta) Timer {
	if UseNilMetrics {
		return NilTimer{}
	}

	if this.max < time {
		this.max = time
	}

	this.total += time
	this.count++

	this.avg = this.total / this.count
}

func (this *StandardTimer) Clear() {
	this.avg = 0
	this.min = 0
	this.max = 0
	this.total = 0
	this.count = 0
}

func (this *StandardTimer) Avg() int64 {
	return this.avg
}

func (this *StandardTimer) Min() int64 {
	return this.min
}

func (this *StandardTimer) Max() int64 {
	return this.max
}

func (this *StandardTimer) Count() int64 {
	return this.count
}

func (this *StandardTimer) Snapshot() Metric {
	return &StandardTimer{
		MetricMeta: this.MetricMeta,
		avg:        this.avg,
		min:        this.min,
		max:        this.max,
		total:      this.total,
		count:      this.count,
		MetricMeta: meta,
		histogram:  NewHistogram(meta, NewExpDecaySample(1028, 0.015)),
		meter:      NewMeter(meta),
	}
}

func RegTimer(name string, tagStrings ...string) Timer {
	tr := NewTimer(NewMetricMeta(name, tagStrings))
	MetricStats.Register(tr)
	return tr
}

// NilTimer is a no-op Timer.
type NilTimer struct {
	*MetricMeta
	h Histogram
	m Meter
}

// Count is a no-op.
func (NilTimer) Count() int64 { return 0 }

// Max is a no-op.
func (NilTimer) Max() int64 { return 0 }

// Mean is a no-op.
func (NilTimer) Mean() float64 { return 0.0 }

// Min is a no-op.
func (NilTimer) Min() int64 { return 0 }

// Percentile is a no-op.
func (NilTimer) Percentile(p float64) float64 { return 0.0 }

// Percentiles is a no-op.
func (NilTimer) Percentiles(ps []float64) []float64 {
	return make([]float64, len(ps))
}

// Rate1 is a no-op.
func (NilTimer) Rate1() float64 { return 0.0 }

// Rate5 is a no-op.
func (NilTimer) Rate5() float64 { return 0.0 }

// Rate15 is a no-op.
func (NilTimer) Rate15() float64 { return 0.0 }

// RateMean is a no-op.
func (NilTimer) RateMean() float64 { return 0.0 }

// Snapshot is a no-op.
func (n NilTimer) Snapshot() Metric { return n }

// StdDev is a no-op.
func (NilTimer) StdDev() float64 { return 0.0 }

// Sum is a no-op.
func (NilTimer) Sum() int64 { return 0 }

// Time is a no-op.
func (NilTimer) Time(func()) {}

// Update is a no-op.
func (NilTimer) Update(time.Duration) {}

// UpdateSince is a no-op.
func (NilTimer) UpdateSince(time.Time) {}

// Variance is a no-op.
func (NilTimer) Variance() float64 { return 0.0 }

// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
type StandardTimer struct {
	*MetricMeta
	histogram Histogram
	meter     Meter
	mutex     sync.Mutex
}

// Count returns the number of events recorded.
func (t *StandardTimer) Count() int64 {
	return t.histogram.Count()
}

// Max returns the maximum value in the sample.
func (t *StandardTimer) Max() int64 {
	return t.histogram.Max()
}

// Mean returns the mean of the values in the sample.
func (t *StandardTimer) Mean() float64 {
	return t.histogram.Mean()
}

// Min returns the minimum value in the sample.
func (t *StandardTimer) Min() int64 {
	return t.histogram.Min()
}

// Percentile returns an arbitrary percentile of the values in the sample.
func (t *StandardTimer) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second.
func (t *StandardTimer) Rate1() float64 {
	return t.meter.Rate1()
}

// Rate5 returns the five-minute moving average rate of events per second.
func (t *StandardTimer) Rate5() float64 {
	return t.meter.Rate5()
}

// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *StandardTimer) Rate15() float64 {
	return t.meter.Rate15()
}

// RateMean returns the meter's mean rate of events per second.
func (t *StandardTimer) RateMean() float64 {
	return t.meter.RateMean()
}

// Snapshot returns a read-only copy of the timer.
func (t *StandardTimer) Snapshot() Metric {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	return &TimerSnapshot{
		MetricMeta: t.MetricMeta,
		histogram:  t.histogram.Snapshot().(*HistogramSnapshot),
		meter:      t.meter.Snapshot().(*MeterSnapshot),
	}
}

// StdDev returns the standard deviation of the values in the sample.
func (t *StandardTimer) StdDev() float64 {
	return t.histogram.StdDev()
}

// Sum returns the sum in the sample.
func (t *StandardTimer) Sum() int64 {
	return t.histogram.Sum()
}

// Record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
	ts := time.Now()
	f()
	t.Update(time.Since(ts))
}

// Record the duration of an event.
func (t *StandardTimer) Update(d time.Duration) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(d))
	t.meter.Mark(1)
}

// Record the duration of an event that started at a time and ends now.
func (t *StandardTimer) UpdateSince(ts time.Time) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(time.Since(ts)))
	t.meter.Mark(1)
}

// Variance returns the variance of the values in the sample.
func (t *StandardTimer) Variance() float64 {
	return t.histogram.Variance()
}

// TimerSnapshot is a read-only copy of another Timer.
type TimerSnapshot struct {
	*MetricMeta
	histogram *HistogramSnapshot
	meter     *MeterSnapshot
}

// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }

// Max returns the maximum value at the time the snapshot was taken.
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }

// Mean returns the mean value at the time the snapshot was taken.
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }

// Min returns the minimum value at the time the snapshot was taken.
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }

// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *TimerSnapshot) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }

// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }

// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }

// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }

// Snapshot returns the snapshot.
func (t *TimerSnapshot) Snapshot() Metric { return t }

// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }

// Sum returns the sum at the time the snapshot was taken.
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }

// Time panics.
func (*TimerSnapshot) Time(func()) {
	panic("Time called on a TimerSnapshot")
}

// Update panics.
func (*TimerSnapshot) Update(time.Duration) {
	panic("Update called on a TimerSnapshot")
}

// UpdateSince panics.
func (*TimerSnapshot) UpdateSince(time.Time) {
	panic("UpdateSince called on a TimerSnapshot")
}

// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
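The new Timer composes a Histogram (backed by the 1028-entry exponentially-decaying sample) for percentiles and a Meter for rates. A minimal usage sketch follows; the metric name and import path are illustrative, and it assumes the MetricStats registry has been initialized:

	package main

	import (
		"fmt"
		"time"

		"github.com/grafana/grafana/pkg/metrics" // assumed import path
	)

	func main() {
		timer := metrics.RegTimer("api.request_latency") // illustrative name

		// Record a duration directly...
		timer.Update(42 * time.Millisecond)

		// ...or time a function call...
		timer.Time(func() {
			time.Sleep(5 * time.Millisecond)
		})

		// ...or measure since a start time.
		start := time.Now()
		time.Sleep(2 * time.Millisecond)
		timer.UpdateSince(start)

		// Percentiles come from the histogram's decaying sample, rates from the meter.
		fmt.Println(timer.Percentiles([]float64{0.25, 0.75, 0.90, 0.99}))
		fmt.Println(timer.Count(), timer.Rate1())
	}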
@@ -39,12 +39,12 @@ func Logger() macaron.Handler {
		rw := res.(macaron.ResponseWriter)
		c.Next()

		timeTakenMs := int64(time.Since(start) / time.Millisecond)
		timeTakenMs := time.Since(start) / time.Millisecond
		content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dms", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), timeTakenMs)

		if timer, ok := c.Data["perfmon.timer"]; ok {
			timerTyped := timer.(metrics.Timer)
			timerTyped.AddTiming(timeTakenMs)
			timerTyped.Update(timeTakenMs)
		}

		switch rw.Status() {
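For the lookup above to find anything, an earlier handler has to put a Timer into the request context under the "perfmon.timer" key. Below is a purely hypothetical sketch of such a handler; the MeasureRequest name, the metric name, and the macaron import path are not from the commit:

	package middleware

	import (
		"gopkg.in/macaron.v1" // import path may differ in the vendored tree

		"github.com/grafana/grafana/pkg/metrics" // assumed import path
	)

	// MeasureRequest is a hypothetical middleware that registers a Timer once
	// and exposes it to Logger() via c.Data["perfmon.timer"].
	func MeasureRequest() macaron.Handler {
		timer := metrics.RegTimer("http.request_latency") // illustrative name
		return func(c *macaron.Context) {
			c.Data["perfmon.timer"] = timer
		}
	}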