mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
Merge branch 'master' into alerting_definitions
Conflicts: conf/defaults.ini pkg/api/dashboard.go
This commit is contained in:
commit
f36a44c49a
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@ -1,7 +1,7 @@
|
|||||||
* **I'm submitting a ...**
|
* **I'm submitting a ...**
|
||||||
- [ ] Bug report
|
- [ ] Bug report
|
||||||
- [ ] Feature request
|
- [ ] Feature request
|
||||||
- [ ] Question / Support request: **Please do not** open a github issue. [Support Options](https://grafana.org/support/)
|
- [ ] Question / Support request: **Please do not** open a github issue. [Support Options](http://grafana.org/support/)
|
||||||
|
|
||||||
Please include this information:
|
Please include this information:
|
||||||
- What Grafana version are you using?
|
- What Grafana version are you using?
|
||||||
|
16
Godeps/Godeps.json
generated
16
Godeps/Godeps.json
generated
@ -1,6 +1,7 @@
|
|||||||
{
|
{
|
||||||
"ImportPath": "github.com/grafana/grafana",
|
"ImportPath": "github.com/grafana/grafana",
|
||||||
"GoVersion": "go1.5.1",
|
"GoVersion": "go1.5.1",
|
||||||
|
"GodepVersion": "v60",
|
||||||
"Packages": [
|
"Packages": [
|
||||||
"./pkg/..."
|
"./pkg/..."
|
||||||
],
|
],
|
||||||
@ -226,6 +227,21 @@
|
|||||||
"ImportPath": "github.com/hashicorp/go-version",
|
"ImportPath": "github.com/hashicorp/go-version",
|
||||||
"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
|
"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdata/influxdb/client",
|
||||||
|
"Comment": "v0.13.0-74-g2c9d0fc",
|
||||||
|
"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdata/influxdb/models",
|
||||||
|
"Comment": "v0.13.0-74-g2c9d0fc",
|
||||||
|
"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdata/influxdb/pkg/escape",
|
||||||
|
"Comment": "v0.13.0-74-g2c9d0fc",
|
||||||
|
"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/jmespath/go-jmespath",
|
"ImportPath": "github.com/jmespath/go-jmespath",
|
||||||
"Comment": "0.2.2",
|
"Comment": "0.2.2",
|
||||||
|
20
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE
generated
vendored
Normal file
20
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2013-2016 Errplane Inc.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
27
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
# List
|
||||||
|
- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
|
||||||
|
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
|
||||||
|
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
|
||||||
|
- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
|
||||||
|
- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
|
||||||
|
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
|
||||||
|
- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
|
||||||
|
- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
|
||||||
|
- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
|
||||||
|
- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
|
||||||
|
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
|
||||||
|
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
|
||||||
|
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
||||||
|
- github.com/hashicorp/raft-boltdb [MOZILLA PUBLIC LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
||||||
|
- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
|
||||||
|
- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
|
||||||
|
- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
|
||||||
|
- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
|
||||||
|
- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
|
||||||
|
- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
|
||||||
|
- glyphicons [LICENSE](http://glyphicons.com/license/)
|
||||||
|
- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||||
|
- golang.org/x/tools [BSD LICENSE](https://github.com/golang/tools/blob/master/LICENSE)
|
||||||
|
- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
|
||||||
|
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
|
||||||
|
- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)
|
267
Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md
generated
vendored
Normal file
267
Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md
generated
vendored
Normal file
@ -0,0 +1,267 @@
|
|||||||
|
# InfluxDB Client
|
||||||
|
|
||||||
|
[](http://godoc.org/github.com/influxdata/influxdb/client/v2)
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
**NOTE:** The Go client library now has a "v2" version, with the old version
|
||||||
|
being deprecated. The new version can be imported at
|
||||||
|
`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
|
||||||
|
|
||||||
|
A Go client library written and maintained by the **InfluxDB** team.
|
||||||
|
This package provides convenience functions to read and write time series data.
|
||||||
|
It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
|
||||||
|
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
### Connecting To Your Database
|
||||||
|
|
||||||
|
Connecting to an **InfluxDB** database is straightforward. You will need a host
|
||||||
|
name, a port and the cluster user credentials if applicable. The default port is
|
||||||
|
8086. You can customize these settings to your specific installation via the
|
||||||
|
**InfluxDB** configuration file.
|
||||||
|
|
||||||
|
Though not necessary for experimentation, you may want to create a new user
|
||||||
|
and authenticate the connection to your database.
|
||||||
|
|
||||||
|
For more information please check out the
|
||||||
|
[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
|
||||||
|
|
||||||
|
For the impatient, you can create a new admin user _bubba_ by firing off the
|
||||||
|
[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
|
||||||
|
|
||||||
|
```shell
|
||||||
|
influx
|
||||||
|
> create user bubba with password 'bumblebeetuna'
|
||||||
|
> grant all privileges to bubba
|
||||||
|
```
|
||||||
|
|
||||||
|
And now for good measure set the credentials in you shell environment.
|
||||||
|
In the example below we will use $INFLUX_USER and $INFLUX_PWD
|
||||||
|
|
||||||
|
Now with the administrivia out of the way, let's connect to our database.
|
||||||
|
|
||||||
|
NOTE: If you've opted out of creating a user, you can omit Username and Password in
|
||||||
|
the configuration below.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/influxdb/client/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
MyDB = "square_holes"
|
||||||
|
username = "bubba"
|
||||||
|
password = "bumblebeetuna"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Make client
|
||||||
|
c, err := client.NewHTTPClient(client.HTTPConfig{
|
||||||
|
Addr: "http://localhost:8086",
|
||||||
|
Username: username,
|
||||||
|
Password: password,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalln("Error: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new point batch
|
||||||
|
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
|
||||||
|
Database: MyDB,
|
||||||
|
Precision: "s",
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalln("Error: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a point and add to batch
|
||||||
|
tags := map[string]string{"cpu": "cpu-total"}
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"idle": 10.1,
|
||||||
|
"system": 53.3,
|
||||||
|
"user": 46.6,
|
||||||
|
}
|
||||||
|
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalln("Error: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bp.AddPoint(pt)
|
||||||
|
|
||||||
|
// Write the batch
|
||||||
|
c.Write(bp)
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Inserting Data
|
||||||
|
|
||||||
|
Time series data aka *points* are written to the database using batch inserts.
|
||||||
|
The mechanism is to create one or more points and then create a batch aka
|
||||||
|
*batch points* and write these to a given database and series. A series is a
|
||||||
|
combination of a measurement (time/values) and a set of tags.
|
||||||
|
|
||||||
|
In this sample we will create a batch of a 1,000 points. Each point has a time and
|
||||||
|
a single value as well as 2 tags indicating a shape and color. We write these points
|
||||||
|
to a database called _square_holes_ using a measurement named _shapes_.
|
||||||
|
|
||||||
|
NOTE: You can specify a RetentionPolicy as part of the batch points. If not
|
||||||
|
provided InfluxDB will use the database _default_ retention policy.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func writePoints(clnt client.Client) {
|
||||||
|
sampleSize := 1000
|
||||||
|
rand.Seed(42)
|
||||||
|
|
||||||
|
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
|
||||||
|
Database: "systemstats",
|
||||||
|
Precision: "us",
|
||||||
|
})
|
||||||
|
|
||||||
|
for i := 0; i < sampleSize; i++ {
|
||||||
|
regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
|
||||||
|
tags := map[string]string{
|
||||||
|
"cpu": "cpu-total",
|
||||||
|
"host": fmt.Sprintf("host%d", rand.Intn(1000)),
|
||||||
|
"region": regions[rand.Intn(len(regions))],
|
||||||
|
}
|
||||||
|
|
||||||
|
idle := rand.Float64() * 100.0
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"idle": idle,
|
||||||
|
"busy": 100.0 - idle,
|
||||||
|
}
|
||||||
|
|
||||||
|
bp.AddPoint(client.NewPoint(
|
||||||
|
"cpu_usage",
|
||||||
|
tags,
|
||||||
|
fields,
|
||||||
|
time.Now(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
err := clnt.Write(bp)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Querying Data
|
||||||
|
|
||||||
|
One nice advantage of using **InfluxDB** the ability to query your data using familiar
|
||||||
|
SQL constructs. In this example we can create a convenience function to query the database
|
||||||
|
as follows:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// queryDB convenience function to query the database
|
||||||
|
func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
|
||||||
|
q := client.Query{
|
||||||
|
Command: cmd,
|
||||||
|
Database: MyDB,
|
||||||
|
}
|
||||||
|
if response, err := clnt.Query(q); err == nil {
|
||||||
|
if response.Error() != nil {
|
||||||
|
return res, response.Error()
|
||||||
|
}
|
||||||
|
res = response.Results
|
||||||
|
} else {
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Creating a Database
|
||||||
|
|
||||||
|
```go
|
||||||
|
_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Count Records
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
|
||||||
|
res, err := queryDB(clnt, q)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
count := res[0].Series[0].Values[0][1]
|
||||||
|
log.Printf("Found a total of %v records\n", count)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Find the last 10 _shapes_ records
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
|
||||||
|
res, err = queryDB(clnt, q)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, row := range res[0].Series[0].Values {
|
||||||
|
t, err := time.Parse(time.RFC3339, row[0].(string))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
val := row[1].(string)
|
||||||
|
log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using the UDP Client
|
||||||
|
|
||||||
|
The **InfluxDB** client also supports writing over UDP.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func WriteUDP() {
|
||||||
|
// Make client
|
||||||
|
c := client.NewUDPClient("localhost:8089")
|
||||||
|
|
||||||
|
// Create a new point batch
|
||||||
|
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
|
||||||
|
Precision: "s",
|
||||||
|
})
|
||||||
|
|
||||||
|
// Create a point and add to batch
|
||||||
|
tags := map[string]string{"cpu": "cpu-total"}
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"idle": 10.1,
|
||||||
|
"system": 53.3,
|
||||||
|
"user": 46.6,
|
||||||
|
}
|
||||||
|
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
|
||||||
|
if err != nil {
|
||||||
|
panic(err.Error())
|
||||||
|
}
|
||||||
|
bp.AddPoint(pt)
|
||||||
|
|
||||||
|
// Write the batch
|
||||||
|
c.Write(bp)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Go Docs
|
||||||
|
|
||||||
|
Please refer to
|
||||||
|
[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
|
||||||
|
for documentation.
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
You can also examine how the client library is used by the
|
||||||
|
[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
|
789
Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go
generated
vendored
Normal file
789
Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go
generated
vendored
Normal file
@ -0,0 +1,789 @@
|
|||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/influxdb/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultHost is the default host used to connect to an InfluxDB instance
|
||||||
|
DefaultHost = "localhost"
|
||||||
|
|
||||||
|
// DefaultPort is the default port used to connect to an InfluxDB instance
|
||||||
|
DefaultPort = 8086
|
||||||
|
|
||||||
|
// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
|
||||||
|
DefaultTimeout = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
// Query is used to send a command to the server. Both Command and Database are required.
|
||||||
|
type Query struct {
|
||||||
|
Command string
|
||||||
|
Database string
|
||||||
|
|
||||||
|
// Chunked tells the server to send back chunked responses. This places
|
||||||
|
// less load on the server by sending back chunks of the response rather
|
||||||
|
// than waiting for the entire response all at once.
|
||||||
|
Chunked bool
|
||||||
|
|
||||||
|
// ChunkSize sets the maximum number of rows that will be returned per
|
||||||
|
// chunk. Chunks are either divided based on their series or if they hit
|
||||||
|
// the chunk size limit.
|
||||||
|
//
|
||||||
|
// Chunked must be set to true for this option to be used.
|
||||||
|
ChunkSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseConnectionString will parse a string to create a valid connection URL
|
||||||
|
func ParseConnectionString(path string, ssl bool) (url.URL, error) {
|
||||||
|
var host string
|
||||||
|
var port int
|
||||||
|
|
||||||
|
h, p, err := net.SplitHostPort(path)
|
||||||
|
if err != nil {
|
||||||
|
if path == "" {
|
||||||
|
host = DefaultHost
|
||||||
|
} else {
|
||||||
|
host = path
|
||||||
|
}
|
||||||
|
// If they didn't specify a port, always use the default port
|
||||||
|
port = DefaultPort
|
||||||
|
} else {
|
||||||
|
host = h
|
||||||
|
port, err = strconv.Atoi(p)
|
||||||
|
if err != nil {
|
||||||
|
return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u := url.URL{
|
||||||
|
Scheme: "http",
|
||||||
|
}
|
||||||
|
if ssl {
|
||||||
|
u.Scheme = "https"
|
||||||
|
}
|
||||||
|
|
||||||
|
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
|
||||||
|
|
||||||
|
return u, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config is used to specify what server to connect to.
|
||||||
|
// URL: The URL of the server connecting to.
|
||||||
|
// Username/Password are optional. They will be passed via basic auth if provided.
|
||||||
|
// UserAgent: If not provided, will default "InfluxDBClient",
|
||||||
|
// Timeout: If not provided, will default to 0 (no timeout)
|
||||||
|
type Config struct {
|
||||||
|
URL url.URL
|
||||||
|
Username string
|
||||||
|
Password string
|
||||||
|
UserAgent string
|
||||||
|
Timeout time.Duration
|
||||||
|
Precision string
|
||||||
|
UnsafeSsl bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfig will create a config to be used in connecting to the client
|
||||||
|
func NewConfig() Config {
|
||||||
|
return Config{
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client is used to make calls to the server.
|
||||||
|
type Client struct {
|
||||||
|
url url.URL
|
||||||
|
username string
|
||||||
|
password string
|
||||||
|
httpClient *http.Client
|
||||||
|
userAgent string
|
||||||
|
precision string
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ConsistencyOne requires at least one data node acknowledged a write.
|
||||||
|
ConsistencyOne = "one"
|
||||||
|
|
||||||
|
// ConsistencyAll requires all data nodes to acknowledge a write.
|
||||||
|
ConsistencyAll = "all"
|
||||||
|
|
||||||
|
// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
|
||||||
|
ConsistencyQuorum = "quorum"
|
||||||
|
|
||||||
|
// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
|
||||||
|
ConsistencyAny = "any"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewClient will instantiate and return a connected client to issue commands to the server.
|
||||||
|
func NewClient(c Config) (*Client, error) {
|
||||||
|
tlsConfig := &tls.Config{
|
||||||
|
InsecureSkipVerify: c.UnsafeSsl,
|
||||||
|
}
|
||||||
|
|
||||||
|
tr := &http.Transport{
|
||||||
|
TLSClientConfig: tlsConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := Client{
|
||||||
|
url: c.URL,
|
||||||
|
username: c.Username,
|
||||||
|
password: c.Password,
|
||||||
|
httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
|
||||||
|
userAgent: c.UserAgent,
|
||||||
|
precision: c.Precision,
|
||||||
|
}
|
||||||
|
if client.userAgent == "" {
|
||||||
|
client.userAgent = "InfluxDBClient"
|
||||||
|
}
|
||||||
|
return &client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAuth will update the username and passwords
|
||||||
|
func (c *Client) SetAuth(u, p string) {
|
||||||
|
c.username = u
|
||||||
|
c.password = p
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPrecision will update the precision
|
||||||
|
func (c *Client) SetPrecision(precision string) {
|
||||||
|
c.precision = precision
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query sends a command to the server and returns the Response
|
||||||
|
func (c *Client) Query(q Query) (*Response, error) {
|
||||||
|
u := c.url
|
||||||
|
|
||||||
|
u.Path = "query"
|
||||||
|
values := u.Query()
|
||||||
|
values.Set("q", q.Command)
|
||||||
|
values.Set("db", q.Database)
|
||||||
|
if q.Chunked {
|
||||||
|
values.Set("chunked", "true")
|
||||||
|
if q.ChunkSize > 0 {
|
||||||
|
values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.precision != "" {
|
||||||
|
values.Set("epoch", c.precision)
|
||||||
|
}
|
||||||
|
u.RawQuery = values.Encode()
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", u.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("User-Agent", c.userAgent)
|
||||||
|
if c.username != "" {
|
||||||
|
req.SetBasicAuth(c.username, c.password)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var response Response
|
||||||
|
if q.Chunked {
|
||||||
|
cr := NewChunkedResponse(resp.Body)
|
||||||
|
for {
|
||||||
|
r, err := cr.NextResponse()
|
||||||
|
if err != nil {
|
||||||
|
// If we got an error while decoding the response, send that back.
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if r == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Results = append(response.Results, r.Results...)
|
||||||
|
if r.Err != nil {
|
||||||
|
response.Err = r.Err
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
dec := json.NewDecoder(resp.Body)
|
||||||
|
dec.UseNumber()
|
||||||
|
if err := dec.Decode(&response); err != nil {
|
||||||
|
// Ignore EOF errors if we got an invalid status code.
|
||||||
|
if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we don't have an error in our json response, and didn't get StatusOK,
|
||||||
|
// then send back an error.
|
||||||
|
if resp.StatusCode != http.StatusOK && response.Error() == nil {
|
||||||
|
return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
|
||||||
|
}
|
||||||
|
return &response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write takes BatchPoints and allows for writing of multiple points with defaults
|
||||||
|
// If successful, error is nil and Response is nil
|
||||||
|
// If an error occurs, Response may contain additional information if populated.
|
||||||
|
func (c *Client) Write(bp BatchPoints) (*Response, error) {
|
||||||
|
u := c.url
|
||||||
|
u.Path = "write"
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
for _, p := range bp.Points {
|
||||||
|
err := checkPointTypes(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if p.Raw != "" {
|
||||||
|
if _, err := b.WriteString(p.Raw); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for k, v := range bp.Tags {
|
||||||
|
if p.Tags == nil {
|
||||||
|
p.Tags = make(map[string]string, len(bp.Tags))
|
||||||
|
}
|
||||||
|
p.Tags[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := b.WriteString(p.MarshalString()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := b.WriteByte('\n'); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", u.String(), &b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "")
|
||||||
|
req.Header.Set("User-Agent", c.userAgent)
|
||||||
|
if c.username != "" {
|
||||||
|
req.SetBasicAuth(c.username, c.password)
|
||||||
|
}
|
||||||
|
|
||||||
|
precision := bp.Precision
|
||||||
|
if precision == "" {
|
||||||
|
precision = c.precision
|
||||||
|
}
|
||||||
|
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("db", bp.Database)
|
||||||
|
params.Set("rp", bp.RetentionPolicy)
|
||||||
|
params.Set("precision", precision)
|
||||||
|
params.Set("consistency", bp.WriteConsistency)
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
resp, err := c.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var response Response
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
|
||||||
|
var err = fmt.Errorf(string(body))
|
||||||
|
response.Err = err
|
||||||
|
return &response, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteLineProtocol takes a string with line returns to delimit each write
|
||||||
|
// If successful, error is nil and Response is nil
|
||||||
|
// If an error occurs, Response may contain additional information if populated.
|
||||||
|
func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
|
||||||
|
u := c.url
|
||||||
|
u.Path = "write"
|
||||||
|
|
||||||
|
r := strings.NewReader(data)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", u.String(), r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "")
|
||||||
|
req.Header.Set("User-Agent", c.userAgent)
|
||||||
|
if c.username != "" {
|
||||||
|
req.SetBasicAuth(c.username, c.password)
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("db", database)
|
||||||
|
params.Set("rp", retentionPolicy)
|
||||||
|
params.Set("precision", precision)
|
||||||
|
params.Set("consistency", writeConsistency)
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
resp, err := c.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var response Response
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
|
||||||
|
err := fmt.Errorf(string(body))
|
||||||
|
response.Err = err
|
||||||
|
return &response, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ping will check to see if the server is up
|
||||||
|
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
|
||||||
|
func (c *Client) Ping() (time.Duration, string, error) {
|
||||||
|
now := time.Now()
|
||||||
|
u := c.url
|
||||||
|
u.Path = "ping"
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", u.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return 0, "", err
|
||||||
|
}
|
||||||
|
req.Header.Set("User-Agent", c.userAgent)
|
||||||
|
if c.username != "" {
|
||||||
|
req.SetBasicAuth(c.username, c.password)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
version := resp.Header.Get("X-Influxdb-Version")
|
||||||
|
return time.Since(now), version, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Structs
|
||||||
|
|
||||||
|
// Message represents a user message.
|
||||||
|
type Message struct {
|
||||||
|
Level string `json:"level,omitempty"`
|
||||||
|
Text string `json:"text,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Result represents a resultset returned from a single statement.
|
||||||
|
type Result struct {
|
||||||
|
Series []models.Row
|
||||||
|
Messages []*Message
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON encodes the result into JSON.
|
||||||
|
func (r *Result) MarshalJSON() ([]byte, error) {
|
||||||
|
// Define a struct that outputs "error" as a string.
|
||||||
|
var o struct {
|
||||||
|
Series []models.Row `json:"series,omitempty"`
|
||||||
|
Messages []*Message `json:"messages,omitempty"`
|
||||||
|
Err string `json:"error,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy fields to output struct.
|
||||||
|
o.Series = r.Series
|
||||||
|
o.Messages = r.Messages
|
||||||
|
if r.Err != nil {
|
||||||
|
o.Err = r.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(&o)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the data into the Result struct
|
||||||
|
func (r *Result) UnmarshalJSON(b []byte) error {
|
||||||
|
var o struct {
|
||||||
|
Series []models.Row `json:"series,omitempty"`
|
||||||
|
Messages []*Message `json:"messages,omitempty"`
|
||||||
|
Err string `json:"error,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(bytes.NewBuffer(b))
|
||||||
|
dec.UseNumber()
|
||||||
|
err := dec.Decode(&o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
r.Series = o.Series
|
||||||
|
r.Messages = o.Messages
|
||||||
|
if o.Err != "" {
|
||||||
|
r.Err = errors.New(o.Err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response represents a list of statement results.
|
||||||
|
type Response struct {
|
||||||
|
Results []Result
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON encodes the response into JSON.
|
||||||
|
func (r *Response) MarshalJSON() ([]byte, error) {
|
||||||
|
// Define a struct that outputs "error" as a string.
|
||||||
|
var o struct {
|
||||||
|
Results []Result `json:"results,omitempty"`
|
||||||
|
Err string `json:"error,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy fields to output struct.
|
||||||
|
o.Results = r.Results
|
||||||
|
if r.Err != nil {
|
||||||
|
o.Err = r.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(&o)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the data into the Response struct
|
||||||
|
func (r *Response) UnmarshalJSON(b []byte) error {
|
||||||
|
var o struct {
|
||||||
|
Results []Result `json:"results,omitempty"`
|
||||||
|
Err string `json:"error,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(bytes.NewBuffer(b))
|
||||||
|
dec.UseNumber()
|
||||||
|
err := dec.Decode(&o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
r.Results = o.Results
|
||||||
|
if o.Err != "" {
|
||||||
|
r.Err = errors.New(o.Err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the first error from any statement.
|
||||||
|
// Returns nil if no errors occurred on any statements.
|
||||||
|
func (r *Response) Error() error {
|
||||||
|
if r.Err != nil {
|
||||||
|
return r.Err
|
||||||
|
}
|
||||||
|
for _, result := range r.Results {
|
||||||
|
if result.Err != nil {
|
||||||
|
return result.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkedResponse represents a response from the server that
// uses chunking to stream the output.
type ChunkedResponse struct {
	// dec decodes successive JSON-encoded Response values from the stream.
	dec *json.Decoder
}
|
||||||
|
|
||||||
|
// NewChunkedResponse reads a stream and produces responses from the stream.
|
||||||
|
func NewChunkedResponse(r io.Reader) *ChunkedResponse {
|
||||||
|
dec := json.NewDecoder(r)
|
||||||
|
dec.UseNumber()
|
||||||
|
return &ChunkedResponse{dec: dec}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextResponse reads the next line of the stream and returns a response.
|
||||||
|
func (r *ChunkedResponse) NextResponse() (*Response, error) {
|
||||||
|
var response Response
|
||||||
|
if err := r.dec.Decode(&response); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Point defines the fields that will be written to the database
// Measurement, Time, and Fields are required
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type Point struct {
	Measurement string                 // series name (required)
	Tags        map[string]string      // optional key/value tags
	Time        time.Time              // timestamp; the zero value means "unset"
	Fields      map[string]interface{} // field key/values (required)
	Precision   string                 // epoch precision: n, u, ms, s, m, or h
	Raw         string                 // NOTE(review): usage not visible in this chunk — presumably a pre-rendered line; confirm at Write call sites
}
|
||||||
|
|
||||||
|
// MarshalJSON will format the time in RFC3339Nano
|
||||||
|
// Precision is also ignored as it is only used for writing, not reading
|
||||||
|
// Or another way to say it is we always send back in nanosecond precision
|
||||||
|
func (p *Point) MarshalJSON() ([]byte, error) {
|
||||||
|
point := struct {
|
||||||
|
Measurement string `json:"measurement,omitempty"`
|
||||||
|
Tags map[string]string `json:"tags,omitempty"`
|
||||||
|
Time string `json:"time,omitempty"`
|
||||||
|
Fields map[string]interface{} `json:"fields,omitempty"`
|
||||||
|
Precision string `json:"precision,omitempty"`
|
||||||
|
}{
|
||||||
|
Measurement: p.Measurement,
|
||||||
|
Tags: p.Tags,
|
||||||
|
Fields: p.Fields,
|
||||||
|
Precision: p.Precision,
|
||||||
|
}
|
||||||
|
// Let it omit empty if it's really zero
|
||||||
|
if !p.Time.IsZero() {
|
||||||
|
point.Time = p.Time.UTC().Format(time.RFC3339Nano)
|
||||||
|
}
|
||||||
|
return json.Marshal(&point)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalString renders string representation of a Point with specified
// precision. The default precision is nanoseconds.
// If the underlying models.Point cannot be built, a "# ERROR: ..." comment
// line is returned instead of valid line protocol.
func (p *Point) MarshalString() string {
	pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
	if err != nil {
		return "# ERROR: " + err.Error() + " " + p.Measurement
	}
	// Nanosecond variants need no precision conversion.
	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
		return pt.String()
	}
	return pt.PrecisionString(p.Precision)
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the data into the Point struct.
//
// Two wire formats are accepted for "time": an integer epoch (interpreted
// with the point's "precision") and an RFC3339 timestamp string. The epoch
// form is tried first; if that decode fails, the time.Time form is used.
func (p *Point) UnmarshalJSON(b []byte) error {
	// Shape used when "time" is an RFC3339 timestamp string.
	var normal struct {
		Measurement string                 `json:"measurement"`
		Tags        map[string]string      `json:"tags"`
		Time        time.Time              `json:"time"`
		Precision   string                 `json:"precision"`
		Fields      map[string]interface{} `json:"fields"`
	}
	// Shape used when "time" is an integer epoch; the pointer distinguishes
	// "absent" from an explicit zero.
	var epoch struct {
		Measurement string                 `json:"measurement"`
		Tags        map[string]string      `json:"tags"`
		Time        *int64                 `json:"time"`
		Precision   string                 `json:"precision"`
		Fields      map[string]interface{} `json:"fields"`
	}

	// First attempt: epoch-style time.
	if err := func() error {
		var err error
		dec := json.NewDecoder(bytes.NewBuffer(b))
		// UseNumber keeps field values as json.Number; normalizeFields
		// converts them to float64 below.
		dec.UseNumber()
		if err = dec.Decode(&epoch); err != nil {
			return err
		}
		// Convert from epoch to time.Time, but only if Time
		// was actually set.
		var ts time.Time
		if epoch.Time != nil {
			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
			if err != nil {
				return err
			}
		}
		p.Measurement = epoch.Measurement
		p.Tags = epoch.Tags
		p.Time = ts
		p.Precision = epoch.Precision
		p.Fields = normalizeFields(epoch.Fields)
		return nil
	}(); err == nil {
		return nil
	}

	// Fallback: RFC3339-style time.
	dec := json.NewDecoder(bytes.NewBuffer(b))
	dec.UseNumber()
	if err := dec.Decode(&normal); err != nil {
		return err
	}
	// Round the parsed timestamp to the declared precision.
	normal.Time = SetPrecision(normal.Time, normal.Precision)
	p.Measurement = normal.Measurement
	p.Tags = normal.Tags
	p.Time = normal.Time
	p.Precision = normal.Precision
	p.Fields = normalizeFields(normal.Fields)

	return nil
}
|
||||||
|
|
||||||
|
// Remove any notion of json.Number
|
||||||
|
func normalizeFields(fields map[string]interface{}) map[string]interface{} {
|
||||||
|
newFields := map[string]interface{}{}
|
||||||
|
|
||||||
|
for k, v := range fields {
|
||||||
|
switch v := v.(type) {
|
||||||
|
case json.Number:
|
||||||
|
jv, e := v.Float64()
|
||||||
|
if e != nil {
|
||||||
|
panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
|
||||||
|
}
|
||||||
|
newFields[k] = jv
|
||||||
|
default:
|
||||||
|
newFields[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newFields
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchPoints is used to send batched data in a single write.
// Database and Points are required
// If no retention policy is specified, it will use the databases default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type BatchPoints struct {
	Points          []Point           `json:"points,omitempty"`
	Database        string            `json:"database,omitempty"`
	RetentionPolicy string            `json:"retentionPolicy,omitempty"`
	Tags            map[string]string `json:"tags,omitempty"`
	Time            time.Time         `json:"time,omitempty"`
	Precision       string            `json:"precision,omitempty"`
	// WriteConsistency is excluded from the JSON encoding (tag "-").
	WriteConsistency string `json:"-"`
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the data into the BatchPoints struct.
//
// As with Point.UnmarshalJSON, two wire formats are accepted for the
// batch-level "time": an integer epoch (interpreted with "precision") and
// an RFC3339 timestamp string. The epoch form is tried first.
func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
	// Shape used when "time" is an RFC3339 timestamp string.
	var normal struct {
		Points          []Point           `json:"points"`
		Database        string            `json:"database"`
		RetentionPolicy string            `json:"retentionPolicy"`
		Tags            map[string]string `json:"tags"`
		Time            time.Time         `json:"time"`
		Precision       string            `json:"precision"`
	}
	// Shape used when "time" is an integer epoch; the pointer distinguishes
	// "absent" from an explicit zero.
	var epoch struct {
		Points          []Point           `json:"points"`
		Database        string            `json:"database"`
		RetentionPolicy string            `json:"retentionPolicy"`
		Tags            map[string]string `json:"tags"`
		Time            *int64            `json:"time"`
		Precision       string            `json:"precision"`
	}

	// First attempt: epoch-style time.
	if err := func() error {
		var err error
		if err = json.Unmarshal(b, &epoch); err != nil {
			return err
		}
		// Convert from epoch to time.Time
		var ts time.Time
		if epoch.Time != nil {
			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
			if err != nil {
				return err
			}
		}
		bp.Points = epoch.Points
		bp.Database = epoch.Database
		bp.RetentionPolicy = epoch.RetentionPolicy
		bp.Tags = epoch.Tags
		bp.Time = ts
		bp.Precision = epoch.Precision
		return nil
	}(); err == nil {
		return nil
	}

	// Fallback: RFC3339-style time.
	if err := json.Unmarshal(b, &normal); err != nil {
		return err
	}
	// Round the parsed timestamp to the declared precision.
	normal.Time = SetPrecision(normal.Time, normal.Precision)
	bp.Points = normal.Points
	bp.Database = normal.Database
	bp.RetentionPolicy = normal.RetentionPolicy
	bp.Tags = normal.Tags
	bp.Time = normal.Time
	bp.Precision = normal.Precision

	return nil
}
|
||||||
|
|
||||||
|
// utility functions

// Addr provides the current url as a string of the server the client is connected to.
func (c *Client) Addr() string {
	return c.url.String()
}
|
||||||
|
|
||||||
|
// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
|
||||||
|
func checkPointTypes(p Point) error {
|
||||||
|
for _, v := range p.Fields {
|
||||||
|
switch v.(type) {
|
||||||
|
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported point type: %T", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// helper functions

// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
func EpochToTime(epoch int64, precision string) (time.Time, error) {
	// An unspecified precision means seconds.
	if precision == "" {
		precision = "s"
	}
	var unit time.Duration
	switch precision {
	case "h":
		unit = time.Hour
	case "m":
		unit = time.Minute
	case "s":
		unit = time.Second
	case "ms":
		unit = time.Millisecond
	case "u":
		unit = time.Microsecond
	case "n":
		unit = time.Nanosecond
	default:
		return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
	}
	return time.Unix(0, epoch*int64(unit)), nil
}
|
||||||
|
|
||||||
|
// SetPrecision will round a time to the specified precision
|
||||||
|
func SetPrecision(t time.Time, precision string) time.Time {
|
||||||
|
switch precision {
|
||||||
|
case "n":
|
||||||
|
case "u":
|
||||||
|
return t.Round(time.Microsecond)
|
||||||
|
case "ms":
|
||||||
|
return t.Round(time.Millisecond)
|
||||||
|
case "s":
|
||||||
|
return t.Round(time.Second)
|
||||||
|
case "m":
|
||||||
|
return t.Round(time.Minute)
|
||||||
|
case "h":
|
||||||
|
return t.Round(time.Hour)
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
46
Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go
generated
vendored
Normal file
46
Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConsistencyLevel represents a required replication criteria before a write can
// be returned as successful.
type ConsistencyLevel int

// The levels are ordered by iota from weakest (Any) to strongest (All).
const (
	// ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet
	ConsistencyLevelAny ConsistencyLevel = iota

	// ConsistencyLevelOne requires at least one data node acknowledged a write
	ConsistencyLevelOne

	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write
	ConsistencyLevelQuorum

	// ConsistencyLevelAll requires all data nodes to acknowledge a write
	ConsistencyLevelAll
)

var (
	// ErrInvalidConsistencyLevel is returned when parsing the string version
	// of a consistency level.
	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)
|
||||||
|
|
||||||
|
// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const
|
||||||
|
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
|
||||||
|
switch strings.ToLower(level) {
|
||||||
|
case "any":
|
||||||
|
return ConsistencyLevelAny, nil
|
||||||
|
case "one":
|
||||||
|
return ConsistencyLevelOne, nil
|
||||||
|
case "quorum":
|
||||||
|
return ConsistencyLevelQuorum, nil
|
||||||
|
case "all":
|
||||||
|
return ConsistencyLevelAll, nil
|
||||||
|
default:
|
||||||
|
return 0, ErrInvalidConsistencyLevel
|
||||||
|
}
|
||||||
|
}
|
1576
Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go
generated
vendored
Normal file
1576
Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
60
Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go
generated
vendored
Normal file
60
Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"hash/fnv"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Row represents a single row returned from the execution of a statement.
type Row struct {
	Name    string            `json:"name,omitempty"`    // series name
	Tags    map[string]string `json:"tags,omitempty"`    // tag key/value pairs identifying the series
	Columns []string          `json:"columns,omitempty"` // column names, parallel to the entries of each Values element
	Values  [][]interface{}   `json:"values,omitempty"`  // row data; each inner slice is one record
	Err     error             `json:"err,omitempty"`     // per-row error, if any
}

// SameSeries returns true if r contains values for the same series as o.
func (r *Row) SameSeries(o *Row) bool {
	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
}

// tagsHash returns a hash of tag key/value pairs.
// Keys are visited in sorted order, so the hash is deterministic
// for a given tag set.
func (r *Row) tagsHash() uint64 {
	h := fnv.New64a()
	keys := r.tagsKeys()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(r.Tags[k]))
	}
	return h.Sum64()
}

// tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
	a := make([]string, 0, len(r.Tags))
	for k := range r.Tags {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}
|
||||||
|
|
||||||
|
// Rows represents a collection of rows. Rows implements sort.Interface.
type Rows []*Row

// Len reports the number of rows.
func (p Rows) Len() int { return len(p) }

// Less orders rows by series name first, then by tag-set hash.
func (p Rows) Less(i, j int) bool {
	// Sort by name first.
	if p[i].Name != p[j].Name {
		return p[i].Name < p[j].Name
	}

	// Sort by tag set hash. Tags don't have a meaningful sort order so we
	// just compute a hash and sort by that instead. This allows the tests
	// to receive rows in a predictable order every time.
	return p[i].tagsHash() < p[j].tagsHash()
}

// Swap exchanges the rows at positions i and j.
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
51
Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go
generated
vendored
Normal file
51
Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
// Helper time methods since parsing time can easily overflow and we only support a
|
||||||
|
// specific time range.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch.
	MaxNanoTime = time.Unix(0, math.MaxInt64).UTC()
	// MinNanoTime is the minimum time that can be represented via int64 nanoseconds since the epoch.
	MinNanoTime = time.Unix(0, math.MinInt64).UTC()

	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
	ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime)
)
|
||||||
|
|
||||||
|
// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
// supported range.
func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
	mult := GetPrecisionMultiplier(precision)
	// Overflow in timestamp*mult means the instant cannot be represented
	// as int64 nanoseconds, so it is rejected.
	if t, ok := safeSignedMult(timestamp, mult); ok {
		return time.Unix(0, t).UTC(), nil
	}

	return time.Time{}, ErrTimeOutOfRange
}
|
||||||
|
|
||||||
|
// CheckTime checks that a time is within the safe range.
|
||||||
|
func CheckTime(t time.Time) error {
|
||||||
|
if t.Before(MinNanoTime) || t.After(MaxNanoTime) {
|
||||||
|
return ErrTimeOutOfRange
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform the multiplication and check to make sure it didn't overflow.
|
||||||
|
func safeSignedMult(a, b int64) (int64, bool) {
|
||||||
|
if a == 0 || b == 0 || a == 1 || b == 1 {
|
||||||
|
return a * b, true
|
||||||
|
}
|
||||||
|
if a == math.MinInt64 || b == math.MaxInt64 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
c := a * b
|
||||||
|
return c, c/b == a
|
||||||
|
}
|
53
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go
generated
vendored
Normal file
53
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
package escape
|
||||||
|
|
||||||
|
import "bytes"
|
||||||
|
|
||||||
|
func Bytes(in []byte) []byte {
|
||||||
|
for b, esc := range Codes {
|
||||||
|
in = bytes.Replace(in, []byte{b}, esc, -1)
|
||||||
|
}
|
||||||
|
return in
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unescape reverses the escaping applied by Bytes. It returns nil for
// empty input, and the input slice itself when it contains no backslash.
func Unescape(in []byte) []byte {
	if len(in) == 0 {
		return nil
	}
	// Fast path: nothing escaped.
	if bytes.IndexByte(in, '\\') == -1 {
		return in
	}

	var out []byte
	for i, n := 0, len(in); i < n; {
		// A backslash followed by a known escapable byte collapses to
		// that byte; anything else is copied verbatim.
		if in[i] == '\\' && i+1 < n {
			switch c := in[i+1]; c {
			case ',', '"', ' ', '=':
				out = append(out, c)
				i += 2
				continue
			}
		}
		out = append(out, in[i])
		i++
	}
	return out
}
|
34
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go
generated
vendored
Normal file
34
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
package escape
|
||||||
|
|
||||||
|
import "strings"
|
||||||
|
|
||||||
|
var (
	// Codes maps each byte with special meaning in the line protocol to
	// its backslash-escaped form.
	Codes = map[byte][]byte{
		',': []byte(`\,`),
		'"': []byte(`\"`),
		' ': []byte(`\ `),
		'=': []byte(`\=`),
	}

	// codesStr mirrors Codes with string keys/values; populated by init.
	codesStr = map[string]string{}
)
|
||||||
|
|
||||||
|
// init derives the string form of the escape table from Codes so the
// string-based helpers avoid per-call byte/string conversions.
func init() {
	for k, v := range Codes {
		codesStr[string(k)] = string(v)
	}
}
|
||||||
|
|
||||||
|
func UnescapeString(in string) string {
|
||||||
|
for b, esc := range codesStr {
|
||||||
|
in = strings.Replace(in, esc, b, -1)
|
||||||
|
}
|
||||||
|
return in
|
||||||
|
}
|
||||||
|
|
||||||
|
func String(in string) string {
|
||||||
|
for b, esc := range codesStr {
|
||||||
|
in = strings.Replace(in, b, esc, -1)
|
||||||
|
}
|
||||||
|
return in
|
||||||
|
}
|
@ -6,6 +6,9 @@
|
|||||||
# possible values : production, development
|
# possible values : production, development
|
||||||
app_mode = production
|
app_mode = production
|
||||||
|
|
||||||
|
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
|
||||||
|
instance_name = ${HOSTNAME}
|
||||||
|
|
||||||
#################################### Paths ####################################
|
#################################### Paths ####################################
|
||||||
[paths]
|
[paths]
|
||||||
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
|
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
|
||||||
@ -336,7 +339,6 @@ global_api_key = -1
|
|||||||
# global limit on number of logged in users.
|
# global limit on number of logged in users.
|
||||||
global_session = -1
|
global_session = -1
|
||||||
|
|
||||||
|
|
||||||
#################################### Alerting ######################################
|
#################################### Alerting ######################################
|
||||||
# docs about alerting can be found in /docs/sources/alerting/
|
# docs about alerting can be found in /docs/sources/alerting/
|
||||||
|
|
||||||
@ -356,3 +358,12 @@ global_session = -1
|
|||||||
|
|
||||||
[alerting]
|
[alerting]
|
||||||
enabled = false
|
enabled = false
|
||||||
|
|
||||||
|
#################################### Internal Grafana Metrics ##########################
|
||||||
|
[metrics]
|
||||||
|
enabled = true
|
||||||
|
interval_seconds = 60
|
||||||
|
|
||||||
|
; [metrics.graphite]
|
||||||
|
; address = localhost:2003
|
||||||
|
; prefix = prod.grafana.%(instance_name)s.
|
||||||
|
@ -6,6 +6,9 @@
|
|||||||
# possible values : production, development
|
# possible values : production, development
|
||||||
; app_mode = production
|
; app_mode = production
|
||||||
|
|
||||||
|
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
|
||||||
|
; instance_name = ${HOSTNAME}
|
||||||
|
|
||||||
#################################### Paths ####################################
|
#################################### Paths ####################################
|
||||||
[paths]
|
[paths]
|
||||||
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
|
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
|
||||||
@ -269,5 +272,17 @@ check_for_updates = true
|
|||||||
;enabled = false
|
;enabled = false
|
||||||
;path = /var/lib/grafana/dashboards
|
;path = /var/lib/grafana/dashboards
|
||||||
|
|
||||||
|
#################################### Internal Grafana Metrics ##########################
|
||||||
|
[metrics]
|
||||||
|
# Disable / Enable internal metrics
|
||||||
|
;enabled = true
|
||||||
|
|
||||||
|
# Publish interval
|
||||||
|
;interval_seconds = 10
|
||||||
|
|
||||||
|
# Send internal metrics to Graphite
|
||||||
|
; [metrics.graphite]
|
||||||
|
; address = localhost:2003
|
||||||
|
; prefix = prod.grafana.%(instance_name)s.
|
||||||
|
|
||||||
|
|
||||||
|
@ -44,6 +44,12 @@ Then you can override them using:
|
|||||||
|
|
||||||
<hr />
|
<hr />
|
||||||
|
|
||||||
|
## instance_name
|
||||||
|
Set the name of the grafana-server instance. Used in logging and internal metrics and in
|
||||||
|
clustering info. Defaults to: `${HOSTNAME}`, which will be replaced with
|
||||||
|
environment variable `HOSTNAME`, if that is empty or does not exist Grafana will try to use
|
||||||
|
system calls to get the machine name.
|
||||||
|
|
||||||
## [paths]
|
## [paths]
|
||||||
|
|
||||||
### data
|
### data
|
||||||
@ -439,3 +445,22 @@ Grafana backend index those json dashboards which will make them appear in regul
|
|||||||
|
|
||||||
### path
|
### path
|
||||||
The full path to a directory containing your json dashboards.
|
The full path to a directory containing your json dashboards.
|
||||||
|
|
||||||
|
## [metrics]
|
||||||
|
|
||||||
|
### enabled
|
||||||
|
Enable metrics reporting. Defaults to true. Available via HTTP API `/api/metrics`.
|
||||||
|
|
||||||
|
### interval_seconds
|
||||||
|
|
||||||
|
Flush/Write interval when sending metrics to external TSDB. Defaults to 60s.
|
||||||
|
|
||||||
|
## [metrics.graphite]
|
||||||
|
Include this section if you want to send internal Grafana metrics to Graphite.
|
||||||
|
|
||||||
|
### address
|
||||||
|
Format `<Hostname or ip>`:port
|
||||||
|
|
||||||
|
### prefix
|
||||||
|
Graphite metric prefix. Defaults to `prod.grafana.%(instance_name)s.`
|
||||||
|
|
||||||
|
@ -237,7 +237,10 @@ func Register(r *macaron.Macaron) {
|
|||||||
r.Get("/search/", Search)
|
r.Get("/search/", Search)
|
||||||
|
|
||||||
// metrics
|
// metrics
|
||||||
r.Get("/metrics/test", GetTestMetrics)
|
r.Get("/metrics/test", wrap(GetTestMetrics))
|
||||||
|
|
||||||
|
// metrics
|
||||||
|
r.Get("/metrics", wrap(GetInternalMetrics))
|
||||||
|
|
||||||
r.Group("/alerts", func() {
|
r.Group("/alerts", func() {
|
||||||
r.Group("/rules", func() {
|
r.Group("/rules", func() {
|
||||||
|
@ -30,7 +30,7 @@ func InitAppPluginRoutes(r *macaron.Macaron) {
|
|||||||
}
|
}
|
||||||
handlers = append(handlers, AppPluginRoute(route, plugin.Id))
|
handlers = append(handlers, AppPluginRoute(route, plugin.Id))
|
||||||
r.Route(url, route.Method, handlers...)
|
r.Route(url, route.Method, handlers...)
|
||||||
log.Info("Plugins: Adding proxy route %s", url)
|
log.Debug("Plugins: Adding proxy route %s", url)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -12,8 +12,12 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
NotFound = ApiError(404, "Not found", nil)
|
NotFound = func() Response {
|
||||||
ServerError = ApiError(500, "Server error", nil)
|
return ApiError(404, "Not found", nil)
|
||||||
|
}
|
||||||
|
ServerError = func(err error) Response {
|
||||||
|
return ApiError(500, "Server error", err)
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
type Response interface {
|
type Response interface {
|
||||||
@ -34,7 +38,7 @@ func wrap(action interface{}) macaron.Handler {
|
|||||||
if err == nil && val != nil && len(val) > 0 {
|
if err == nil && val != nil && len(val) > 0 {
|
||||||
res = val[0].Interface().(Response)
|
res = val[0].Interface().(Response)
|
||||||
} else {
|
} else {
|
||||||
res = ServerError
|
res = ServerError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
res.WriteTo(c.Resp)
|
res.WriteTo(c.Resp)
|
||||||
|
@ -32,8 +32,6 @@ func isDashboardStarredByUser(c *middleware.Context, dashId int64) (bool, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func GetDashboard(c *middleware.Context) {
|
func GetDashboard(c *middleware.Context) {
|
||||||
metrics.M_Api_Dashboard_Get.Inc(1)
|
|
||||||
|
|
||||||
slug := strings.ToLower(c.Params(":slug"))
|
slug := strings.ToLower(c.Params(":slug"))
|
||||||
|
|
||||||
query := m.GetDashboardQuery{Slug: slug, OrgId: c.OrgId}
|
query := m.GetDashboardQuery{Slug: slug, OrgId: c.OrgId}
|
||||||
@ -77,6 +75,7 @@ func GetDashboard(c *middleware.Context) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.TimeRequest(metrics.M_Api_Dashboard_Get)
|
||||||
c.JSON(200, dto)
|
c.JSON(200, dto)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -166,8 +165,7 @@ func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
metrics.M_Api_Dashboard_Post.Inc(1)
|
c.TimeRequest(metrics.M_Api_Dashboard_Save)
|
||||||
|
|
||||||
c.JSON(200, util.DynMap{"status": "success", "slug": cmd.Result.Slug, "version": cmd.Result.Version})
|
c.JSON(200, util.DynMap{"status": "success", "slug": cmd.Result.Slug, "version": cmd.Result.Version})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/grafana/grafana/pkg/api/cloudwatch"
|
"github.com/grafana/grafana/pkg/api/cloudwatch"
|
||||||
"github.com/grafana/grafana/pkg/bus"
|
"github.com/grafana/grafana/pkg/bus"
|
||||||
|
"github.com/grafana/grafana/pkg/metrics"
|
||||||
"github.com/grafana/grafana/pkg/middleware"
|
"github.com/grafana/grafana/pkg/middleware"
|
||||||
m "github.com/grafana/grafana/pkg/models"
|
m "github.com/grafana/grafana/pkg/models"
|
||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
@ -80,7 +81,10 @@ func getDatasource(id int64, orgId int64) (*m.DataSource, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ProxyDataSourceRequest(c *middleware.Context) {
|
func ProxyDataSourceRequest(c *middleware.Context) {
|
||||||
|
c.TimeRequest(metrics.M_DataSource_ProxyReq_Timer)
|
||||||
|
|
||||||
ds, err := getDatasource(c.ParamsInt64(":id"), c.OrgId)
|
ds, err := getDatasource(c.ParamsInt64(":id"), c.OrgId)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.JsonApiErr(500, "Unable to load datasource meta data", err)
|
c.JsonApiErr(500, "Unable to load datasource meta data", err)
|
||||||
return
|
return
|
||||||
|
@ -1,10 +1,15 @@
|
|||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/grafana/grafana/pkg/api/dtos"
|
"encoding/json"
|
||||||
"github.com/grafana/grafana/pkg/middleware"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/api/dtos"
|
||||||
|
"github.com/grafana/grafana/pkg/metrics"
|
||||||
|
"github.com/grafana/grafana/pkg/middleware"
|
||||||
|
"github.com/grafana/grafana/pkg/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetTestMetrics(c *middleware.Context) {
|
func GetTestMetrics(c *middleware.Context) {
|
||||||
@ -34,3 +39,51 @@ func GetTestMetrics(c *middleware.Context) {
|
|||||||
|
|
||||||
c.JSON(200, &result)
|
c.JSON(200, &result)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func GetInternalMetrics(c *middleware.Context) Response {
|
||||||
|
if metrics.UseNilMetrics {
|
||||||
|
return Json(200, util.DynMap{"message": "Metrics disabled"})
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots := metrics.MetricStats.GetSnapshots()
|
||||||
|
|
||||||
|
resp := make(map[string]interface{})
|
||||||
|
|
||||||
|
for _, m := range snapshots {
|
||||||
|
metricName := m.Name() + m.StringifyTags()
|
||||||
|
|
||||||
|
switch metric := m.(type) {
|
||||||
|
case metrics.Counter:
|
||||||
|
resp[metricName] = map[string]interface{}{
|
||||||
|
"count": metric.Count(),
|
||||||
|
}
|
||||||
|
case metrics.Timer:
|
||||||
|
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
|
||||||
|
resp[metricName] = map[string]interface{}{
|
||||||
|
"count": metric.Count(),
|
||||||
|
"min": metric.Min(),
|
||||||
|
"max": metric.Max(),
|
||||||
|
"mean": metric.Mean(),
|
||||||
|
"std": metric.StdDev(),
|
||||||
|
"p25": percentiles[0],
|
||||||
|
"p75": percentiles[1],
|
||||||
|
"p90": percentiles[2],
|
||||||
|
"p99": percentiles[3],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var b []byte
|
||||||
|
var err error
|
||||||
|
if b, err = json.MarshalIndent(resp, "", " "); err != nil {
|
||||||
|
return ApiError(500, "body json marshal", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &NormalResponse{
|
||||||
|
body: b,
|
||||||
|
status: 200,
|
||||||
|
header: http.Header{
|
||||||
|
"Content-Type": []string{"application/json"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -4,6 +4,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/grafana/grafana/pkg/bus"
|
"github.com/grafana/grafana/pkg/bus"
|
||||||
|
"github.com/grafana/grafana/pkg/metrics"
|
||||||
"github.com/grafana/grafana/pkg/middleware"
|
"github.com/grafana/grafana/pkg/middleware"
|
||||||
"github.com/grafana/grafana/pkg/services/search"
|
"github.com/grafana/grafana/pkg/services/search"
|
||||||
)
|
)
|
||||||
@ -42,5 +43,6 @@ func Search(c *middleware.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.TimeRequest(metrics.M_Api_Dashboard_Search)
|
||||||
c.JSON(200, searchQuery.Result)
|
c.JSON(200, searchQuery.Result)
|
||||||
}
|
}
|
||||||
|
@ -59,6 +59,7 @@ func main() {
|
|||||||
flag.Parse()
|
flag.Parse()
|
||||||
writePIDFile()
|
writePIDFile()
|
||||||
initRuntime()
|
initRuntime()
|
||||||
|
metrics.Init()
|
||||||
|
|
||||||
search.Init()
|
search.Init()
|
||||||
login.Init()
|
login.Init()
|
||||||
@ -71,10 +72,6 @@ func main() {
|
|||||||
log.Fatal(3, "Notification service failed to initialize", err)
|
log.Fatal(3, "Notification service failed to initialize", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if setting.ReportingEnabled {
|
|
||||||
go metrics.StartUsageReportLoop()
|
|
||||||
}
|
|
||||||
|
|
||||||
StartServer()
|
StartServer()
|
||||||
exitChan <- 0
|
exitChan <- 0
|
||||||
}
|
}
|
||||||
@ -92,6 +89,7 @@ func initRuntime() {
|
|||||||
|
|
||||||
log.Info("Starting Grafana")
|
log.Info("Starting Grafana")
|
||||||
log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))
|
log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))
|
||||||
|
|
||||||
setting.LogConfigurationInfo()
|
setting.LogConfigurationInfo()
|
||||||
|
|
||||||
sqlstore.NewEngine()
|
sqlstore.NewEngine()
|
||||||
@ -120,9 +118,7 @@ func listenToSystemSignels() {
|
|||||||
signalChan := make(chan os.Signal, 1)
|
signalChan := make(chan os.Signal, 1)
|
||||||
code := 0
|
code := 0
|
||||||
|
|
||||||
signal.Notify(signalChan, os.Interrupt)
|
signal.Notify(signalChan, os.Interrupt, os.Kill, syscall.SIGTERM)
|
||||||
signal.Notify(signalChan, os.Kill)
|
|
||||||
signal.Notify(signalChan, syscall.SIGTERM)
|
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case sig := <-signalChan:
|
case sig := <-signalChan:
|
||||||
|
@ -31,7 +31,7 @@ func newMacaron() *macaron.Macaron {
|
|||||||
|
|
||||||
for _, route := range plugins.StaticRoutes {
|
for _, route := range plugins.StaticRoutes {
|
||||||
pluginRoute := path.Join("/public/plugins/", route.PluginId)
|
pluginRoute := path.Join("/public/plugins/", route.PluginId)
|
||||||
log.Info("Plugins: Adding route %s -> %s", pluginRoute, route.Directory)
|
log.Debug("Plugins: Adding route %s -> %s", pluginRoute, route.Directory)
|
||||||
mapStatic(m, route.Directory, "", pluginRoute)
|
mapStatic(m, route.Directory, "", pluginRoute)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
122
pkg/metrics/EMWA.go
Normal file
122
pkg/metrics/EMWA.go
Normal file
@ -0,0 +1,122 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EWMAs continuously calculate an exponentially-weighted moving average
|
||||||
|
// based on an outside source of clock ticks.
|
||||||
|
type EWMA interface {
|
||||||
|
Rate() float64
|
||||||
|
Snapshot() EWMA
|
||||||
|
Tick()
|
||||||
|
Update(int64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA constructs a new EWMA with the given alpha.
|
||||||
|
func NewEWMA(alpha float64) EWMA {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilEWMA{}
|
||||||
|
}
|
||||||
|
return &StandardEWMA{alpha: alpha}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
|
||||||
|
func NewEWMA1() EWMA {
|
||||||
|
return NewEWMA(1 - math.Exp(-5.0/60.0/1))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA5 constructs a new EWMA for a five-minute moving average.
|
||||||
|
func NewEWMA5() EWMA {
|
||||||
|
return NewEWMA(1 - math.Exp(-5.0/60.0/5))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
|
||||||
|
func NewEWMA15() EWMA {
|
||||||
|
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EWMASnapshot is a read-only copy of another EWMA.
|
||||||
|
type EWMASnapshot float64
|
||||||
|
|
||||||
|
// Rate returns the rate of events per second at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (a EWMASnapshot) Rate() float64 { return float64(a) }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (a EWMASnapshot) Snapshot() EWMA { return a }
|
||||||
|
|
||||||
|
// Tick panics.
|
||||||
|
func (EWMASnapshot) Tick() {
|
||||||
|
panic("Tick called on an EWMASnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (EWMASnapshot) Update(int64) {
|
||||||
|
panic("Update called on an EWMASnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NilEWMA is a no-op EWMA.
|
||||||
|
type NilEWMA struct{}
|
||||||
|
|
||||||
|
// Rate is a no-op.
|
||||||
|
func (NilEWMA) Rate() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
|
||||||
|
|
||||||
|
// Tick is a no-op.
|
||||||
|
func (NilEWMA) Tick() {}
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilEWMA) Update(n int64) {}
|
||||||
|
|
||||||
|
// StandardEWMA is the standard implementation of an EWMA and tracks the number
|
||||||
|
// of uncounted events and processes them on each tick. It uses the
|
||||||
|
// sync/atomic package to manage uncounted events.
|
||||||
|
type StandardEWMA struct {
|
||||||
|
uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
|
||||||
|
alpha float64
|
||||||
|
rate float64
|
||||||
|
init bool
|
||||||
|
mutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate returns the moving average rate of events per second.
|
||||||
|
func (a *StandardEWMA) Rate() float64 {
|
||||||
|
a.mutex.Lock()
|
||||||
|
defer a.mutex.Unlock()
|
||||||
|
return a.rate * float64(1e9)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the EWMA.
|
||||||
|
func (a *StandardEWMA) Snapshot() EWMA {
|
||||||
|
return EWMASnapshot(a.Rate())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tick ticks the clock to update the moving average. It assumes it is called
|
||||||
|
// every five seconds.
|
||||||
|
func (a *StandardEWMA) Tick() {
|
||||||
|
count := atomic.LoadInt64(&a.uncounted)
|
||||||
|
atomic.AddInt64(&a.uncounted, -count)
|
||||||
|
instantRate := float64(count) / float64(5e9)
|
||||||
|
a.mutex.Lock()
|
||||||
|
defer a.mutex.Unlock()
|
||||||
|
if a.init {
|
||||||
|
a.rate += a.alpha * (instantRate - a.rate)
|
||||||
|
} else {
|
||||||
|
a.init = true
|
||||||
|
a.rate = instantRate
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update adds n uncounted events.
|
||||||
|
func (a *StandardEWMA) Update(n int64) {
|
||||||
|
atomic.AddInt64(&a.uncounted, n)
|
||||||
|
}
|
46
pkg/metrics/combos.go
Normal file
46
pkg/metrics/combos.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package metrics
|
||||||
|
|
||||||
|
// type comboCounterRef struct {
|
||||||
|
// *MetricMeta
|
||||||
|
// usageCounter Counter
|
||||||
|
// metricCounter Counter
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func RegComboCounter(name string, tagStrings ...string) Counter {
|
||||||
|
// meta := NewMetricMeta(name, tagStrings)
|
||||||
|
// cr := &comboCounterRef{
|
||||||
|
// MetricMeta: meta,
|
||||||
|
// usageCounter: NewCounter(meta),
|
||||||
|
// metricCounter: NewCounter(meta),
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// UsageStats.Register(cr.usageCounter)
|
||||||
|
// MetricStats.Register(cr.metricCounter)
|
||||||
|
//
|
||||||
|
// return cr
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func (c comboCounterRef) Clear() {
|
||||||
|
// c.usageCounter.Clear()
|
||||||
|
// c.metricCounter.Clear()
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func (c comboCounterRef) Count() int64 {
|
||||||
|
// panic("Count called on a combocounter ref")
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // Dec panics.
|
||||||
|
// func (c comboCounterRef) Dec(i int64) {
|
||||||
|
// c.usageCounter.Dec(i)
|
||||||
|
// c.metricCounter.Dec(i)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // Inc panics.
|
||||||
|
// func (c comboCounterRef) Inc(i int64) {
|
||||||
|
// c.usageCounter.Inc(i)
|
||||||
|
// c.metricCounter.Inc(i)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func (c comboCounterRef) Snapshot() Metric {
|
||||||
|
// return c.metricCounter.Snapshot()
|
||||||
|
// }
|
61
pkg/metrics/common.go
Normal file
61
pkg/metrics/common.go
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
package metrics
|
||||||
|
|
||||||
|
import "github.com/grafana/grafana/pkg/log"
|
||||||
|
|
||||||
|
type MetricMeta struct {
|
||||||
|
tags map[string]string
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMetricMeta(name string, tagStrings []string) *MetricMeta {
|
||||||
|
if len(tagStrings)%2 != 0 {
|
||||||
|
log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings)
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := make(map[string]string)
|
||||||
|
for i := 0; i < len(tagStrings); i += 2 {
|
||||||
|
tags[tagStrings[i]] = tagStrings[i+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MetricMeta{
|
||||||
|
tags: tags,
|
||||||
|
name: name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetricMeta) Name() string {
|
||||||
|
return m.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetricMeta) GetTagsCopy() map[string]string {
|
||||||
|
if len(m.tags) == 0 {
|
||||||
|
return make(map[string]string)
|
||||||
|
}
|
||||||
|
|
||||||
|
copy := make(map[string]string)
|
||||||
|
for k2, v2 := range m.tags {
|
||||||
|
copy[k2] = v2
|
||||||
|
}
|
||||||
|
|
||||||
|
return copy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetricMeta) StringifyTags() string {
|
||||||
|
if len(m.tags) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
str := ""
|
||||||
|
for key, value := range m.tags {
|
||||||
|
str += "." + key + "_" + value
|
||||||
|
}
|
||||||
|
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
|
||||||
|
type Metric interface {
|
||||||
|
Name() string
|
||||||
|
GetTagsCopy() map[string]string
|
||||||
|
StringifyTags() string
|
||||||
|
Snapshot() Metric
|
||||||
|
}
|
@ -4,45 +4,33 @@ import "sync/atomic"
|
|||||||
|
|
||||||
// Counters hold an int64 value that can be incremented and decremented.
|
// Counters hold an int64 value that can be incremented and decremented.
|
||||||
type Counter interface {
|
type Counter interface {
|
||||||
|
Metric
|
||||||
|
|
||||||
Clear()
|
Clear()
|
||||||
Count() int64
|
Count() int64
|
||||||
Dec(int64)
|
Dec(int64)
|
||||||
Inc(int64)
|
Inc(int64)
|
||||||
Snapshot() Counter
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCounter constructs a new StandardCounter.
|
// NewCounter constructs a new StandardCounter.
|
||||||
func NewCounter() Counter {
|
func NewCounter(meta *MetricMeta) Counter {
|
||||||
return &StandardCounter{0}
|
return &StandardCounter{
|
||||||
|
MetricMeta: meta,
|
||||||
|
count: 0,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CounterSnapshot is a read-only copy of another Counter.
|
func RegCounter(name string, tagStrings ...string) Counter {
|
||||||
type CounterSnapshot int64
|
cr := NewCounter(NewMetricMeta(name, tagStrings))
|
||||||
|
MetricStats.Register(cr)
|
||||||
// Clear panics.
|
return cr
|
||||||
func (CounterSnapshot) Clear() {
|
|
||||||
panic("Clear called on a CounterSnapshot")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Count returns the count at the time the snapshot was taken.
|
|
||||||
func (c CounterSnapshot) Count() int64 { return int64(c) }
|
|
||||||
|
|
||||||
// Dec panics.
|
|
||||||
func (CounterSnapshot) Dec(int64) {
|
|
||||||
panic("Dec called on a CounterSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inc panics.
|
|
||||||
func (CounterSnapshot) Inc(int64) {
|
|
||||||
panic("Inc called on a CounterSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (c CounterSnapshot) Snapshot() Counter { return c }
|
|
||||||
|
|
||||||
// StandardCounter is the standard implementation of a Counter and uses the
|
// StandardCounter is the standard implementation of a Counter and uses the
|
||||||
// sync/atomic package to manage a single int64 value.
|
// sync/atomic package to manage a single int64 value.
|
||||||
type StandardCounter struct {
|
type StandardCounter struct {
|
||||||
|
*MetricMeta
|
||||||
|
|
||||||
count int64
|
count int64
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -66,7 +54,9 @@ func (c *StandardCounter) Inc(i int64) {
|
|||||||
atomic.AddInt64(&c.count, i)
|
atomic.AddInt64(&c.count, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the counter.
|
func (c *StandardCounter) Snapshot() Metric {
|
||||||
func (c *StandardCounter) Snapshot() Counter {
|
return &StandardCounter{
|
||||||
return CounterSnapshot(c.Count())
|
MetricMeta: c.MetricMeta,
|
||||||
|
count: c.count,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
11
pkg/metrics/delta.go
Normal file
11
pkg/metrics/delta.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package metrics
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
func calculateDelta(oldValue, newValue int64) int64 {
|
||||||
|
if oldValue < newValue {
|
||||||
|
return newValue - oldValue
|
||||||
|
} else {
|
||||||
|
return (math.MaxInt64 - oldValue) + (newValue - math.MinInt64) + 1
|
||||||
|
}
|
||||||
|
}
|
82
pkg/metrics/gauge.go
Normal file
82
pkg/metrics/gauge.go
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "sync/atomic"
|
||||||
|
|
||||||
|
// Gauges hold an int64 value that can be set arbitrarily.
|
||||||
|
type Gauge interface {
|
||||||
|
Metric
|
||||||
|
|
||||||
|
Update(int64)
|
||||||
|
Value() int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewGauge(meta *MetricMeta) Gauge {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilGauge{}
|
||||||
|
}
|
||||||
|
return &StandardGauge{
|
||||||
|
MetricMeta: meta,
|
||||||
|
value: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RegGauge(meta *MetricMeta) Gauge {
|
||||||
|
g := NewGauge(meta)
|
||||||
|
MetricStats.Register(g)
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
|
||||||
|
// GaugeSnapshot is a read-only copy of another Gauge.
|
||||||
|
type GaugeSnapshot struct {
|
||||||
|
*MetricMeta
|
||||||
|
value int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (g GaugeSnapshot) Snapshot() Metric { return g }
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (GaugeSnapshot) Update(int64) {
|
||||||
|
panic("Update called on a GaugeSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the value at the time the snapshot was taken.
|
||||||
|
func (g GaugeSnapshot) Value() int64 { return g.value }
|
||||||
|
|
||||||
|
// NilGauge is a no-op Gauge.
|
||||||
|
type NilGauge struct{ *MetricMeta }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilGauge) Snapshot() Metric { return NilGauge{} }
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilGauge) Update(v int64) {}
|
||||||
|
|
||||||
|
// Value is a no-op.
|
||||||
|
func (NilGauge) Value() int64 { return 0 }
|
||||||
|
|
||||||
|
// StandardGauge is the standard implementation of a Gauge and uses the
|
||||||
|
// sync/atomic package to manage a single int64 value.
|
||||||
|
type StandardGauge struct {
|
||||||
|
*MetricMeta
|
||||||
|
value int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the gauge.
|
||||||
|
func (g *StandardGauge) Snapshot() Metric {
|
||||||
|
return GaugeSnapshot{MetricMeta: g.MetricMeta, value: g.value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update updates the gauge's value.
|
||||||
|
func (g *StandardGauge) Update(v int64) {
|
||||||
|
atomic.StoreInt64(&g.value, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the gauge's current value.
|
||||||
|
func (g *StandardGauge) Value() int64 {
|
||||||
|
return atomic.LoadInt64(&g.value)
|
||||||
|
}
|
91
pkg/metrics/graphite.go
Normal file
91
pkg/metrics/graphite.go
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/log"
|
||||||
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GraphitePublisher struct {
|
||||||
|
address string
|
||||||
|
protocol string
|
||||||
|
prefix string
|
||||||
|
prevCounts map[string]int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateGraphitePublisher() (*GraphitePublisher, error) {
|
||||||
|
graphiteSection, err := setting.Cfg.GetSection("metrics.graphite")
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
publisher := &GraphitePublisher{}
|
||||||
|
publisher.prevCounts = make(map[string]int64)
|
||||||
|
publisher.protocol = "tcp"
|
||||||
|
publisher.address = graphiteSection.Key("address").MustString("localhost:2003")
|
||||||
|
publisher.prefix = graphiteSection.Key("prefix").MustString("service.grafana.%(instance_name)s")
|
||||||
|
|
||||||
|
return publisher, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *GraphitePublisher) Publish(metrics []Metric) {
|
||||||
|
conn, err := net.DialTimeout(this.protocol, this.address, time.Second*5)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error(3, "Metrics: GraphitePublisher: Failed to connect to %s!", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bytes.NewBufferString("")
|
||||||
|
now := time.Now().Unix()
|
||||||
|
|
||||||
|
for _, m := range metrics {
|
||||||
|
metricName := this.prefix + m.Name() + m.StringifyTags()
|
||||||
|
|
||||||
|
switch metric := m.(type) {
|
||||||
|
case Counter:
|
||||||
|
this.addCount(buf, metricName+".count", metric.Count(), now)
|
||||||
|
case Timer:
|
||||||
|
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
|
||||||
|
this.addCount(buf, metricName+".count", metric.Count(), now)
|
||||||
|
this.addInt(buf, metricName+".max", metric.Max(), now)
|
||||||
|
this.addInt(buf, metricName+".min", metric.Min(), now)
|
||||||
|
this.addFloat(buf, metricName+".mean", metric.Mean(), now)
|
||||||
|
this.addFloat(buf, metricName+".std", metric.StdDev(), now)
|
||||||
|
this.addFloat(buf, metricName+".p25", percentiles[0], now)
|
||||||
|
this.addFloat(buf, metricName+".p75", percentiles[1], now)
|
||||||
|
this.addFloat(buf, metricName+".p90", percentiles[2], now)
|
||||||
|
this.addFloat(buf, metricName+".p99", percentiles[3], now)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf)
|
||||||
|
_, err = conn.Write(buf.Bytes())
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error(3, "Metrics: GraphitePublisher: Failed to send metrics! %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *GraphitePublisher) addInt(buf *bytes.Buffer, metric string, value int64, now int64) {
|
||||||
|
buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *GraphitePublisher) addFloat(buf *bytes.Buffer, metric string, value float64, now int64) {
|
||||||
|
buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *GraphitePublisher) addCount(buf *bytes.Buffer, metric string, value int64, now int64) {
|
||||||
|
delta := value
|
||||||
|
|
||||||
|
if last, ok := this.prevCounts[metric]; ok {
|
||||||
|
delta = calculateDelta(last, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
this.prevCounts[metric] = value
|
||||||
|
buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, delta, now))
|
||||||
|
}
|
189
pkg/metrics/histogram.go
Normal file
189
pkg/metrics/histogram.go
Normal file
@ -0,0 +1,189 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
// Histograms calculate distribution statistics from a series of int64 values.
|
||||||
|
type Histogram interface {
|
||||||
|
Metric
|
||||||
|
|
||||||
|
Clear()
|
||||||
|
Count() int64
|
||||||
|
Max() int64
|
||||||
|
Mean() float64
|
||||||
|
Min() int64
|
||||||
|
Percentile(float64) float64
|
||||||
|
Percentiles([]float64) []float64
|
||||||
|
StdDev() float64
|
||||||
|
Sum() int64
|
||||||
|
Update(int64)
|
||||||
|
Variance() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHistogram(meta *MetricMeta, s Sample) Histogram {
|
||||||
|
return &StandardHistogram{
|
||||||
|
MetricMeta: meta,
|
||||||
|
sample: s,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistogramSnapshot is a read-only copy of another Histogram.
|
||||||
|
type HistogramSnapshot struct {
|
||||||
|
*MetricMeta
|
||||||
|
sample *SampleSnapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear panics.
|
||||||
|
func (*HistogramSnapshot) Clear() {
|
||||||
|
panic("Clear called on a HistogramSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of samples recorded at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample at the time the snapshot
|
||||||
|
// was taken.
|
||||||
|
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of values in the sample at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Percentile(p float64) float64 {
|
||||||
|
return h.sample.Percentile(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of values in the sample
|
||||||
|
// at the time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
|
||||||
|
return h.sample.Percentiles(ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample returns the Sample underlying the histogram.
|
||||||
|
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (h *HistogramSnapshot) Snapshot() Metric { return h }
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
|
||||||
|
|
||||||
|
// Sum returns the sum in the sample at the time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (*HistogramSnapshot) Update(int64) {
|
||||||
|
panic("Update called on a HistogramSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of inputs at the time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
|
||||||
|
|
||||||
|
// NilHistogram is a no-op Histogram.
|
||||||
|
type NilHistogram struct {
|
||||||
|
*MetricMeta
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear is a no-op.
|
||||||
|
func (NilHistogram) Clear() {}
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilHistogram) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Max is a no-op.
|
||||||
|
func (NilHistogram) Max() int64 { return 0 }
|
||||||
|
|
||||||
|
// Mean is a no-op.
|
||||||
|
func (NilHistogram) Mean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Min is a no-op.
|
||||||
|
func (NilHistogram) Min() int64 { return 0 }
|
||||||
|
|
||||||
|
// Percentile is a no-op.
|
||||||
|
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Percentiles is a no-op.
|
||||||
|
func (NilHistogram) Percentiles(ps []float64) []float64 {
|
||||||
|
return make([]float64, len(ps))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample is a no-op.
|
||||||
|
func (NilHistogram) Sample() Sample { return NilSample{} }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (n NilHistogram) Snapshot() Metric { return n }
|
||||||
|
|
||||||
|
// StdDev is a no-op.
|
||||||
|
func (NilHistogram) StdDev() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Sum is a no-op.
|
||||||
|
func (NilHistogram) Sum() int64 { return 0 }
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilHistogram) Update(v int64) {}
|
||||||
|
|
||||||
|
// Variance is a no-op.
|
||||||
|
func (NilHistogram) Variance() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// StandardHistogram is the standard implementation of a Histogram and uses a
|
||||||
|
// Sample to bound its memory use.
|
||||||
|
type StandardHistogram struct {
|
||||||
|
*MetricMeta
|
||||||
|
sample Sample
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears the histogram and its sample.
|
||||||
|
func (h *StandardHistogram) Clear() { h.sample.Clear() }
|
||||||
|
|
||||||
|
// Count returns the number of samples recorded since the histogram was last
|
||||||
|
// cleared.
|
||||||
|
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample.
|
||||||
|
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample.
|
||||||
|
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample.
|
||||||
|
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of the values in the sample.
|
||||||
|
func (h *StandardHistogram) Percentile(p float64) float64 {
|
||||||
|
return h.sample.Percentile(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
||||||
|
// sample.
|
||||||
|
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
|
||||||
|
return h.sample.Percentiles(ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample returns the Sample underlying the histogram.
|
||||||
|
func (h *StandardHistogram) Sample() Sample { return h.sample }
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the histogram.
|
||||||
|
func (h *StandardHistogram) Snapshot() Metric {
|
||||||
|
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample.
|
||||||
|
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
|
||||||
|
|
||||||
|
// Sum returns the sum in the sample.
|
||||||
|
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
|
||||||
|
|
||||||
|
// Update samples a new value.
|
||||||
|
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
|
||||||
|
|
||||||
|
// Variance returns the variance of the values in the sample.
|
||||||
|
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
|
90
pkg/metrics/histogram_test.go
Normal file
90
pkg/metrics/histogram_test.go
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func BenchmarkHistogram(b *testing.B) {
|
||||||
|
h := NewHistogram(nil, NewUniformSample(100))
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
h.Update(int64(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHistogram10000(t *testing.T) {
|
||||||
|
h := NewHistogram(nil, NewUniformSample(100000))
|
||||||
|
for i := 1; i <= 10000; i++ {
|
||||||
|
h.Update(int64(i))
|
||||||
|
}
|
||||||
|
testHistogram10000(t, h)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHistogramEmpty(t *testing.T) {
|
||||||
|
h := NewHistogram(nil, NewUniformSample(100))
|
||||||
|
if count := h.Count(); 0 != count {
|
||||||
|
t.Errorf("h.Count(): 0 != %v\n", count)
|
||||||
|
}
|
||||||
|
if min := h.Min(); 0 != min {
|
||||||
|
t.Errorf("h.Min(): 0 != %v\n", min)
|
||||||
|
}
|
||||||
|
if max := h.Max(); 0 != max {
|
||||||
|
t.Errorf("h.Max(): 0 != %v\n", max)
|
||||||
|
}
|
||||||
|
if mean := h.Mean(); 0.0 != mean {
|
||||||
|
t.Errorf("h.Mean(): 0.0 != %v\n", mean)
|
||||||
|
}
|
||||||
|
if stdDev := h.StdDev(); 0.0 != stdDev {
|
||||||
|
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
|
||||||
|
}
|
||||||
|
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||||
|
if 0.0 != ps[0] {
|
||||||
|
t.Errorf("median: 0.0 != %v\n", ps[0])
|
||||||
|
}
|
||||||
|
if 0.0 != ps[1] {
|
||||||
|
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
|
||||||
|
}
|
||||||
|
if 0.0 != ps[2] {
|
||||||
|
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHistogramSnapshot(t *testing.T) {
|
||||||
|
h := NewHistogram(nil, NewUniformSample(100000))
|
||||||
|
for i := 1; i <= 10000; i++ {
|
||||||
|
h.Update(int64(i))
|
||||||
|
}
|
||||||
|
snapshot := h.Snapshot().(Histogram)
|
||||||
|
h.Update(0)
|
||||||
|
testHistogram10000(t, snapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testHistogram10000(t *testing.T, h Histogram) {
|
||||||
|
if count := h.Count(); 10000 != count {
|
||||||
|
t.Errorf("h.Count(): 10000 != %v\n", count)
|
||||||
|
}
|
||||||
|
if min := h.Min(); 1 != min {
|
||||||
|
t.Errorf("h.Min(): 1 != %v\n", min)
|
||||||
|
}
|
||||||
|
if max := h.Max(); 10000 != max {
|
||||||
|
t.Errorf("h.Max(): 10000 != %v\n", max)
|
||||||
|
}
|
||||||
|
if mean := h.Mean(); 5000.5 != mean {
|
||||||
|
t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
|
||||||
|
}
|
||||||
|
if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
|
||||||
|
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
|
||||||
|
}
|
||||||
|
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||||
|
if 5000.5 != ps[0] {
|
||||||
|
t.Errorf("median: 5000.5 != %v\n", ps[0])
|
||||||
|
}
|
||||||
|
if 7500.75 != ps[1] {
|
||||||
|
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
|
||||||
|
}
|
||||||
|
if 9900.99 != ps[2] {
|
||||||
|
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
|
||||||
|
}
|
||||||
|
}
|
221
pkg/metrics/meter.go
Normal file
221
pkg/metrics/meter.go
Normal file
@ -0,0 +1,221 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Meters count events to produce exponentially-weighted moving average rates
|
||||||
|
// at one-, five-, and fifteen-minutes and a mean rate.
|
||||||
|
type Meter interface {
|
||||||
|
Metric
|
||||||
|
|
||||||
|
Count() int64
|
||||||
|
Mark(int64)
|
||||||
|
Rate1() float64
|
||||||
|
Rate5() float64
|
||||||
|
Rate15() float64
|
||||||
|
RateMean() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMeter constructs a new StandardMeter and launches a goroutine.
|
||||||
|
func NewMeter(meta *MetricMeta) Meter {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilMeter{}
|
||||||
|
}
|
||||||
|
|
||||||
|
m := newStandardMeter(meta)
|
||||||
|
arbiter.Lock()
|
||||||
|
defer arbiter.Unlock()
|
||||||
|
arbiter.meters = append(arbiter.meters, m)
|
||||||
|
if !arbiter.started {
|
||||||
|
arbiter.started = true
|
||||||
|
go arbiter.tick()
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
type MeterSnapshot struct {
|
||||||
|
*MetricMeta
|
||||||
|
count int64
|
||||||
|
rate1, rate5, rate15, rateMean float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of events at the time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Count() int64 { return m.count }
|
||||||
|
|
||||||
|
// Mark panics.
|
||||||
|
func (*MeterSnapshot) Mark(n int64) {
|
||||||
|
panic("Mark called on a MeterSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 returns the one-minute moving average rate of events per second at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
|
||||||
|
|
||||||
|
// Rate5 returns the five-minute moving average rate of events per second at
|
||||||
|
// the time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
|
||||||
|
|
||||||
|
// Rate15 returns the fifteen-minute moving average rate of events per second
|
||||||
|
// at the time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
|
||||||
|
|
||||||
|
// RateMean returns the meter's mean rate of events per second at the time the
|
||||||
|
// snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (m *MeterSnapshot) Snapshot() Metric { return m }
|
||||||
|
|
||||||
|
// NilMeter is a no-op Meter.
|
||||||
|
type NilMeter struct{ *MetricMeta }
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilMeter) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Mark is a no-op.
|
||||||
|
func (NilMeter) Mark(n int64) {}
|
||||||
|
|
||||||
|
// Rate1 is a no-op.
|
||||||
|
func (NilMeter) Rate1() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Rate5 is a no-op.
|
||||||
|
func (NilMeter) Rate5() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Rate15is a no-op.
|
||||||
|
func (NilMeter) Rate15() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// RateMean is a no-op.
|
||||||
|
func (NilMeter) RateMean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilMeter) Snapshot() Metric { return NilMeter{} }
|
||||||
|
|
||||||
|
// StandardMeter is the standard implementation of a Meter.
|
||||||
|
type StandardMeter struct {
|
||||||
|
*MetricMeta
|
||||||
|
lock sync.RWMutex
|
||||||
|
snapshot *MeterSnapshot
|
||||||
|
a1, a5, a15 EWMA
|
||||||
|
startTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStandardMeter(meta *MetricMeta) *StandardMeter {
|
||||||
|
return &StandardMeter{
|
||||||
|
MetricMeta: meta,
|
||||||
|
snapshot: &MeterSnapshot{MetricMeta: meta},
|
||||||
|
a1: NewEWMA1(),
|
||||||
|
a5: NewEWMA5(),
|
||||||
|
a15: NewEWMA15(),
|
||||||
|
startTime: time.Now(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of events recorded.
|
||||||
|
func (m *StandardMeter) Count() int64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
count := m.snapshot.count
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark records the occurance of n events.
|
||||||
|
func (m *StandardMeter) Mark(n int64) {
|
||||||
|
m.lock.Lock()
|
||||||
|
defer m.lock.Unlock()
|
||||||
|
m.snapshot.count += n
|
||||||
|
m.a1.Update(n)
|
||||||
|
m.a5.Update(n)
|
||||||
|
m.a15.Update(n)
|
||||||
|
m.updateSnapshot()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 returns the one-minute moving average rate of events per second.
|
||||||
|
func (m *StandardMeter) Rate1() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rate1 := m.snapshot.rate1
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rate1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate5 returns the five-minute moving average rate of events per second.
|
||||||
|
func (m *StandardMeter) Rate5() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rate5 := m.snapshot.rate5
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rate5
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
||||||
|
func (m *StandardMeter) Rate15() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rate15 := m.snapshot.rate15
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rate15
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateMean returns the meter's mean rate of events per second.
|
||||||
|
func (m *StandardMeter) RateMean() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rateMean := m.snapshot.rateMean
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rateMean
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the meter.
|
||||||
|
func (m *StandardMeter) Snapshot() Metric {
|
||||||
|
m.lock.RLock()
|
||||||
|
snapshot := *m.snapshot
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return &snapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StandardMeter) updateSnapshot() {
|
||||||
|
// should run with write lock held on m.lock
|
||||||
|
snapshot := m.snapshot
|
||||||
|
snapshot.rate1 = m.a1.Rate()
|
||||||
|
snapshot.rate5 = m.a5.Rate()
|
||||||
|
snapshot.rate15 = m.a15.Rate()
|
||||||
|
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StandardMeter) tick() {
|
||||||
|
m.lock.Lock()
|
||||||
|
defer m.lock.Unlock()
|
||||||
|
m.a1.Tick()
|
||||||
|
m.a5.Tick()
|
||||||
|
m.a15.Tick()
|
||||||
|
m.updateSnapshot()
|
||||||
|
}
|
||||||
|
|
||||||
|
type meterArbiter struct {
|
||||||
|
sync.RWMutex
|
||||||
|
started bool
|
||||||
|
meters []*StandardMeter
|
||||||
|
ticker *time.Ticker
|
||||||
|
}
|
||||||
|
|
||||||
|
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
|
||||||
|
|
||||||
|
// Ticks meters on the scheduled interval
|
||||||
|
func (ma *meterArbiter) tick() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ma.ticker.C:
|
||||||
|
ma.tickMeters()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ma *meterArbiter) tickMeters() {
|
||||||
|
ma.RLock()
|
||||||
|
defer ma.RUnlock()
|
||||||
|
for _, meter := range ma.meters {
|
||||||
|
meter.tick()
|
||||||
|
}
|
||||||
|
}
|
@ -1,39 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
type comboCounterRef struct {
|
|
||||||
usageCounter Counter
|
|
||||||
metricCounter Counter
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewComboCounterRef(name string) Counter {
|
|
||||||
cr := &comboCounterRef{}
|
|
||||||
cr.usageCounter = UsageStats.GetOrRegister(name, NewCounter).(Counter)
|
|
||||||
cr.metricCounter = MetricStats.GetOrRegister(name, NewCounter).(Counter)
|
|
||||||
return cr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c comboCounterRef) Clear() {
|
|
||||||
c.usageCounter.Clear()
|
|
||||||
c.metricCounter.Clear()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c comboCounterRef) Count() int64 {
|
|
||||||
panic("Count called on a combocounter ref")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dec panics.
|
|
||||||
func (c comboCounterRef) Dec(i int64) {
|
|
||||||
c.usageCounter.Dec(i)
|
|
||||||
c.metricCounter.Dec(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inc panics.
|
|
||||||
func (c comboCounterRef) Inc(i int64) {
|
|
||||||
c.usageCounter.Inc(i)
|
|
||||||
c.metricCounter.Inc(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (c comboCounterRef) Snapshot() Counter {
|
|
||||||
panic("snapshot called on a combocounter ref")
|
|
||||||
}
|
|
@ -1,31 +1,71 @@
|
|||||||
package metrics
|
package metrics
|
||||||
|
|
||||||
var UsageStats = NewRegistry()
|
var MetricStats Registry
|
||||||
var MetricStats = NewRegistry()
|
var UseNilMetrics bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// init with nil metrics
|
||||||
|
initMetricVars(&MetricSettings{})
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
M_Instance_Start = NewComboCounterRef("instance.start")
|
M_Instance_Start Counter
|
||||||
|
M_Page_Status_200 Counter
|
||||||
|
M_Page_Status_500 Counter
|
||||||
|
M_Page_Status_404 Counter
|
||||||
|
M_Api_Status_500 Counter
|
||||||
|
M_Api_Status_404 Counter
|
||||||
|
M_Api_User_SignUpStarted Counter
|
||||||
|
M_Api_User_SignUpCompleted Counter
|
||||||
|
M_Api_User_SignUpInvite Counter
|
||||||
|
M_Api_Dashboard_Save Timer
|
||||||
|
M_Api_Dashboard_Get Timer
|
||||||
|
M_Api_Dashboard_Search Timer
|
||||||
|
M_Api_Admin_User_Create Counter
|
||||||
|
M_Api_Login_Post Counter
|
||||||
|
M_Api_Login_OAuth Counter
|
||||||
|
M_Api_Org_Create Counter
|
||||||
|
M_Api_Dashboard_Snapshot_Create Counter
|
||||||
|
M_Api_Dashboard_Snapshot_External Counter
|
||||||
|
M_Api_Dashboard_Snapshot_Get Counter
|
||||||
|
M_Models_Dashboard_Insert Counter
|
||||||
|
|
||||||
M_Page_Status_200 = NewComboCounterRef("page.status.200")
|
// Timers
|
||||||
M_Page_Status_500 = NewComboCounterRef("page.status.500")
|
M_DataSource_ProxyReq_Timer Timer
|
||||||
M_Page_Status_404 = NewComboCounterRef("page.status.404")
|
|
||||||
|
|
||||||
M_Api_Status_500 = NewComboCounterRef("api.status.500")
|
|
||||||
M_Api_Status_404 = NewComboCounterRef("api.status.404")
|
|
||||||
|
|
||||||
M_Api_User_SignUpStarted = NewComboCounterRef("api.user.signup_started")
|
|
||||||
M_Api_User_SignUpCompleted = NewComboCounterRef("api.user.signup_completed")
|
|
||||||
M_Api_User_SignUpInvite = NewComboCounterRef("api.user.signup_invite")
|
|
||||||
M_Api_Dashboard_Get = NewComboCounterRef("api.dashboard.get")
|
|
||||||
M_Api_Dashboard_Post = NewComboCounterRef("api.dashboard.post")
|
|
||||||
M_Api_Admin_User_Create = NewComboCounterRef("api.admin.user_create")
|
|
||||||
M_Api_Login_Post = NewComboCounterRef("api.login.post")
|
|
||||||
M_Api_Login_OAuth = NewComboCounterRef("api.login.oauth")
|
|
||||||
M_Api_Org_Create = NewComboCounterRef("api.org.create")
|
|
||||||
|
|
||||||
M_Api_Dashboard_Snapshot_Create = NewComboCounterRef("api.dashboard_snapshot.create")
|
|
||||||
M_Api_Dashboard_Snapshot_External = NewComboCounterRef("api.dashboard_snapshot.external")
|
|
||||||
M_Api_Dashboard_Snapshot_Get = NewComboCounterRef("api.dashboard_snapshot.get")
|
|
||||||
|
|
||||||
M_Models_Dashboard_Insert = NewComboCounterRef("models.dashboard.insert")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func initMetricVars(settings *MetricSettings) {
|
||||||
|
UseNilMetrics = settings.Enabled == false
|
||||||
|
MetricStats = NewRegistry()
|
||||||
|
|
||||||
|
M_Instance_Start = RegCounter("instance_start")
|
||||||
|
|
||||||
|
M_Page_Status_200 = RegCounter("page.resp_status", "code", "200")
|
||||||
|
M_Page_Status_500 = RegCounter("page.resp_status", "code", "500")
|
||||||
|
M_Page_Status_404 = RegCounter("page.resp_status", "code", "404")
|
||||||
|
|
||||||
|
M_Api_Status_500 = RegCounter("api.resp_status", "code", "500")
|
||||||
|
M_Api_Status_404 = RegCounter("api.resp_status", "code", "404")
|
||||||
|
|
||||||
|
M_Api_User_SignUpStarted = RegCounter("api.user.signup_started")
|
||||||
|
M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed")
|
||||||
|
M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite")
|
||||||
|
|
||||||
|
M_Api_Dashboard_Save = RegTimer("api.dashboard.save")
|
||||||
|
M_Api_Dashboard_Get = RegTimer("api.dashboard.get")
|
||||||
|
M_Api_Dashboard_Search = RegTimer("api.dashboard.search")
|
||||||
|
|
||||||
|
M_Api_Admin_User_Create = RegCounter("api.admin.user_create")
|
||||||
|
M_Api_Login_Post = RegCounter("api.login.post")
|
||||||
|
M_Api_Login_OAuth = RegCounter("api.login.oauth")
|
||||||
|
M_Api_Org_Create = RegCounter("api.org.create")
|
||||||
|
|
||||||
|
M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create")
|
||||||
|
M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external")
|
||||||
|
M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get")
|
||||||
|
|
||||||
|
M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert")
|
||||||
|
|
||||||
|
// Timers
|
||||||
|
M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all")
|
||||||
|
}
|
||||||
|
@ -14,19 +14,46 @@ import (
|
|||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
)
|
)
|
||||||
|
|
||||||
func StartUsageReportLoop() chan struct{} {
|
func Init() {
|
||||||
|
settings := readSettings()
|
||||||
|
initMetricVars(settings)
|
||||||
|
go instrumentationLoop(settings)
|
||||||
|
}
|
||||||
|
|
||||||
|
func instrumentationLoop(settings *MetricSettings) chan struct{} {
|
||||||
M_Instance_Start.Inc(1)
|
M_Instance_Start.Inc(1)
|
||||||
|
|
||||||
ticker := time.NewTicker(time.Hour * 24)
|
onceEveryDayTick := time.NewTicker(time.Hour * 24)
|
||||||
|
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ticker.C:
|
case <-onceEveryDayTick.C:
|
||||||
sendUsageStats()
|
sendUsageStats()
|
||||||
|
case <-secondTicker.C:
|
||||||
|
if settings.Enabled {
|
||||||
|
sendMetrics(settings)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sendMetrics(settings *MetricSettings) {
|
||||||
|
if len(settings.Publishers) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics := MetricStats.GetSnapshots()
|
||||||
|
for _, publisher := range settings.Publishers {
|
||||||
|
publisher.Publish(metrics)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func sendUsageStats() {
|
func sendUsageStats() {
|
||||||
|
if !setting.ReportingEnabled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
log.Trace("Sending anonymous usage stats to stats.grafana.org")
|
log.Trace("Sending anonymous usage stats to stats.grafana.org")
|
||||||
|
|
||||||
version := strings.Replace(setting.BuildVersion, ".", "_", -1)
|
version := strings.Replace(setting.BuildVersion, ".", "_", -1)
|
||||||
@ -37,16 +64,6 @@ func sendUsageStats() {
|
|||||||
"metrics": metrics,
|
"metrics": metrics,
|
||||||
}
|
}
|
||||||
|
|
||||||
UsageStats.Each(func(name string, i interface{}) {
|
|
||||||
switch metric := i.(type) {
|
|
||||||
case Counter:
|
|
||||||
if metric.Count() > 0 {
|
|
||||||
metrics[name+".count"] = metric.Count()
|
|
||||||
metric.Clear()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
statsQuery := m.GetSystemStatsQuery{}
|
statsQuery := m.GetSystemStatsQuery{}
|
||||||
if err := bus.Dispatch(&statsQuery); err != nil {
|
if err := bus.Dispatch(&statsQuery); err != nil {
|
||||||
log.Error(3, "Failed to get system stats", err)
|
log.Error(3, "Failed to get system stats", err)
|
@ -1,102 +1,37 @@
|
|||||||
package metrics
|
package metrics
|
||||||
|
|
||||||
import (
|
import "sync"
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DuplicateMetric is the error returned by Registry.Register when a metric
|
|
||||||
// already exists. If you mean to Register that metric you must first
|
|
||||||
// Unregister the existing metric.
|
|
||||||
type DuplicateMetric string
|
|
||||||
|
|
||||||
func (err DuplicateMetric) Error() string {
|
|
||||||
return fmt.Sprintf("duplicate metric: %s", string(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
type Registry interface {
|
type Registry interface {
|
||||||
// Call the given function for each registered metric.
|
GetSnapshots() []Metric
|
||||||
Each(func(string, interface{}))
|
Register(metric Metric)
|
||||||
|
|
||||||
// Get the metric by the given name or nil if none is registered.
|
|
||||||
Get(string) interface{}
|
|
||||||
|
|
||||||
// Gets an existing metric or registers the given one.
|
|
||||||
// The interface can be the metric to register if not found in registry,
|
|
||||||
// or a function returning the metric for lazy instantiation.
|
|
||||||
GetOrRegister(string, interface{}) interface{}
|
|
||||||
|
|
||||||
// Register the given metric under the given name.
|
|
||||||
Register(string, interface{}) error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// The standard implementation of a Registry is a mutex-protected map
|
// The standard implementation of a Registry is a mutex-protected map
|
||||||
// of names to metrics.
|
// of names to metrics.
|
||||||
type StandardRegistry struct {
|
type StandardRegistry struct {
|
||||||
metrics map[string]interface{}
|
metrics []Metric
|
||||||
mutex sync.Mutex
|
mutex sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a new registry.
|
// Create a new registry.
|
||||||
func NewRegistry() Registry {
|
func NewRegistry() Registry {
|
||||||
return &StandardRegistry{metrics: make(map[string]interface{})}
|
return &StandardRegistry{
|
||||||
|
metrics: make([]Metric, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *StandardRegistry) Register(metric Metric) {
|
||||||
|
r.mutex.Lock()
|
||||||
|
defer r.mutex.Unlock()
|
||||||
|
r.metrics = append(r.metrics, metric)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Call the given function for each registered metric.
|
// Call the given function for each registered metric.
|
||||||
func (r *StandardRegistry) Each(f func(string, interface{})) {
|
func (r *StandardRegistry) GetSnapshots() []Metric {
|
||||||
for name, i := range r.registered() {
|
metrics := make([]Metric, len(r.metrics))
|
||||||
f(name, i)
|
for i, metric := range r.metrics {
|
||||||
}
|
metrics[i] = metric.Snapshot()
|
||||||
}
|
|
||||||
|
|
||||||
// Get the metric by the given name or nil if none is registered.
|
|
||||||
func (r *StandardRegistry) Get(name string) interface{} {
|
|
||||||
r.mutex.Lock()
|
|
||||||
defer r.mutex.Unlock()
|
|
||||||
return r.metrics[name]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets an existing metric or creates and registers a new one. Threadsafe
|
|
||||||
// alternative to calling Get and Register on failure.
|
|
||||||
// The interface can be the metric to register if not found in registry,
|
|
||||||
// or a function returning the metric for lazy instantiation.
|
|
||||||
func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
|
|
||||||
r.mutex.Lock()
|
|
||||||
defer r.mutex.Unlock()
|
|
||||||
if metric, ok := r.metrics[name]; ok {
|
|
||||||
return metric
|
|
||||||
}
|
|
||||||
if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
|
|
||||||
i = v.Call(nil)[0].Interface()
|
|
||||||
}
|
|
||||||
r.register(name, i)
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register the given metric under the given name. Returns a DuplicateMetric
|
|
||||||
// if a metric by the given name is already registered.
|
|
||||||
func (r *StandardRegistry) Register(name string, i interface{}) error {
|
|
||||||
r.mutex.Lock()
|
|
||||||
defer r.mutex.Unlock()
|
|
||||||
return r.register(name, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *StandardRegistry) register(name string, i interface{}) error {
|
|
||||||
if _, ok := r.metrics[name]; ok {
|
|
||||||
return DuplicateMetric(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.metrics[name] = i
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *StandardRegistry) registered() map[string]interface{} {
|
|
||||||
metrics := make(map[string]interface{}, len(r.metrics))
|
|
||||||
r.mutex.Lock()
|
|
||||||
defer r.mutex.Unlock()
|
|
||||||
for name, i := range r.metrics {
|
|
||||||
metrics[name] = i
|
|
||||||
}
|
}
|
||||||
return metrics
|
return metrics
|
||||||
}
|
}
|
||||||
|
607
pkg/metrics/sample.go
Normal file
607
pkg/metrics/sample.go
Normal file
@ -0,0 +1,607 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const rescaleThreshold = time.Hour
|
||||||
|
|
||||||
|
// Samples maintain a statistically-significant selection of values from
|
||||||
|
// a stream.
|
||||||
|
type Sample interface {
|
||||||
|
Clear()
|
||||||
|
Count() int64
|
||||||
|
Max() int64
|
||||||
|
Mean() float64
|
||||||
|
Min() int64
|
||||||
|
Percentile(float64) float64
|
||||||
|
Percentiles([]float64) []float64
|
||||||
|
Size() int
|
||||||
|
Snapshot() Sample
|
||||||
|
StdDev() float64
|
||||||
|
Sum() int64
|
||||||
|
Update(int64)
|
||||||
|
Values() []int64
|
||||||
|
Variance() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
|
||||||
|
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
|
||||||
|
// Decay Model for Streaming Systems".
|
||||||
|
//
|
||||||
|
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
|
||||||
|
type ExpDecaySample struct {
|
||||||
|
alpha float64
|
||||||
|
count int64
|
||||||
|
mutex sync.Mutex
|
||||||
|
reservoirSize int
|
||||||
|
t0, t1 time.Time
|
||||||
|
values *expDecaySampleHeap
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewExpDecaySample constructs a new exponentially-decaying sample with the
|
||||||
|
// given reservoir size and alpha.
|
||||||
|
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
|
||||||
|
s := &ExpDecaySample{
|
||||||
|
alpha: alpha,
|
||||||
|
reservoirSize: reservoirSize,
|
||||||
|
t0: time.Now(),
|
||||||
|
values: newExpDecaySampleHeap(reservoirSize),
|
||||||
|
}
|
||||||
|
s.t1 = s.t0.Add(rescaleThreshold)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears all samples.
|
||||||
|
func (s *ExpDecaySample) Clear() {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
s.count = 0
|
||||||
|
s.t0 = time.Now()
|
||||||
|
s.t1 = s.t0.Add(rescaleThreshold)
|
||||||
|
s.values.Clear()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of samples recorded, which may exceed the
|
||||||
|
// reservoir size.
|
||||||
|
func (s *ExpDecaySample) Count() int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return s.count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample, which may not be the maximum
|
||||||
|
// value ever to be part of the sample.
|
||||||
|
func (s *ExpDecaySample) Max() int64 {
|
||||||
|
return SampleMax(s.Values())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample.
|
||||||
|
func (s *ExpDecaySample) Mean() float64 {
|
||||||
|
return SampleMean(s.Values())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample, which may not be the minimum
|
||||||
|
// value ever to be part of the sample.
|
||||||
|
func (s *ExpDecaySample) Min() int64 {
|
||||||
|
return SampleMin(s.Values())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of values in the sample.
|
||||||
|
func (s *ExpDecaySample) Percentile(p float64) float64 {
|
||||||
|
return SamplePercentile(s.Values(), p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||||
|
// sample.
|
||||||
|
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
|
||||||
|
return SamplePercentiles(s.Values(), ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the size of the sample, which is at most the reservoir size.
|
||||||
|
func (s *ExpDecaySample) Size() int {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return s.values.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the sample.
|
||||||
|
func (s *ExpDecaySample) Snapshot() Sample {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
vals := s.values.Values()
|
||||||
|
values := make([]int64, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
values[i] = v.v
|
||||||
|
}
|
||||||
|
return &SampleSnapshot{
|
||||||
|
count: s.count,
|
||||||
|
values: values,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample.
|
||||||
|
func (s *ExpDecaySample) StdDev() float64 {
|
||||||
|
return SampleStdDev(s.Values())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns the sum of the values in the sample.
|
||||||
|
func (s *ExpDecaySample) Sum() int64 {
|
||||||
|
return SampleSum(s.Values())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update samples a new value.
|
||||||
|
func (s *ExpDecaySample) Update(v int64) {
|
||||||
|
s.update(time.Now(), v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Values returns a copy of the values in the sample.
|
||||||
|
func (s *ExpDecaySample) Values() []int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
vals := s.values.Values()
|
||||||
|
values := make([]int64, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
values[i] = v.v
|
||||||
|
}
|
||||||
|
return values
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of the values in the sample.
|
||||||
|
func (s *ExpDecaySample) Variance() float64 {
|
||||||
|
return SampleVariance(s.Values())
|
||||||
|
}
|
||||||
|
|
||||||
|
// update samples a new value at a particular timestamp. This is a method all
|
||||||
|
// its own to facilitate testing.
|
||||||
|
func (s *ExpDecaySample) update(t time.Time, v int64) {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
s.count++
|
||||||
|
if s.values.Size() == s.reservoirSize {
|
||||||
|
s.values.Pop()
|
||||||
|
}
|
||||||
|
s.values.Push(expDecaySample{
|
||||||
|
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
|
||||||
|
v: v,
|
||||||
|
})
|
||||||
|
if t.After(s.t1) {
|
||||||
|
values := s.values.Values()
|
||||||
|
t0 := s.t0
|
||||||
|
s.values.Clear()
|
||||||
|
s.t0 = t
|
||||||
|
s.t1 = s.t0.Add(rescaleThreshold)
|
||||||
|
for _, v := range values {
|
||||||
|
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
|
||||||
|
s.values.Push(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NilSample is a no-op Sample.
|
||||||
|
type NilSample struct{}
|
||||||
|
|
||||||
|
// Clear is a no-op.
|
||||||
|
func (NilSample) Clear() {}
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilSample) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Max is a no-op.
|
||||||
|
func (NilSample) Max() int64 { return 0 }
|
||||||
|
|
||||||
|
// Mean is a no-op.
|
||||||
|
func (NilSample) Mean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Min is a no-op.
|
||||||
|
func (NilSample) Min() int64 { return 0 }
|
||||||
|
|
||||||
|
// Percentile is a no-op.
|
||||||
|
func (NilSample) Percentile(p float64) float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Percentiles is a no-op.
|
||||||
|
func (NilSample) Percentiles(ps []float64) []float64 {
|
||||||
|
return make([]float64, len(ps))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size is a no-op.
|
||||||
|
func (NilSample) Size() int { return 0 }
|
||||||
|
|
||||||
|
// Sample is a no-op.
|
||||||
|
func (NilSample) Snapshot() Sample { return NilSample{} }
|
||||||
|
|
||||||
|
// StdDev is a no-op.
|
||||||
|
func (NilSample) StdDev() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Sum is a no-op.
|
||||||
|
func (NilSample) Sum() int64 { return 0 }
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilSample) Update(v int64) {}
|
||||||
|
|
||||||
|
// Values is a no-op.
|
||||||
|
func (NilSample) Values() []int64 { return []int64{} }
|
||||||
|
|
||||||
|
// Variance is a no-op.
|
||||||
|
func (NilSample) Variance() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// SampleMax returns the maximum value of the slice of int64.
|
||||||
|
func SampleMax(values []int64) int64 {
|
||||||
|
if 0 == len(values) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var max int64 = math.MinInt64
|
||||||
|
for _, v := range values {
|
||||||
|
if max < v {
|
||||||
|
max = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return max
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleMean returns the mean value of the slice of int64.
|
||||||
|
func SampleMean(values []int64) float64 {
|
||||||
|
if 0 == len(values) {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
return float64(SampleSum(values)) / float64(len(values))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleMin returns the minimum value of the slice of int64.
|
||||||
|
func SampleMin(values []int64) int64 {
|
||||||
|
if 0 == len(values) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var min int64 = math.MaxInt64
|
||||||
|
for _, v := range values {
|
||||||
|
if min > v {
|
||||||
|
min = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return min
|
||||||
|
}
|
||||||
|
|
||||||
|
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
|
||||||
|
func SamplePercentile(values int64Slice, p float64) float64 {
|
||||||
|
return SamplePercentiles(values, []float64{p})[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
|
||||||
|
// int64.
|
||||||
|
func SamplePercentiles(values int64Slice, ps []float64) []float64 {
|
||||||
|
scores := make([]float64, len(ps))
|
||||||
|
size := len(values)
|
||||||
|
if size > 0 {
|
||||||
|
sort.Sort(values)
|
||||||
|
for i, p := range ps {
|
||||||
|
pos := p * float64(size+1)
|
||||||
|
if pos < 1.0 {
|
||||||
|
scores[i] = float64(values[0])
|
||||||
|
} else if pos >= float64(size) {
|
||||||
|
scores[i] = float64(values[size-1])
|
||||||
|
} else {
|
||||||
|
lower := float64(values[int(pos)-1])
|
||||||
|
upper := float64(values[int(pos)])
|
||||||
|
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return scores
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleSnapshot is a read-only copy of another Sample.
|
||||||
|
type SampleSnapshot struct {
|
||||||
|
count int64
|
||||||
|
values []int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear panics.
|
||||||
|
func (*SampleSnapshot) Clear() {
|
||||||
|
panic("Clear called on a SampleSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of inputs at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Count() int64 { return s.count }
|
||||||
|
|
||||||
|
// Max returns the maximal value at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
|
||||||
|
|
||||||
|
// Mean returns the mean value at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
|
||||||
|
|
||||||
|
// Min returns the minimal value at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of values at the time the
|
||||||
|
// snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Percentile(p float64) float64 {
|
||||||
|
return SamplePercentile(s.values, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of values at the time
|
||||||
|
// the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
|
||||||
|
return SamplePercentiles(s.values, ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the size of the sample at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Size() int { return len(s.values) }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (s *SampleSnapshot) Snapshot() Sample { return s }
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of values at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
|
||||||
|
|
||||||
|
// Sum returns the sum of values at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (*SampleSnapshot) Update(int64) {
|
||||||
|
panic("Update called on a SampleSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Values returns a copy of the values in the sample.
|
||||||
|
func (s *SampleSnapshot) Values() []int64 {
|
||||||
|
values := make([]int64, len(s.values))
|
||||||
|
copy(values, s.values)
|
||||||
|
return values
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of values at the time the snapshot was taken.
|
||||||
|
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
|
||||||
|
|
||||||
|
// SampleStdDev returns the standard deviation of the slice of int64.
|
||||||
|
func SampleStdDev(values []int64) float64 {
|
||||||
|
return math.Sqrt(SampleVariance(values))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleSum returns the sum of the slice of int64.
|
||||||
|
func SampleSum(values []int64) int64 {
|
||||||
|
var sum int64
|
||||||
|
for _, v := range values {
|
||||||
|
sum += v
|
||||||
|
}
|
||||||
|
return sum
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleVariance returns the variance of the slice of int64.
|
||||||
|
func SampleVariance(values []int64) float64 {
|
||||||
|
if 0 == len(values) {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
m := SampleMean(values)
|
||||||
|
var sum float64
|
||||||
|
for _, v := range values {
|
||||||
|
d := float64(v) - m
|
||||||
|
sum += d * d
|
||||||
|
}
|
||||||
|
return sum / float64(len(values))
|
||||||
|
}
|
||||||
|
|
||||||
|
// A uniform sample using Vitter's Algorithm R.
|
||||||
|
//
|
||||||
|
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
|
||||||
|
type UniformSample struct {
|
||||||
|
count int64
|
||||||
|
mutex sync.Mutex
|
||||||
|
reservoirSize int
|
||||||
|
values []int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUniformSample constructs a new uniform sample with the given reservoir
|
||||||
|
// size.
|
||||||
|
func NewUniformSample(reservoirSize int) Sample {
|
||||||
|
return &UniformSample{
|
||||||
|
reservoirSize: reservoirSize,
|
||||||
|
values: make([]int64, 0, reservoirSize),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears all samples.
|
||||||
|
func (s *UniformSample) Clear() {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
s.count = 0
|
||||||
|
s.values = make([]int64, 0, s.reservoirSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of samples recorded, which may exceed the
|
||||||
|
// reservoir size.
|
||||||
|
func (s *UniformSample) Count() int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return s.count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample, which may not be the maximum
|
||||||
|
// value ever to be part of the sample.
|
||||||
|
func (s *UniformSample) Max() int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SampleMax(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample.
|
||||||
|
func (s *UniformSample) Mean() float64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SampleMean(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample, which may not be the minimum
|
||||||
|
// value ever to be part of the sample.
|
||||||
|
func (s *UniformSample) Min() int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SampleMin(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of values in the sample.
|
||||||
|
func (s *UniformSample) Percentile(p float64) float64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SamplePercentile(s.values, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||||
|
// sample.
|
||||||
|
func (s *UniformSample) Percentiles(ps []float64) []float64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SamplePercentiles(s.values, ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the size of the sample, which is at most the reservoir size.
|
||||||
|
func (s *UniformSample) Size() int {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return len(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the sample.
|
||||||
|
func (s *UniformSample) Snapshot() Sample {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
values := make([]int64, len(s.values))
|
||||||
|
copy(values, s.values)
|
||||||
|
return &SampleSnapshot{
|
||||||
|
count: s.count,
|
||||||
|
values: values,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample.
|
||||||
|
func (s *UniformSample) StdDev() float64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SampleStdDev(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns the sum of the values in the sample.
|
||||||
|
func (s *UniformSample) Sum() int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SampleSum(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update samples a new value.
|
||||||
|
func (s *UniformSample) Update(v int64) {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
s.count++
|
||||||
|
if len(s.values) < s.reservoirSize {
|
||||||
|
s.values = append(s.values, v)
|
||||||
|
} else {
|
||||||
|
r := rand.Int63n(s.count)
|
||||||
|
if r < int64(len(s.values)) {
|
||||||
|
s.values[int(r)] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Values returns a copy of the values in the sample.
|
||||||
|
func (s *UniformSample) Values() []int64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
values := make([]int64, len(s.values))
|
||||||
|
copy(values, s.values)
|
||||||
|
return values
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of the values in the sample.
|
||||||
|
func (s *UniformSample) Variance() float64 {
|
||||||
|
s.mutex.Lock()
|
||||||
|
defer s.mutex.Unlock()
|
||||||
|
return SampleVariance(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// expDecaySample represents an individual sample in a heap.
|
||||||
|
type expDecaySample struct {
|
||||||
|
k float64
|
||||||
|
v int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
|
||||||
|
return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// expDecaySampleHeap is a min-heap of expDecaySamples.
|
||||||
|
// The internal implementation is copied from the standard library's container/heap
|
||||||
|
type expDecaySampleHeap struct {
|
||||||
|
s []expDecaySample
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) Clear() {
|
||||||
|
h.s = h.s[:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) Push(s expDecaySample) {
|
||||||
|
n := len(h.s)
|
||||||
|
h.s = h.s[0 : n+1]
|
||||||
|
h.s[n] = s
|
||||||
|
h.up(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) Pop() expDecaySample {
|
||||||
|
n := len(h.s) - 1
|
||||||
|
h.s[0], h.s[n] = h.s[n], h.s[0]
|
||||||
|
h.down(0, n)
|
||||||
|
|
||||||
|
n = len(h.s)
|
||||||
|
s := h.s[n-1]
|
||||||
|
h.s = h.s[0 : n-1]
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) Size() int {
|
||||||
|
return len(h.s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) Values() []expDecaySample {
|
||||||
|
return h.s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) up(j int) {
|
||||||
|
for {
|
||||||
|
i := (j - 1) / 2 // parent
|
||||||
|
if i == j || !(h.s[j].k < h.s[i].k) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
h.s[i], h.s[j] = h.s[j], h.s[i]
|
||||||
|
j = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *expDecaySampleHeap) down(i, n int) {
|
||||||
|
for {
|
||||||
|
j1 := 2*i + 1
|
||||||
|
if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
|
||||||
|
break
|
||||||
|
}
|
||||||
|
j := j1 // left child
|
||||||
|
if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
|
||||||
|
j = j2 // = 2*i + 2 // right child
|
||||||
|
}
|
||||||
|
if !(h.s[j].k < h.s[i].k) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
h.s[i], h.s[j] = h.s[j], h.s[i]
|
||||||
|
i = j
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type int64Slice []int64
|
||||||
|
|
||||||
|
func (p int64Slice) Len() int { return len(p) }
|
||||||
|
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||||
|
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
367
pkg/metrics/sample_test.go
Normal file
367
pkg/metrics/sample_test.go
Normal file
@ -0,0 +1,367 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
|
||||||
|
// expensive computations like Variance, the cost of copying the Sample, as
|
||||||
|
// approximated by a make and copy, is much greater than the cost of the
|
||||||
|
// computation for small samples and only slightly less for large samples.
|
||||||
|
func BenchmarkCompute1000(b *testing.B) {
|
||||||
|
s := make([]int64, 1000)
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
s[i] = int64(i)
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
SampleVariance(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompute1000000(b *testing.B) {
|
||||||
|
s := make([]int64, 1000000)
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
s[i] = int64(i)
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
SampleVariance(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCopy1000(b *testing.B) {
|
||||||
|
s := make([]int64, 1000)
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
s[i] = int64(i)
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
sCopy := make([]int64, len(s))
|
||||||
|
copy(sCopy, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCopy1000000(b *testing.B) {
|
||||||
|
s := make([]int64, 1000000)
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
s[i] = int64(i)
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
sCopy := make([]int64, len(s))
|
||||||
|
copy(sCopy, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkExpDecaySample257(b *testing.B) {
|
||||||
|
benchmarkSample(b, NewExpDecaySample(257, 0.015))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkExpDecaySample514(b *testing.B) {
|
||||||
|
benchmarkSample(b, NewExpDecaySample(514, 0.015))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkExpDecaySample1028(b *testing.B) {
|
||||||
|
benchmarkSample(b, NewExpDecaySample(1028, 0.015))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkUniformSample257(b *testing.B) {
|
||||||
|
benchmarkSample(b, NewUniformSample(257))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkUniformSample514(b *testing.B) {
|
||||||
|
benchmarkSample(b, NewUniformSample(514))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkUniformSample1028(b *testing.B) {
|
||||||
|
benchmarkSample(b, NewUniformSample(1028))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpDecaySample10(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewExpDecaySample(100, 0.99)
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
if size := s.Count(); 10 != size {
|
||||||
|
t.Errorf("s.Count(): 10 != %v\n", size)
|
||||||
|
}
|
||||||
|
if size := s.Size(); 10 != size {
|
||||||
|
t.Errorf("s.Size(): 10 != %v\n", size)
|
||||||
|
}
|
||||||
|
if l := len(s.Values()); 10 != l {
|
||||||
|
t.Errorf("len(s.Values()): 10 != %v\n", l)
|
||||||
|
}
|
||||||
|
for _, v := range s.Values() {
|
||||||
|
if v > 10 || v < 0 {
|
||||||
|
t.Errorf("out of range [0, 10): %v\n", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpDecaySample100(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewExpDecaySample(1000, 0.01)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
if size := s.Count(); 100 != size {
|
||||||
|
t.Errorf("s.Count(): 100 != %v\n", size)
|
||||||
|
}
|
||||||
|
if size := s.Size(); 100 != size {
|
||||||
|
t.Errorf("s.Size(): 100 != %v\n", size)
|
||||||
|
}
|
||||||
|
if l := len(s.Values()); 100 != l {
|
||||||
|
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
||||||
|
}
|
||||||
|
for _, v := range s.Values() {
|
||||||
|
if v > 100 || v < 0 {
|
||||||
|
t.Errorf("out of range [0, 100): %v\n", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpDecaySample1000(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewExpDecaySample(100, 0.99)
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
if size := s.Count(); 1000 != size {
|
||||||
|
t.Errorf("s.Count(): 1000 != %v\n", size)
|
||||||
|
}
|
||||||
|
if size := s.Size(); 100 != size {
|
||||||
|
t.Errorf("s.Size(): 100 != %v\n", size)
|
||||||
|
}
|
||||||
|
if l := len(s.Values()); 100 != l {
|
||||||
|
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
||||||
|
}
|
||||||
|
for _, v := range s.Values() {
|
||||||
|
if v > 1000 || v < 0 {
|
||||||
|
t.Errorf("out of range [0, 1000): %v\n", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test makes sure that the sample's priority is not amplified by using
|
||||||
|
// nanosecond duration since start rather than second duration since start.
|
||||||
|
// The priority becomes +Inf quickly after starting if this is done,
|
||||||
|
// effectively freezing the set of samples until a rescale step happens.
|
||||||
|
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewExpDecaySample(100, 0.99)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
s.Update(10)
|
||||||
|
}
|
||||||
|
time.Sleep(1 * time.Millisecond)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
s.Update(20)
|
||||||
|
}
|
||||||
|
v := s.Values()
|
||||||
|
avg := float64(0)
|
||||||
|
for i := 0; i < len(v); i++ {
|
||||||
|
avg += float64(v[i])
|
||||||
|
}
|
||||||
|
avg /= float64(len(v))
|
||||||
|
if avg > 16 || avg < 14 {
|
||||||
|
t.Errorf("out of range [14, 16]: %v\n", avg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpDecaySampleRescale(t *testing.T) {
|
||||||
|
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
|
||||||
|
s.update(time.Now(), 1)
|
||||||
|
s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
|
||||||
|
for _, v := range s.values.Values() {
|
||||||
|
if v.k == 0.0 {
|
||||||
|
t.Fatal("v.k == 0.0")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpDecaySampleSnapshot(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewExpDecaySample(100, 0.99)
|
||||||
|
for i := 1; i <= 10000; i++ {
|
||||||
|
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
|
||||||
|
}
|
||||||
|
snapshot := s.Snapshot()
|
||||||
|
s.Update(1)
|
||||||
|
testExpDecaySampleStatistics(t, snapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpDecaySampleStatistics(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewExpDecaySample(100, 0.99)
|
||||||
|
for i := 1; i <= 10000; i++ {
|
||||||
|
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
|
||||||
|
}
|
||||||
|
testExpDecaySampleStatistics(t, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUniformSample(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewUniformSample(100)
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
if size := s.Count(); 1000 != size {
|
||||||
|
t.Errorf("s.Count(): 1000 != %v\n", size)
|
||||||
|
}
|
||||||
|
if size := s.Size(); 100 != size {
|
||||||
|
t.Errorf("s.Size(): 100 != %v\n", size)
|
||||||
|
}
|
||||||
|
if l := len(s.Values()); 100 != l {
|
||||||
|
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
||||||
|
}
|
||||||
|
for _, v := range s.Values() {
|
||||||
|
if v > 1000 || v < 0 {
|
||||||
|
t.Errorf("out of range [0, 100): %v\n", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUniformSampleIncludesTail(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewUniformSample(100)
|
||||||
|
max := 100
|
||||||
|
for i := 0; i < max; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
v := s.Values()
|
||||||
|
sum := 0
|
||||||
|
exp := (max - 1) * max / 2
|
||||||
|
for i := 0; i < len(v); i++ {
|
||||||
|
sum += int(v[i])
|
||||||
|
}
|
||||||
|
if exp != sum {
|
||||||
|
t.Errorf("sum: %v != %v\n", exp, sum)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUniformSampleSnapshot(t *testing.T) {
|
||||||
|
s := NewUniformSample(100)
|
||||||
|
for i := 1; i <= 10000; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
snapshot := s.Snapshot()
|
||||||
|
s.Update(1)
|
||||||
|
testUniformSampleStatistics(t, snapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUniformSampleStatistics(t *testing.T) {
|
||||||
|
rand.Seed(1)
|
||||||
|
s := NewUniformSample(100)
|
||||||
|
for i := 1; i <= 10000; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
testUniformSampleStatistics(t, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkSample(b *testing.B, s Sample) {
|
||||||
|
var memStats runtime.MemStats
|
||||||
|
runtime.ReadMemStats(&memStats)
|
||||||
|
pauseTotalNs := memStats.PauseTotalNs
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
s.Update(1)
|
||||||
|
}
|
||||||
|
b.StopTimer()
|
||||||
|
runtime.GC()
|
||||||
|
runtime.ReadMemStats(&memStats)
|
||||||
|
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
|
||||||
|
if count := s.Count(); 10000 != count {
|
||||||
|
t.Errorf("s.Count(): 10000 != %v\n", count)
|
||||||
|
}
|
||||||
|
if min := s.Min(); 107 != min {
|
||||||
|
t.Errorf("s.Min(): 107 != %v\n", min)
|
||||||
|
}
|
||||||
|
if max := s.Max(); 10000 != max {
|
||||||
|
t.Errorf("s.Max(): 10000 != %v\n", max)
|
||||||
|
}
|
||||||
|
if mean := s.Mean(); 4965.98 != mean {
|
||||||
|
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
|
||||||
|
}
|
||||||
|
if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
|
||||||
|
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
|
||||||
|
}
|
||||||
|
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||||
|
if 4615 != ps[0] {
|
||||||
|
t.Errorf("median: 4615 != %v\n", ps[0])
|
||||||
|
}
|
||||||
|
if 7672 != ps[1] {
|
||||||
|
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
|
||||||
|
}
|
||||||
|
if 9998.99 != ps[2] {
|
||||||
|
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testUniformSampleStatistics(t *testing.T, s Sample) {
|
||||||
|
if count := s.Count(); 10000 != count {
|
||||||
|
t.Errorf("s.Count(): 10000 != %v\n", count)
|
||||||
|
}
|
||||||
|
if min := s.Min(); 37 != min {
|
||||||
|
t.Errorf("s.Min(): 37 != %v\n", min)
|
||||||
|
}
|
||||||
|
if max := s.Max(); 9989 != max {
|
||||||
|
t.Errorf("s.Max(): 9989 != %v\n", max)
|
||||||
|
}
|
||||||
|
if mean := s.Mean(); 4748.14 != mean {
|
||||||
|
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
|
||||||
|
}
|
||||||
|
if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
|
||||||
|
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
|
||||||
|
}
|
||||||
|
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
|
||||||
|
if 4599 != ps[0] {
|
||||||
|
t.Errorf("median: 4599 != %v\n", ps[0])
|
||||||
|
}
|
||||||
|
if 7380.5 != ps[1] {
|
||||||
|
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
|
||||||
|
}
|
||||||
|
if 9986.429999999998 != ps[2] {
|
||||||
|
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
|
||||||
|
// concurrent Update and Count calls on Sample when test is called with -race
|
||||||
|
// argument
|
||||||
|
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping in short mode")
|
||||||
|
}
|
||||||
|
s := NewUniformSample(100)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
s.Update(int64(i))
|
||||||
|
}
|
||||||
|
quit := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
t := time.NewTicker(10 * time.Millisecond)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-t.C:
|
||||||
|
s.Update(rand.Int63())
|
||||||
|
case <-quit:
|
||||||
|
t.Stop()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
s.Count()
|
||||||
|
time.Sleep(5 * time.Millisecond)
|
||||||
|
}
|
||||||
|
quit <- struct{}{}
|
||||||
|
}
|
46
pkg/metrics/settings.go
Normal file
46
pkg/metrics/settings.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/grafana/grafana/pkg/log"
|
||||||
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MetricPublisher interface {
|
||||||
|
Publish(metrics []Metric)
|
||||||
|
}
|
||||||
|
|
||||||
|
type MetricSettings struct {
|
||||||
|
Enabled bool
|
||||||
|
IntervalSeconds int64
|
||||||
|
|
||||||
|
Publishers []MetricPublisher
|
||||||
|
}
|
||||||
|
|
||||||
|
func readSettings() *MetricSettings {
|
||||||
|
var settings = &MetricSettings{
|
||||||
|
Enabled: false,
|
||||||
|
Publishers: make([]MetricPublisher, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
var section, err = setting.Cfg.GetSection("metrics")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(3, "Unable to find metrics config section")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
settings.Enabled = section.Key("enabled").MustBool(false)
|
||||||
|
settings.IntervalSeconds = section.Key("interval_seconds").MustInt64(10)
|
||||||
|
|
||||||
|
if !settings.Enabled {
|
||||||
|
return settings
|
||||||
|
}
|
||||||
|
|
||||||
|
if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
|
||||||
|
log.Error(3, "Metrics: Failed to init Graphite metric publisher", err)
|
||||||
|
} else if graphitePublisher != nil {
|
||||||
|
log.Info("Metrics: Graphite publisher initialized")
|
||||||
|
settings.Publishers = append(settings.Publishers, graphitePublisher)
|
||||||
|
}
|
||||||
|
|
||||||
|
return settings
|
||||||
|
}
|
309
pkg/metrics/timer.go
Normal file
309
pkg/metrics/timer.go
Normal file
@ -0,0 +1,309 @@
|
|||||||
|
// includes code from
|
||||||
|
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||||
|
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Timers capture the duration and rate of events.
|
||||||
|
type Timer interface {
|
||||||
|
Metric
|
||||||
|
|
||||||
|
Count() int64
|
||||||
|
Max() int64
|
||||||
|
Mean() float64
|
||||||
|
Min() int64
|
||||||
|
Percentile(float64) float64
|
||||||
|
Percentiles([]float64) []float64
|
||||||
|
Rate1() float64
|
||||||
|
Rate5() float64
|
||||||
|
Rate15() float64
|
||||||
|
RateMean() float64
|
||||||
|
StdDev() float64
|
||||||
|
Sum() int64
|
||||||
|
Time(func())
|
||||||
|
Update(time.Duration)
|
||||||
|
UpdateSince(time.Time)
|
||||||
|
Variance() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
|
||||||
|
func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilTimer{}
|
||||||
|
}
|
||||||
|
return &StandardTimer{
|
||||||
|
MetricMeta: meta,
|
||||||
|
histogram: h,
|
||||||
|
meter: m,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTimer constructs a new StandardTimer using an exponentially-decaying
|
||||||
|
// sample with the same reservoir size and alpha as UNIX load averages.
|
||||||
|
func NewTimer(meta *MetricMeta) Timer {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilTimer{}
|
||||||
|
}
|
||||||
|
return &StandardTimer{
|
||||||
|
MetricMeta: meta,
|
||||||
|
histogram: NewHistogram(meta, NewExpDecaySample(1028, 0.015)),
|
||||||
|
meter: NewMeter(meta),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RegTimer(name string, tagStrings ...string) Timer {
|
||||||
|
tr := NewTimer(NewMetricMeta(name, tagStrings))
|
||||||
|
MetricStats.Register(tr)
|
||||||
|
return tr
|
||||||
|
}
|
||||||
|
|
||||||
|
// NilTimer is a no-op Timer.
|
||||||
|
type NilTimer struct {
|
||||||
|
*MetricMeta
|
||||||
|
h Histogram
|
||||||
|
m Meter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilTimer) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Max is a no-op.
|
||||||
|
func (NilTimer) Max() int64 { return 0 }
|
||||||
|
|
||||||
|
// Mean is a no-op.
|
||||||
|
func (NilTimer) Mean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Min is a no-op.
|
||||||
|
func (NilTimer) Min() int64 { return 0 }
|
||||||
|
|
||||||
|
// Percentile is a no-op.
|
||||||
|
func (NilTimer) Percentile(p float64) float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Percentiles is a no-op.
|
||||||
|
func (NilTimer) Percentiles(ps []float64) []float64 {
|
||||||
|
return make([]float64, len(ps))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 is a no-op.
|
||||||
|
func (NilTimer) Rate1() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Rate5 is a no-op.
|
||||||
|
func (NilTimer) Rate5() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Rate15 is a no-op.
|
||||||
|
func (NilTimer) Rate15() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// RateMean is a no-op.
|
||||||
|
func (NilTimer) RateMean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (n NilTimer) Snapshot() Metric { return n }
|
||||||
|
|
||||||
|
// StdDev is a no-op.
|
||||||
|
func (NilTimer) StdDev() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Sum is a no-op.
|
||||||
|
func (NilTimer) Sum() int64 { return 0 }
|
||||||
|
|
||||||
|
// Time is a no-op.
|
||||||
|
func (NilTimer) Time(func()) {}
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilTimer) Update(time.Duration) {}
|
||||||
|
|
||||||
|
// UpdateSince is a no-op.
|
||||||
|
func (NilTimer) UpdateSince(time.Time) {}
|
||||||
|
|
||||||
|
// Variance is a no-op.
|
||||||
|
func (NilTimer) Variance() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// StandardTimer is the standard implementation of a Timer and uses a Histogram
|
||||||
|
// and Meter.
|
||||||
|
type StandardTimer struct {
|
||||||
|
*MetricMeta
|
||||||
|
histogram Histogram
|
||||||
|
meter Meter
|
||||||
|
mutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of events recorded.
|
||||||
|
func (t *StandardTimer) Count() int64 {
|
||||||
|
return t.histogram.Count()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample.
|
||||||
|
func (t *StandardTimer) Max() int64 {
|
||||||
|
return t.histogram.Max()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample.
|
||||||
|
func (t *StandardTimer) Mean() float64 {
|
||||||
|
return t.histogram.Mean()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample.
|
||||||
|
func (t *StandardTimer) Min() int64 {
|
||||||
|
return t.histogram.Min()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of the values in the sample.
|
||||||
|
func (t *StandardTimer) Percentile(p float64) float64 {
|
||||||
|
return t.histogram.Percentile(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
||||||
|
// sample.
|
||||||
|
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
|
||||||
|
return t.histogram.Percentiles(ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 returns the one-minute moving average rate of events per second.
|
||||||
|
func (t *StandardTimer) Rate1() float64 {
|
||||||
|
return t.meter.Rate1()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate5 returns the five-minute moving average rate of events per second.
|
||||||
|
func (t *StandardTimer) Rate5() float64 {
|
||||||
|
return t.meter.Rate5()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
||||||
|
func (t *StandardTimer) Rate15() float64 {
|
||||||
|
return t.meter.Rate15()
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateMean returns the meter's mean rate of events per second.
|
||||||
|
func (t *StandardTimer) RateMean() float64 {
|
||||||
|
return t.meter.RateMean()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the timer.
|
||||||
|
func (t *StandardTimer) Snapshot() Metric {
|
||||||
|
t.mutex.Lock()
|
||||||
|
defer t.mutex.Unlock()
|
||||||
|
return &TimerSnapshot{
|
||||||
|
MetricMeta: t.MetricMeta,
|
||||||
|
histogram: t.histogram.Snapshot().(*HistogramSnapshot),
|
||||||
|
meter: t.meter.Snapshot().(*MeterSnapshot),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample.
|
||||||
|
func (t *StandardTimer) StdDev() float64 {
|
||||||
|
return t.histogram.StdDev()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns the sum in the sample.
|
||||||
|
func (t *StandardTimer) Sum() int64 {
|
||||||
|
return t.histogram.Sum()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record the duration of the execution of the given function.
|
||||||
|
func (t *StandardTimer) Time(f func()) {
|
||||||
|
ts := time.Now()
|
||||||
|
f()
|
||||||
|
t.Update(time.Since(ts))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record the duration of an event.
|
||||||
|
func (t *StandardTimer) Update(d time.Duration) {
|
||||||
|
t.mutex.Lock()
|
||||||
|
defer t.mutex.Unlock()
|
||||||
|
t.histogram.Update(int64(d))
|
||||||
|
t.meter.Mark(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record the duration of an event that started at a time and ends now.
|
||||||
|
func (t *StandardTimer) UpdateSince(ts time.Time) {
|
||||||
|
t.mutex.Lock()
|
||||||
|
defer t.mutex.Unlock()
|
||||||
|
t.histogram.Update(int64(time.Since(ts)))
|
||||||
|
t.meter.Mark(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of the values in the sample.
|
||||||
|
func (t *StandardTimer) Variance() float64 {
|
||||||
|
return t.histogram.Variance()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimerSnapshot is a read-only copy of another Timer.
|
||||||
|
type TimerSnapshot struct {
|
||||||
|
*MetricMeta
|
||||||
|
histogram *HistogramSnapshot
|
||||||
|
meter *MeterSnapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of events recorded at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
|
||||||
|
|
||||||
|
// Max returns the maximum value at the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
|
||||||
|
|
||||||
|
// Mean returns the mean value at the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
|
||||||
|
|
||||||
|
// Min returns the minimum value at the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of sampled values at the time the
|
||||||
|
// snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Percentile(p float64) float64 {
|
||||||
|
return t.histogram.Percentile(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of sampled values at
|
||||||
|
// the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
|
||||||
|
return t.histogram.Percentiles(ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 returns the one-minute moving average rate of events per second at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
|
||||||
|
|
||||||
|
// Rate5 returns the five-minute moving average rate of events per second at
|
||||||
|
// the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
|
||||||
|
|
||||||
|
// Rate15 returns the fifteen-minute moving average rate of events per second
|
||||||
|
// at the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
|
||||||
|
|
||||||
|
// RateMean returns the meter's mean rate of events per second at the time the
|
||||||
|
// snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (t *TimerSnapshot) Snapshot() Metric { return t }
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values at the time the snapshot
|
||||||
|
// was taken.
|
||||||
|
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
|
||||||
|
|
||||||
|
// Sum returns the sum at the time the snapshot was taken.
|
||||||
|
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
|
||||||
|
|
||||||
|
// Time panics.
|
||||||
|
func (*TimerSnapshot) Time(func()) {
|
||||||
|
panic("Time called on a TimerSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (*TimerSnapshot) Update(time.Duration) {
|
||||||
|
panic("Update called on a TimerSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSince panics.
|
||||||
|
func (*TimerSnapshot) UpdateSince(time.Time) {
|
||||||
|
panic("UpdateSince called on a TimerSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of the values at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
|
@ -21,6 +21,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/grafana/grafana/pkg/log"
|
"github.com/grafana/grafana/pkg/log"
|
||||||
|
"github.com/grafana/grafana/pkg/metrics"
|
||||||
"github.com/grafana/grafana/pkg/setting"
|
"github.com/grafana/grafana/pkg/setting"
|
||||||
"gopkg.in/macaron.v1"
|
"gopkg.in/macaron.v1"
|
||||||
)
|
)
|
||||||
@ -28,6 +29,7 @@ import (
|
|||||||
func Logger() macaron.Handler {
|
func Logger() macaron.Handler {
|
||||||
return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
|
return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
c.Data["perfmon.start"] = start
|
||||||
|
|
||||||
uname := c.GetCookie(setting.CookieUserName)
|
uname := c.GetCookie(setting.CookieUserName)
|
||||||
if len(uname) == 0 {
|
if len(uname) == 0 {
|
||||||
@ -37,7 +39,13 @@ func Logger() macaron.Handler {
|
|||||||
rw := res.(macaron.ResponseWriter)
|
rw := res.(macaron.ResponseWriter)
|
||||||
c.Next()
|
c.Next()
|
||||||
|
|
||||||
content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dus", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), time.Since(start)/time.Microsecond)
|
timeTakenMs := time.Since(start) / time.Millisecond
|
||||||
|
content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dms", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), timeTakenMs)
|
||||||
|
|
||||||
|
if timer, ok := c.Data["perfmon.timer"]; ok {
|
||||||
|
timerTyped := timer.(metrics.Timer)
|
||||||
|
timerTyped.Update(timeTakenMs)
|
||||||
|
}
|
||||||
|
|
||||||
switch rw.Status() {
|
switch rw.Status() {
|
||||||
case 200, 304:
|
case 200, 304:
|
||||||
|
@ -257,3 +257,7 @@ func (ctx *Context) JsonApiErr(status int, message string, err error) {
|
|||||||
func (ctx *Context) HasUserRole(role m.RoleType) bool {
|
func (ctx *Context) HasUserRole(role m.RoleType) bool {
|
||||||
return ctx.OrgRole.Includes(role)
|
return ctx.OrgRole.Includes(role)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ctx *Context) TimeRequest(timer metrics.Timer) {
|
||||||
|
ctx.Data["perfmon.timer"] = timer
|
||||||
|
}
|
||||||
|
12
pkg/middleware/perf.go
Normal file
12
pkg/middleware/perf.go
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"gopkg.in/macaron.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func MeasureRequestTime() macaron.Handler {
|
||||||
|
return func(res http.ResponseWriter, req *http.Request, c *Context) {
|
||||||
|
}
|
||||||
|
}
|
@ -40,6 +40,7 @@ var (
|
|||||||
Env string = DEV
|
Env string = DEV
|
||||||
AppUrl string
|
AppUrl string
|
||||||
AppSubUrl string
|
AppSubUrl string
|
||||||
|
InstanceName string
|
||||||
|
|
||||||
// build
|
// build
|
||||||
BuildVersion string
|
BuildVersion string
|
||||||
@ -262,6 +263,12 @@ func evalEnvVarExpression(value string) string {
|
|||||||
envVar = strings.TrimPrefix(envVar, "${")
|
envVar = strings.TrimPrefix(envVar, "${")
|
||||||
envVar = strings.TrimSuffix(envVar, "}")
|
envVar = strings.TrimSuffix(envVar, "}")
|
||||||
envValue := os.Getenv(envVar)
|
envValue := os.Getenv(envVar)
|
||||||
|
|
||||||
|
// if env variable is hostname and it is emtpy use os.Hostname as default
|
||||||
|
if envVar == "HOSTNAME" && envValue == "" {
|
||||||
|
envValue, _ = os.Hostname()
|
||||||
|
}
|
||||||
|
|
||||||
return envValue
|
return envValue
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -398,11 +405,28 @@ func validateStaticRootPath() error {
|
|||||||
return fmt.Errorf("Failed to detect generated css or javascript files in static root (%s), have you executed default grunt task?", StaticRootPath)
|
return fmt.Errorf("Failed to detect generated css or javascript files in static root (%s), have you executed default grunt task?", StaticRootPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// func readInstanceName() string {
|
||||||
|
// hostname, _ := os.Hostname()
|
||||||
|
// if hostname == "" {
|
||||||
|
// hostname = "hostname_unknown"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// instanceName := Cfg.Section("").Key("instance_name").MustString("")
|
||||||
|
// if instanceName = "" {
|
||||||
|
// // set value as it might be used in other places
|
||||||
|
// Cfg.Section("").Key("instance_name").SetValue(hostname)
|
||||||
|
// instanceName = hostname
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
|
||||||
func NewConfigContext(args *CommandLineArgs) error {
|
func NewConfigContext(args *CommandLineArgs) error {
|
||||||
setHomePath(args)
|
setHomePath(args)
|
||||||
loadConfiguration(args)
|
loadConfiguration(args)
|
||||||
|
|
||||||
Env = Cfg.Section("").Key("app_mode").MustString("development")
|
Env = Cfg.Section("").Key("app_mode").MustString("development")
|
||||||
|
InstanceName = Cfg.Section("").Key("instance_name").MustString("unknown_instance_name")
|
||||||
PluginsPath = Cfg.Section("paths").Key("plugins").String()
|
PluginsPath = Cfg.Section("paths").Key("plugins").String()
|
||||||
|
|
||||||
server := Cfg.Section("server")
|
server := Cfg.Section("server")
|
||||||
|
@ -89,5 +89,14 @@ func TestLoadingSettings(t *testing.T) {
|
|||||||
So(DataPath, ShouldEqual, "/tmp/env_override")
|
So(DataPath, ShouldEqual, "/tmp/env_override")
|
||||||
})
|
})
|
||||||
|
|
||||||
|
Convey("instance_name default to hostname even if hostname env is emtpy", func() {
|
||||||
|
NewConfigContext(&CommandLineArgs{
|
||||||
|
HomePath: "../../",
|
||||||
|
})
|
||||||
|
|
||||||
|
hostname, _ := os.Hostname()
|
||||||
|
So(InstanceName, ShouldEqual, hostname)
|
||||||
|
})
|
||||||
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -1,18 +1,18 @@
|
|||||||
package util
|
package util
|
||||||
|
|
||||||
func StringsFallback2(val1 string, val2 string) string {
|
func StringsFallback2(val1 string, val2 string) string {
|
||||||
if val1 != "" {
|
return stringsFallback(val1, val2)
|
||||||
return val1
|
|
||||||
}
|
|
||||||
return val2
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func StringsFallback3(val1 string, val2 string, val3 string) string {
|
func StringsFallback3(val1 string, val2 string, val3 string) string {
|
||||||
if val1 != "" {
|
return stringsFallback(val1, val2, val3)
|
||||||
return val1
|
|
||||||
}
|
}
|
||||||
if val2 != "" {
|
|
||||||
return val2
|
func stringsFallback(vals ...string) string {
|
||||||
|
for _, v := range vals {
|
||||||
|
if v != "" {
|
||||||
|
return v
|
||||||
}
|
}
|
||||||
return val3
|
}
|
||||||
|
return ""
|
||||||
}
|
}
|
||||||
|
15
pkg/util/strings_test.go
Normal file
15
pkg/util/strings_test.go
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/smartystreets/goconvey/convey"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStringsUtil(t *testing.T) {
|
||||||
|
Convey("Falling back until none empty string", t, func() {
|
||||||
|
So(StringsFallback2("1", "2"), ShouldEqual, "1")
|
||||||
|
So(StringsFallback2("", "2"), ShouldEqual, "2")
|
||||||
|
So(StringsFallback3("", "", "3"), ShouldEqual, "3")
|
||||||
|
})
|
||||||
|
}
|
Loading…
Reference in New Issue
Block a user