Merge branch 'alerting_mqe'
commit 63bf332023 (mirror of https://github.com/grafana/grafana.git)
@@ -21,6 +21,7 @@ import (
 	_ "github.com/grafana/grafana/pkg/services/alerting/notifiers"
 	_ "github.com/grafana/grafana/pkg/tsdb/graphite"
 	_ "github.com/grafana/grafana/pkg/tsdb/influxdb"
+	_ "github.com/grafana/grafana/pkg/tsdb/mqe"
 	_ "github.com/grafana/grafana/pkg/tsdb/opentsdb"
 	_ "github.com/grafana/grafana/pkg/tsdb/prometheus"
 	_ "github.com/grafana/grafana/pkg/tsdb/testdata"
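The blank import above is what wires the new backend in: each tsdb package registers itself from init() (mqe.go below calls tsdb.RegisterExecutor("mqe-datasource", NewMQEExecutor)). The following is a minimal, self-contained sketch of that registration pattern; the names registry, Register, and Executor are illustrative stand-ins, not Grafana's actual API.

// Sketch of init()-based registration triggered by a blank import (assumed simplified API).
package main

import "fmt"

type Executor interface{ Name() string }

type mqeExecutor struct{}

func (mqeExecutor) Name() string { return "mqe" }

// registry maps a datasource type to a factory, mirroring tsdb.RegisterExecutor.
var registry = map[string]func() Executor{}

func Register(id string, factory func() Executor) { registry[id] = factory }

// In Grafana, each tsdb package runs its registration in init(), so a
// blank import of the package is enough to make the executor available.
func init() { Register("mqe-datasource", func() Executor { return mqeExecutor{} }) }

func main() {
	fmt.Println(registry["mqe-datasource"]().Name())
}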
pkg/tsdb/mqe/model_parser.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package mqe

import (
	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/tsdb"
)

func NewQueryParser() *QueryParser {
	return &QueryParser{}
}

type QueryParser struct{}

func (qp *QueryParser) Parse(model *simplejson.Json, dsInfo *models.DataSource, queryContext *tsdb.QueryContext) (*Query, error) {
	query := &Query{TimeRange: queryContext.TimeRange}
	query.AddAppToAlias = model.Get("addAppToAlias").MustBool(false)
	query.AddHostToAlias = model.Get("addHostToAlias").MustBool(false)
	query.UseRawQuery = model.Get("rawQuery").MustBool(false)
	query.RawQuery = model.Get("query").MustString("")

	query.Apps = model.Get("apps").MustStringArray([]string{})
	query.Hosts = model.Get("hosts").MustStringArray([]string{})

	var metrics []Metric
	var err error
	for _, metricsObj := range model.Get("metrics").MustArray() {
		metricJson := simplejson.NewFromAny(metricsObj)
		var m Metric

		m.Alias = metricJson.Get("alias").MustString("")
		m.Metric, err = metricJson.Get("metric").String()
		if err != nil {
			return nil, err
		}

		metrics = append(metrics, m)
	}

	query.Metrics = metrics

	return query, nil
}
pkg/tsdb/mqe/model_parser_test.go (new file, 119 lines)
@@ -0,0 +1,119 @@
package mqe

import (
	"testing"

	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/tsdb"
	. "github.com/smartystreets/goconvey/convey"
)

func TestMQEQueryParser(t *testing.T) {
	Convey("MQE query parser", t, func() {
		parser := &QueryParser{}

		dsInfo := &models.DataSource{JsonData: simplejson.New()}
		queryContext := &tsdb.QueryContext{}

		Convey("can parse simple mqe model", func() {
			json := `
			{
				"apps": [],
				"hosts": [
					"staples-lab-1"
				],
				"metrics": [
					{
						"metric": "os.cpu.all*"
					}
				],
				"rawQuery": "",
				"refId": "A"
			}
			`
			modelJson, err := simplejson.NewJson([]byte(json))
			So(err, ShouldBeNil)

			query, err := parser.Parse(modelJson, dsInfo, queryContext)
			So(err, ShouldBeNil)
			So(query.UseRawQuery, ShouldBeFalse)

			So(len(query.Apps), ShouldEqual, 0)
			So(query.Hosts[0], ShouldEqual, "staples-lab-1")
			So(query.Metrics[0].Metric, ShouldEqual, "os.cpu.all*")
		})

		Convey("can parse multi series mqe model", func() {
			json := `
			{
				"apps": [
					"demoapp"
				],
				"hosts": [
					"staples-lab-1"
				],
				"metrics": [
					{
						"metric": "os.cpu.all.active_percentage"
					},
					{
						"metric": "os.disk.sda.io_time"
					}
				],
				"rawQuery": "",
				"refId": "A",
				"addAppToAlias": true,
				"addHostToAlias": true
			}
			`
			modelJson, err := simplejson.NewJson([]byte(json))
			So(err, ShouldBeNil)

			query, err := parser.Parse(modelJson, dsInfo, queryContext)
			So(err, ShouldBeNil)
			So(query.UseRawQuery, ShouldBeFalse)
			So(query.Apps[0], ShouldEqual, "demoapp")
			So(query.Metrics[0].Metric, ShouldEqual, "os.cpu.all.active_percentage")
			So(query.Metrics[1].Metric, ShouldEqual, "os.disk.sda.io_time")
		})

		Convey("can parse raw query", func() {
			json := `
			{
				"addAppToAlias": true,
				"addHostToAlias": true,
				"apps": [],
				"hosts": [
					"staples-lab-1"
				],
				"metrics": [
					{
						"alias": "cpu active",
						"metric": "os.cpu.all.active_percentage"
					},
					{
						"alias": "disk sda time",
						"metric": "os.disk.sda.io_time"
					}
				],
				"rawQuery": true,
				"query": "raw-query",
				"refId": "A"
			}
			`
			modelJson, err := simplejson.NewJson([]byte(json))
			So(err, ShouldBeNil)

			query, err := parser.Parse(modelJson, dsInfo, queryContext)
			So(err, ShouldBeNil)

			So(query.UseRawQuery, ShouldBeTrue)
			So(query.RawQuery, ShouldEqual, "raw-query")
			So(query.AddAppToAlias, ShouldBeTrue)
			So(query.AddHostToAlias, ShouldBeTrue)
		})
	})
}
pkg/tsdb/mqe/mqe.go (new file, 144 lines)
@@ -0,0 +1,144 @@
package mqe

import (
	"context"
	"net/http"
	"net/url"
	"path"
	"strings"

	"golang.org/x/net/context/ctxhttp"

	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/setting"
	"github.com/grafana/grafana/pkg/tsdb"
)

/*
	TODO:
	* performance: send outgoing requests in parallel.
	* frontend plugin: targetContainsTemplates
*/

type MQEExecutor struct {
	*models.DataSource
	queryParser    *QueryParser
	responseParser *ResponseParser
	httpClient     *http.Client
	log            log.Logger
	tokenClient    *TokenClient
}

func NewMQEExecutor(dsInfo *models.DataSource) (tsdb.Executor, error) {
	httpclient, err := dsInfo.GetHttpClient()
	if err != nil {
		return nil, err
	}

	return &MQEExecutor{
		DataSource:     dsInfo,
		httpClient:     httpclient,
		log:            log.New("tsdb.mqe"),
		queryParser:    NewQueryParser(),
		responseParser: NewResponseParser(),
		tokenClient:    NewTokenClient(dsInfo),
	}, nil
}

func init() {
	tsdb.RegisterExecutor("mqe-datasource", NewMQEExecutor)
}

type QueryToSend struct {
	RawQuery string
	QueryRef *Query
}

func (e *MQEExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) *tsdb.BatchResult {
	result := &tsdb.BatchResult{}

	availableSeries, err := e.tokenClient.GetTokenData(ctx)
	if err != nil {
		return result.WithError(err)
	}

	var mqeQueries []*Query
	for _, v := range queries {
		q, err := e.queryParser.Parse(v.Model, e.DataSource, queryContext)
		if err != nil {
			return result.WithError(err)
		}
		mqeQueries = append(mqeQueries, q)
	}

	var rawQueries []QueryToSend
	for _, v := range mqeQueries {
		queries, err := v.Build(availableSeries.Metrics)
		if err != nil {
			return result.WithError(err)
		}

		rawQueries = append(rawQueries, queries...)
	}

	e.log.Debug("Sending request", "url", e.DataSource.Url)

	queryResult := &tsdb.QueryResult{}
	for _, v := range rawQueries {
		if setting.Env == setting.DEV {
			e.log.Debug("Executing", "query", v)
		}

		req, err := e.createRequest(v.RawQuery)
		if err != nil {
			return result.WithError(err)
		}

		resp, err := ctxhttp.Do(ctx, e.httpClient, req)
		if err != nil {
			return result.WithError(err)
		}

		series, err := e.responseParser.Parse(resp, v.QueryRef)
		if err != nil {
			return result.WithError(err)
		}

		queryResult.Series = append(queryResult.Series, series.Series...)
	}

	result.QueryResults = make(map[string]*tsdb.QueryResult)
	result.QueryResults["A"] = queryResult

	return result
}

func (e *MQEExecutor) createRequest(query string) (*http.Request, error) {
	u, err := url.Parse(e.Url)
	if err != nil {
		return nil, err
	}

	u.Path = path.Join(u.Path, "query")

	payload := simplejson.New()
	payload.Set("query", query)

	jsonPayload, err := payload.MarshalJSON()
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(string(jsonPayload)))
	if err != nil {
		return nil, err
	}

	req.Header.Set("User-Agent", "Grafana")
	req.Header.Set("Content-Type", "application/json")

	if e.BasicAuth {
		req.SetBasicAuth(e.BasicAuthUser, e.BasicAuthPassword)
	}

	return req, nil
}
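For context, createRequest above wraps each expanded query in a one-field JSON document and POSTs it to <datasource url>/query. A minimal sketch of that payload, reusing the raw-query string format that Query.Build produces (the metric, host, and timestamps are illustrative values taken from the tests further below):

package main

import (
	"fmt"

	"github.com/grafana/grafana/pkg/components/simplejson"
)

func main() {
	// Illustrative raw query in the format produced by Query.Build (types.go).
	rawQuery := "`os.cpu.all.idle` where host in ('staples-lab-1') from 1479287280000 to 1479287580000"

	payload := simplejson.New()
	payload.Set("query", rawQuery)

	body, err := payload.MarshalJSON()
	if err != nil {
		panic(err)
	}
	// This body is POSTed to <datasource url>/query with Content-Type: application/json.
	fmt.Println(string(body))
}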
pkg/tsdb/mqe/response_parser.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package mqe

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"

	null "gopkg.in/guregu/null.v3"

	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/tsdb"
)

func NewResponseParser() *ResponseParser {
	return &ResponseParser{
		log: log.New("tsdb.mqe"),
	}
}

type MQEResponse struct {
	Success bool               `json:"success"`
	Name    string             `json:"name"`
	Body    []MQEResponseSerie `json:"body"`
}

type ResponseTimeRange struct {
	Start      int64 `json:"start"`
	End        int64 `json:"end"`
	Resolution int64 `json:"resolution"`
}

type MQEResponseSerie struct {
	Query     string            `json:"query"`
	Name      string            `json:"name"`
	Type      string            `json:"type"`
	Series    []MQESerie        `json:"series"`
	TimeRange ResponseTimeRange `json:"timerange"`
}

type MQESerie struct {
	Values []null.Float      `json:"values"`
	Tagset map[string]string `json:"tagset"`
}

type ResponseParser struct {
	log log.Logger
}

func (parser *ResponseParser) Parse(res *http.Response, queryRef *Query) (*tsdb.QueryResult, error) {
	body, err := ioutil.ReadAll(res.Body)
	defer res.Body.Close()
	if err != nil {
		return nil, err
	}

	if res.StatusCode/100 != 2 {
		parser.log.Error("Request failed", "status code", res.StatusCode, "body", string(body))
		return nil, fmt.Errorf("Returned invalid status code")
	}

	data := &MQEResponse{}
	err = json.Unmarshal(body, data)
	if err != nil {
		parser.log.Info("Failed to unmarshal response", "error", err, "status", res.Status, "body", string(body))
		return nil, err
	}

	if !data.Success {
		return nil, fmt.Errorf("Request failed.")
	}

	var series tsdb.TimeSeriesSlice
	for _, body := range data.Body {
		for _, mqeSerie := range body.Series {
			namePrefix := ""

			// append predefined tags to the series name
			for key, value := range mqeSerie.Tagset {
				if key == "app" && queryRef.AddAppToAlias {
					namePrefix += value + " "
				}
				if key == "host" && queryRef.AddHostToAlias {
					namePrefix += value + " "
				}
			}

			serie := &tsdb.TimeSeries{Name: namePrefix + body.Name}

			for i, value := range mqeSerie.Values {
				timestamp := body.TimeRange.Start + int64(i)*body.TimeRange.Resolution
				serie.Points = append(serie.Points, tsdb.NewTimePoint(value, float64(timestamp)))
			}

			series = append(series, serie)
		}
	}

	return &tsdb.QueryResult{Series: series}, nil
}
pkg/tsdb/mqe/response_parser_test.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package mqe

import (
	"io/ioutil"
	"net/http"
	"strings"
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

var (
	dummieJson string
)

func TestMQEResponseParser(t *testing.T) {
	Convey("MQE response parser", t, func() {
		parser := NewResponseParser()

		Convey("Can parse response", func() {
			queryRef := &Query{
				AddAppToAlias:  true,
				AddHostToAlias: true,
			}

			response := &http.Response{
				StatusCode: 200,
				Body:       ioutil.NopCloser(strings.NewReader(dummieJson)),
			}
			res, err := parser.Parse(response, queryRef)
			So(err, ShouldBeNil)
			So(len(res.Series), ShouldEqual, 2)
			So(len(res.Series[0].Points), ShouldEqual, 14)
			So(res.Series[0].Name, ShouldEqual, "demoapp staples-lab-1 os.disk.sda3.weighted_io_time")
			startTime := 1479287280000
			for i := 0; i < 11; i++ {
				So(res.Series[0].Points[i][0].Float64, ShouldEqual, i+1)
				So(res.Series[0].Points[i][1].Float64, ShouldEqual, startTime+(i*30000))
			}
		})
	})
}

func init() {
	dummieJson = `{
		"success": true,
		"name": "select",
		"body": [
			{
				"query": "os.disk.sda3.weighted_io_time",
				"name": "os.disk.sda3.weighted_io_time",
				"type": "series",
				"series": [
					{
						"tagset": {
							"app": "demoapp",
							"host": "staples-lab-1"
						},
						"values": [1,2,3,4,5,6,7,8,9,10,11, null, null, null]
					},
					{
						"tagset": {
							"app": "demoapp",
							"host": "staples-lab-2"
						},
						"values": [11,10,9,8,7,6,5,4,3,2,1]
					}
				],
				"timerange": {
					"start": 1479287280000,
					"end": 1479287580000,
					"resolution": 30000
				}
			}
		],
		"metadata": {
			"description": {
				"app": [
					"demoapp"
				],
				"host": [
					"staples-lab-1",
					"staples-lab-2"
				]
			},
			"notes": null,
			"profile": [
				{
					"name": "Parsing Query",
					"start": "2016-11-16T04:16:21.874354721-05:00",
					"finish": "2016-11-16T04:16:21.874762291-05:00"
				},
				{
					"name": "Cassandra GetAllTags",
					"start": "2016-11-16T04:16:21.874907171-05:00",
					"finish": "2016-11-16T04:16:21.876401922-05:00"
				},
				{
					"name": "CachedMetricMetadataAPI_GetAllTags_Expired",
					"start": "2016-11-16T04:16:21.874904751-05:00",
					"finish": "2016-11-16T04:16:21.876407852-05:00"
				},
				{
					"name": "CachedMetricMetadataAPI_GetAllTags",
					"start": "2016-11-16T04:16:21.874899491-05:00",
					"finish": "2016-11-16T04:16:21.876410382-05:00"
				},
				{
					"name": "Blueflood FetchSingleTimeseries Resolution",
					"description": "os.disk.sda3.weighted_io_time [app=demoapp,host=staples-lab-1]\n at 30s",
					"start": "2016-11-16T04:16:21.876623312-05:00",
					"finish": "2016-11-16T04:16:21.881763444-05:00"
				},
				{
					"name": "Blueflood FetchSingleTimeseries Resolution",
					"description": "os.disk.sda3.weighted_io_time [app=demoapp,host=staples-lab-2]\n at 30s",
					"start": "2016-11-16T04:16:21.876642682-05:00",
					"finish": "2016-11-16T04:16:21.881895914-05:00"
				},
				{
					"name": "Blueflood FetchMultipleTimeseries",
					"start": "2016-11-16T04:16:21.876418022-05:00",
					"finish": "2016-11-16T04:16:21.881921474-05:00"
				}
			]
		}
	}
	`
}
pkg/tsdb/mqe/token_client.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package mqe

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"time"

	"golang.org/x/net/context/ctxhttp"

	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/models"
	"github.com/patrickmn/go-cache"
)

var tokenCache *cache.Cache

func init() {
	tokenCache = cache.New(5*time.Minute, 30*time.Second)
}

type TokenClient struct {
	log        log.Logger
	Datasource *models.DataSource
	HttpClient *http.Client
}

func NewTokenClient(datasource *models.DataSource) *TokenClient {
	httpClient, _ := datasource.GetHttpClient()

	return &TokenClient{
		log:        log.New("tsdb.mqe.tokenclient"),
		Datasource: datasource,
		HttpClient: httpClient,
	}
}

func (client *TokenClient) GetTokenData(ctx context.Context) (*TokenBody, error) {
	key := strconv.FormatInt(client.Datasource.Id, 10)

	item, found := tokenCache.Get(key)
	if found {
		if result, ok := item.(*TokenBody); ok {
			return result, nil
		}
	}

	b, err := client.RequestTokenData(ctx)
	if err != nil {
		return nil, err
	}

	tokenCache.Set(key, b, cache.DefaultExpiration)

	return b, nil
}

func (client *TokenClient) RequestTokenData(ctx context.Context) (*TokenBody, error) {
	u, _ := url.Parse(client.Datasource.Url)
	u.Path = path.Join(u.Path, "token")

	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		client.log.Info("Failed to create request", "error", err)
		return nil, err
	}

	res, err := ctxhttp.Do(ctx, client.HttpClient, req)
	if err != nil {
		return nil, err
	}

	body, err := ioutil.ReadAll(res.Body)
	defer res.Body.Close()
	if err != nil {
		return nil, err
	}

	if res.StatusCode/100 != 2 {
		client.log.Info("Request failed", "status", res.Status, "body", string(body))
		return nil, fmt.Errorf("Request failed status: %v", res.Status)
	}

	var result *TokenResponse
	err = json.Unmarshal(body, &result)
	if err != nil {
		client.log.Info("Failed to unmarshal response", "error", err, "status", res.Status, "body", string(body))
		return nil, err
	}

	if !result.Success {
		return nil, fmt.Errorf("Request failed for unknown reason.")
	}

	return &result.Body, nil
}
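The token endpoint only needs to report success plus the list of known metrics; TokenResponse and TokenBody (defined in types.go) carry exactly that. A minimal standalone sketch of decoding such a response follows, with an illustrative body (encoding/json matches the lowercase keys to the exported fields case-insensitively):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of TokenBody/TokenResponse from types.go, so the example is self-contained.
type TokenBody struct {
	Metrics []string
}

type TokenResponse struct {
	Success bool
	Body    TokenBody
}

func main() {
	// Illustrative /token response; the real URL is <datasource url>/token.
	raw := []byte(`{"success": true, "body": {"metrics": ["os.cpu.all.idle", "os.cpu.1.idle"]}}`)

	var result TokenResponse
	if err := json.Unmarshal(raw, &result); err != nil {
		panic(err)
	}
	fmt.Println(result.Success, result.Body.Metrics)
}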
pkg/tsdb/mqe/token_client_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package mqe

import (
	"context"
	"testing"

	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/models"
	. "github.com/smartystreets/goconvey/convey"
)

func TestTokenClient(t *testing.T) {
	SkipConvey("Token client", t, func() {
		dsInfo := &models.DataSource{
			JsonData: simplejson.New(),
			Url:      "",
		}

		client := NewTokenClient(dsInfo)

		body, err := client.RequestTokenData(context.TODO())

		So(err, ShouldBeNil)
		//So(len(body.Functions), ShouldBeGreaterThan, 1)
		So(len(body.Metrics), ShouldBeGreaterThan, 1)
	})
}
pkg/tsdb/mqe/types.go (new file, 126 lines)
@@ -0,0 +1,126 @@
package mqe

import (
	"fmt"
	"regexp"
	"strings"

	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/tsdb"
)

type Metric struct {
	Metric string
	Alias  string
}

type Query struct {
	Metrics        []Metric
	Hosts          []string
	Apps           []string
	AddAppToAlias  bool
	AddHostToAlias bool

	TimeRange   *tsdb.TimeRange
	UseRawQuery bool
	RawQuery    string
}

var (
	containsWildcardPattern = regexp.MustCompile(`\*`)
)

func (q *Query) Build(availableSeries []string) ([]QueryToSend, error) {
	var queriesToSend []QueryToSend
	where := q.buildWhereClause()

	for _, v := range q.Metrics {
		if !containsWildcardPattern.Match([]byte(v.Metric)) {
			alias := ""
			if v.Alias != "" {
				alias = fmt.Sprintf(" {%s}", v.Alias)
			}
			rawQuery := fmt.Sprintf(
				"`%s`%s %s from %v to %v",
				v.Metric,
				alias,
				where,
				q.TimeRange.GetFromAsMsEpoch(),
				q.TimeRange.GetToAsMsEpoch())
			queriesToSend = append(queriesToSend, QueryToSend{
				RawQuery: rawQuery,
				QueryRef: q,
			})
			continue
		}

		m := strings.Replace(v.Metric, "*", ".*", -1)
		mp, err := regexp.Compile(m)

		if err != nil {
			log.Error2("failed to compile regex for metric", "metric", m)
			continue
		}

		//TODO: this lookup should be cached
		for _, a := range availableSeries {
			if mp.Match([]byte(a)) {
				alias := ""
				if v.Alias != "" {
					alias = fmt.Sprintf(" {%s}", v.Alias)
				}

				rawQuery := fmt.Sprintf(
					"`%s`%s %s from %v to %v",
					a,
					alias,
					where,
					q.TimeRange.GetFromAsMsEpoch(),
					q.TimeRange.GetToAsMsEpoch())

				queriesToSend = append(queriesToSend, QueryToSend{
					RawQuery: rawQuery,
					QueryRef: q,
				})
			}
		}
	}
	return queriesToSend, nil
}

func (q *Query) buildWhereClause() string {
	hasApps := len(q.Apps) > 0
	hasHosts := len(q.Hosts) > 0

	where := ""
	if hasHosts || hasApps {
		where += "where "
	}

	if hasApps {
		apps := strings.Join(q.Apps, "', '")
		where += fmt.Sprintf("app in ('%s')", apps)
	}

	if hasHosts && hasApps {
		where += " and "
	}

	if hasHosts {
		hosts := strings.Join(q.Hosts, "', '")
		where += fmt.Sprintf("host in ('%s')", hosts)
	}

	return where
}

type TokenBody struct {
	Metrics []string
}

type TokenResponse struct {
	Success bool
	Body    TokenBody
}
pkg/tsdb/mqe/types_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
package mqe

import (
	"fmt"
	"testing"
	"time"

	"github.com/grafana/grafana/pkg/tsdb"
	. "github.com/smartystreets/goconvey/convey"
)

func TestWildcardExpansion(t *testing.T) {
	availableMetrics := []string{
		"os.cpu.all.idle",
		"os.cpu.1.idle",
		"os.cpu.2.idle",
		"os.cpu.3.idle",
	}

	now := time.Now()
	from := now.Add((time.Minute*5)*-1).UnixNano() / int64(time.Millisecond)
	to := now.UnixNano() / int64(time.Millisecond)

	Convey("Can expand query", t, func() {
		Convey("Without wildcard series", func() {
			query := &Query{
				Metrics: []Metric{
					{Metric: "os.cpu.3.idle", Alias: ""},
					{Metric: "os.cpu.2.idle", Alias: ""},
					{Metric: "os.cpu.1.idle", Alias: "cpu"},
				},
				Hosts:          []string{"staples-lab-1", "staples-lab-2"},
				Apps:           []string{"demoapp-1", "demoapp-2"},
				AddAppToAlias:  false,
				AddHostToAlias: false,
				TimeRange:      &tsdb.TimeRange{Now: now, From: "5m", To: "now"},
			}

			expandedQueries, err := query.Build(availableMetrics)
			So(err, ShouldBeNil)
			So(len(expandedQueries), ShouldEqual, 3)
			So(expandedQueries[0].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.3.idle` where app in ('demoapp-1', 'demoapp-2') and host in ('staples-lab-1', 'staples-lab-2') from %v to %v", from, to))
			So(expandedQueries[1].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.2.idle` where app in ('demoapp-1', 'demoapp-2') and host in ('staples-lab-1', 'staples-lab-2') from %v to %v", from, to))
			So(expandedQueries[2].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.1.idle` {cpu} where app in ('demoapp-1', 'demoapp-2') and host in ('staples-lab-1', 'staples-lab-2') from %v to %v", from, to))
		})

		Convey("Containing wildcard series", func() {
			query := &Query{
				Metrics: []Metric{
					{Metric: "os.cpu*", Alias: ""},
				},
				Hosts:          []string{"staples-lab-1"},
				AddAppToAlias:  false,
				AddHostToAlias: false,
				TimeRange:      &tsdb.TimeRange{Now: now, From: "5m", To: "now"},
			}

			expandedQueries, err := query.Build(availableMetrics)
			So(err, ShouldBeNil)
			So(len(expandedQueries), ShouldEqual, 4)

			So(expandedQueries[0].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.all.idle` where host in ('staples-lab-1') from %v to %v", from, to))
			So(expandedQueries[1].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.1.idle` where host in ('staples-lab-1') from %v to %v", from, to))
			So(expandedQueries[2].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.2.idle` where host in ('staples-lab-1') from %v to %v", from, to))
			So(expandedQueries[3].RawQuery, ShouldEqual, fmt.Sprintf("`os.cpu.3.idle` where host in ('staples-lab-1') from %v to %v", from, to))
		})
	})
}
vendor/github.com/patrickmn/go-cache/CONTRIBUTORS (generated, vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
This is a list of people who have contributed code to go-cache. They, or their
employers, are the copyright holders of the contributed code. Contributed code
is subject to the license restrictions listed in LICENSE (as they were when the
code was contributed.)

Dustin Sallings <dustin@spy.net>
Jason Mooberry <jasonmoo@me.com>
Sergey Shepelev <temotor@gmail.com>
vendor/github.com/patrickmn/go-cache/LICENSE (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
Copyright (c) 2012-2016 Patrick Mylund Nielsen and the go-cache contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
vendor/github.com/patrickmn/go-cache/README.md (generated, vendored, new file, 107 lines)
@@ -0,0 +1,107 @@
# go-cache

go-cache is an in-memory key:value store/cache similar to memcached that is
suitable for applications running on a single machine. Its major advantage is
that, being essentially a thread-safe `map[string]interface{}` with expiration
times, it doesn't need to serialize or transmit its contents over the network.

Any object can be stored, for a given duration or forever, and the cache can be
safely used by multiple goroutines.

Although go-cache isn't meant to be used as a persistent datastore, the entire
cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
items map to serialize, and `NewFrom()` to create a cache from a deserialized
one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)

### Installation

`go get github.com/patrickmn/go-cache`

### Usage

```go
import (
	"fmt"
	"github.com/patrickmn/go-cache"
	"time"
)

func main() {

	// Create a cache with a default expiration time of 5 minutes, and which
	// purges expired items every 30 seconds
	c := cache.New(5*time.Minute, 30*time.Second)

	// Set the value of the key "foo" to "bar", with the default expiration time
	c.Set("foo", "bar", cache.DefaultExpiration)

	// Set the value of the key "baz" to 42, with no expiration time
	// (the item won't be removed until it is re-set, or removed using
	// c.Delete("baz"))
	c.Set("baz", 42, cache.NoExpiration)

	// Get the string associated with the key "foo" from the cache
	foo, found := c.Get("foo")
	if found {
		fmt.Println(foo)
	}

	// Since Go is statically typed, and cache values can be anything, type
	// assertion is needed when values are being passed to functions that don't
	// take arbitrary types, (i.e. interface{}). The simplest way to do this for
	// values which will only be used once--e.g. for passing to another
	// function--is:
	foo, found := c.Get("foo")
	if found {
		MyFunction(foo.(string))
	}

	// This gets tedious if the value is used several times in the same function.
	// You might do either of the following instead:
	if x, found := c.Get("foo"); found {
		foo := x.(string)
		// ...
	}
	// or
	var foo string
	if x, found := c.Get("foo"); found {
		foo = x.(string)
	}
	// ...
	// foo can then be passed around freely as a string

	// Want performance? Store pointers!
	c.Set("foo", &MyStruct, cache.DefaultExpiration)
	if x, found := c.Get("foo"); found {
		foo := x.(*MyStruct)
		// ...
	}

	// If you store a reference type like a pointer, slice, map or channel, you
	// do not need to run Set if you modify the underlying data. The cached
	// reference points to the same memory, so if you modify a struct whose
	// pointer you've stored in the cache, retrieving that pointer with Get will
	// point you to the same data:
	foo := &MyStruct{Num: 1}
	c.Set("foo", foo, cache.DefaultExpiration)
	// ...
	x, _ := c.Get("foo")
	foo := x.(*MyStruct)
	fmt.Println(foo.Num)
	// ...
	foo.Num++
	// ...
	x, _ := c.Get("foo")
	foo := x.(*MyStruct)
	fmt.Println(foo.Num)

	// will print:
	// 1
	// 2

}
```

### Reference

`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
vendor/github.com/patrickmn/go-cache/cache.go (generated, vendored, new file, 1131 lines)
File diff suppressed because it is too large.
vendor/github.com/patrickmn/go-cache/sharded.go (generated, vendored, new file, 192 lines)
@@ -0,0 +1,192 @@
package cache

import (
	"crypto/rand"
	"math"
	"math/big"
	insecurerand "math/rand"
	"os"
	"runtime"
	"time"
)

// This is an experimental and unexported (for now) attempt at making a cache
// with better algorithmic complexity than the standard one, namely by
// preventing write locks of the entire cache when an item is added. As of the
// time of writing, the overhead of selecting buckets results in cache
// operations being about twice as slow as for the standard cache with small
// total cache sizes, and faster for larger ones.
//
// See cache_test.go for a few benchmarks.

type unexportedShardedCache struct {
	*shardedCache
}

type shardedCache struct {
	seed    uint32
	m       uint32
	cs      []*cache
	janitor *shardedJanitor
}

// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
func djb33(seed uint32, k string) uint32 {
	var (
		l = uint32(len(k))
		d = 5381 + seed + l
		i = uint32(0)
	)
	// Why is all this 5x faster than a for loop?
	if l >= 4 {
		for i < l-4 {
			d = (d * 33) ^ uint32(k[i])
			d = (d * 33) ^ uint32(k[i+1])
			d = (d * 33) ^ uint32(k[i+2])
			d = (d * 33) ^ uint32(k[i+3])
			i += 4
		}
	}
	switch l - i {
	case 1:
	case 2:
		d = (d * 33) ^ uint32(k[i])
	case 3:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
	case 4:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
		d = (d * 33) ^ uint32(k[i+2])
	}
	return d ^ (d >> 16)
}

func (sc *shardedCache) bucket(k string) *cache {
	return sc.cs[djb33(sc.seed, k)%sc.m]
}

func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
	sc.bucket(k).Set(k, x, d)
}

func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
	return sc.bucket(k).Add(k, x, d)
}

func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
	return sc.bucket(k).Replace(k, x, d)
}

func (sc *shardedCache) Get(k string) (interface{}, bool) {
	return sc.bucket(k).Get(k)
}

func (sc *shardedCache) Increment(k string, n int64) error {
	return sc.bucket(k).Increment(k, n)
}

func (sc *shardedCache) IncrementFloat(k string, n float64) error {
	return sc.bucket(k).IncrementFloat(k, n)
}

func (sc *shardedCache) Decrement(k string, n int64) error {
	return sc.bucket(k).Decrement(k, n)
}

func (sc *shardedCache) Delete(k string) {
	sc.bucket(k).Delete(k)
}

func (sc *shardedCache) DeleteExpired() {
	for _, v := range sc.cs {
		v.DeleteExpired()
	}
}

// Returns the items in the cache. This may include items that have expired,
// but have not yet been cleaned up. If this is significant, the Expiration
// fields of the items should be checked. Note that explicit synchronization
// is needed to use a cache and its corresponding Items() return values at
// the same time, as the maps are shared.
func (sc *shardedCache) Items() []map[string]Item {
	res := make([]map[string]Item, len(sc.cs))
	for i, v := range sc.cs {
		res[i] = v.Items()
	}
	return res
}

func (sc *shardedCache) Flush() {
	for _, v := range sc.cs {
		v.Flush()
	}
}

type shardedJanitor struct {
	Interval time.Duration
	stop     chan bool
}

func (j *shardedJanitor) Run(sc *shardedCache) {
	j.stop = make(chan bool)
	tick := time.Tick(j.Interval)
	for {
		select {
		case <-tick:
			sc.DeleteExpired()
		case <-j.stop:
			return
		}
	}
}

func stopShardedJanitor(sc *unexportedShardedCache) {
	sc.janitor.stop <- true
}

func runShardedJanitor(sc *shardedCache, ci time.Duration) {
	j := &shardedJanitor{
		Interval: ci,
	}
	sc.janitor = j
	go j.Run(sc)
}

func newShardedCache(n int, de time.Duration) *shardedCache {
	max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
	rnd, err := rand.Int(rand.Reader, max)
	var seed uint32
	if err != nil {
		os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
		seed = insecurerand.Uint32()
	} else {
		seed = uint32(rnd.Uint64())
	}
	sc := &shardedCache{
		seed: seed,
		m:    uint32(n),
		cs:   make([]*cache, n),
	}
	for i := 0; i < n; i++ {
		c := &cache{
			defaultExpiration: de,
			items:             map[string]Item{},
		}
		sc.cs[i] = c
	}
	return sc
}

func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
	if defaultExpiration == 0 {
		defaultExpiration = -1
	}
	sc := newShardedCache(shards, defaultExpiration)
	SC := &unexportedShardedCache{sc}
	if cleanupInterval > 0 {
		runShardedJanitor(sc, cleanupInterval)
		runtime.SetFinalizer(SC, stopShardedJanitor)
	}
	return SC
}
vendor/vendor.json (vendored)
@@ -314,6 +314,12 @@
 			"version": "v1.21.1",
 			"versionExact": "v1.21.1"
 		},
+		{
+			"checksumSHA1": "8z32QKTSDusa4QQyunKE4kyYXZ8=",
+			"path": "github.com/patrickmn/go-cache",
+			"revision": "e7a9def80f35fe1b170b7b8b68871d59dea117e1",
+			"revisionTime": "2016-11-25T23:48:19Z"
+		},
 		{
 			"checksumSHA1": "SMUvX2B8eoFd9wnPofwBKlN6btE=",
 			"path": "github.com/prometheus/client_golang/api/prometheus",