Mirror of https://github.com/grafana/grafana.git (synced 2025-02-25 18:55:37 -06:00)
Merge pull request #13490 from mtanda/stackdriver_distribution

Stackdriver heatmap support. Fixes #13559

Commit a1d4675169
@@ -341,29 +341,6 @@ func (e *StackdriverExecutor) parseResponse(queryRes *tsdb.QueryResult, data Sta
 	for _, series := range data.TimeSeries {
 		points := make([]tsdb.TimePoint, 0)
-
-		// reverse the order to be ascending
-		for i := len(series.Points) - 1; i >= 0; i-- {
-			point := series.Points[i]
-			value := point.Value.DoubleValue
-
-			if series.ValueType == "INT64" {
-				parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
-				if err == nil {
-					value = parsedValue
-				}
-			}
-
-			if series.ValueType == "BOOL" {
-				if point.Value.BoolValue {
-					value = 1
-				} else {
-					value = 0
-				}
-			}
-
-			points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
-		}

 		defaultMetricName := series.Metric.Type

 		for key, value := range series.Metric.Labels {
@@ -379,18 +356,87 @@ func (e *StackdriverExecutor) parseResponse(queryRes *tsdb.QueryResult, data Sta
 			if !containsLabel(resourceLabels[key], value) {
 				resourceLabels[key] = append(resourceLabels[key], value)
 			}

 			if containsLabel(query.GroupBys, "resource.label."+key) {
 				defaultMetricName += " " + value
 			}
 		}

-		metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, query)
+		// reverse the order to be ascending
+		if series.ValueType != "DISTRIBUTION" {
+			for i := len(series.Points) - 1; i >= 0; i-- {
+				point := series.Points[i]
+				value := point.Value.DoubleValue
+
-		queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
-			Name:   metricName,
-			Points: points,
-		})
+				if series.ValueType == "INT64" {
+					parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
+					if err == nil {
+						value = parsedValue
+					}
+				}
+
+				if series.ValueType == "BOOL" {
+					if point.Value.BoolValue {
+						value = 1
+					} else {
+						value = 0
+					}
+				}
+
+				points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+			}
+
+			metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, make(map[string]string), query)
+
+			queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
+				Name:   metricName,
+				Points: points,
+			})
+		} else {
+			buckets := make(map[int]*tsdb.TimeSeries)
+
+			for i := len(series.Points) - 1; i >= 0; i-- {
+				point := series.Points[i]
+				if len(point.Value.DistributionValue.BucketCounts) == 0 {
+					continue
+				}
+				maxKey := 0
+				for i := 0; i < len(point.Value.DistributionValue.BucketCounts); i++ {
+					value, err := strconv.ParseFloat(point.Value.DistributionValue.BucketCounts[i], 64)
+					if err != nil {
+						continue
+					}
+					if _, ok := buckets[i]; !ok {
+						// set lower bounds
+						// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries#Distribution
+						bucketBound := calcBucketBound(point.Value.DistributionValue.BucketOptions, i)
+						additionalLabels := map[string]string{"bucket": bucketBound}
+						buckets[i] = &tsdb.TimeSeries{
+							Name:   formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, additionalLabels, query),
+							Points: make([]tsdb.TimePoint, 0),
+						}
+						if maxKey < i {
+							maxKey = i
+						}
+					}
+					buckets[i].Points = append(buckets[i].Points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+				}
+
+				// fill empty bucket
+				for i := 0; i < maxKey; i++ {
+					if _, ok := buckets[i]; !ok {
+						bucketBound := calcBucketBound(point.Value.DistributionValue.BucketOptions, i)
+						additionalLabels := map[string]string{"bucket": bucketBound}
+						buckets[i] = &tsdb.TimeSeries{
+							Name:   formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, additionalLabels, query),
+							Points: make([]tsdb.TimePoint, 0),
+						}
+					}
+				}
+			}
+			for i := 0; i < len(buckets); i++ {
+				queryRes.Series = append(queryRes.Series, buckets[i])
+			}
+		}
 	}

 	queryRes.Meta.Set("resourceLabels", resourceLabels)
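A minimal, self-contained sketch of the fan-out in the hunk above: each DISTRIBUTION point contributes one sample to every bucket series, and each series is keyed by its bucket's lower bound. The names and simplified types here are illustrative stand-ins, not Grafana code.

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// lowerBound mirrors the exponential case of calcBucketBound below:
// bucket 0 is the underflow bucket, bucket n starts at scale*growthFactor^(n-1).
func lowerBound(scale, growth float64, n int) string {
	if n == 0 {
		return "0"
	}
	return strconv.FormatInt(int64(scale*math.Pow(growth, float64(n-1))), 10)
}

func main() {
	// One distribution point as the API returns it: counts are strings.
	bucketCounts := []string{"0", "0", "1", "2"}
	series := map[string][]float64{} // bucket lower bound -> samples over time
	for i, c := range bucketCounts {
		v, err := strconv.ParseFloat(c, 64)
		if err != nil {
			continue
		}
		bound := lowerBound(1, 2, i)
		series[bound] = append(series[bound], v)
	}
	fmt.Println(series) // map[0:[0] 1:[0] 2:[1] 4:[2]]
}
```

On a heatmap panel, each of these per-bucket series becomes one row of cells, which is what makes the DISTRIBUTION value type usable for heatmaps.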
@@ -409,7 +455,7 @@ func containsLabel(labels []string, newLabel string) bool {
 	return false
 }

-func formatLegendKeys(metricType string, defaultMetricName string, metricLabels map[string]string, resourceLabels map[string]string, query *StackdriverQuery) string {
+func formatLegendKeys(metricType string, defaultMetricName string, metricLabels map[string]string, resourceLabels map[string]string, additionalLabels map[string]string, query *StackdriverQuery) string {
 	if query.AliasBy == "" {
 		return defaultMetricName
 	}
@@ -441,6 +487,10 @@ func formatLegendKeys(metricType string, defaultMetricName string, metricLabels
 			return []byte(val)
 		}

+		if val, exists := additionalLabels[metaPartName]; exists {
+			return []byte(val)
+		}
+
 		return in
 	})
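For context, this is the lookup that makes the `{{bucket}}` alias in the tests below resolve to a bucket bound. A rough sketch of the mechanism under assumed names — the real formatLegendKeys also resolves metric.type, metric.label.*, and resource.label.* tokens, and its exact regex may differ:

```go
package main

import (
	"fmt"
	"regexp"
)

// aliasRe matches {{ token }} placeholders in an alias pattern (assumed form).
var aliasRe = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)

// interpolate is a simplified stand-in for formatLegendKeys: it only
// resolves tokens against additionalLabels (e.g. the "bucket" label).
func interpolate(aliasBy string, additionalLabels map[string]string) string {
	return string(aliasRe.ReplaceAllFunc([]byte(aliasBy), func(in []byte) []byte {
		token := string(aliasRe.FindSubmatch(in)[1])
		if val, ok := additionalLabels[token]; ok {
			return []byte(val)
		}
		return in // unknown token: leave the placeholder untouched
	}))
}

func main() {
	// With AliasBy "{{bucket}}", each bucket series is named by its lower bound.
	fmt.Println(interpolate("{{bucket}}", map[string]string{"bucket": "256"})) // 256
}
```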
@@ -466,6 +516,22 @@ func replaceWithMetricPart(metaPartName string, metricType string) []byte {
 	return nil
 }

+func calcBucketBound(bucketOptions StackdriverBucketOptions, n int) string {
+	bucketBound := "0"
+	if n == 0 {
+		return bucketBound
+	}
+
+	if bucketOptions.LinearBuckets != nil {
+		bucketBound = strconv.FormatInt(bucketOptions.LinearBuckets.Offset+(bucketOptions.LinearBuckets.Width*int64(n-1)), 10)
+	} else if bucketOptions.ExponentialBuckets != nil {
+		bucketBound = strconv.FormatInt(int64(bucketOptions.ExponentialBuckets.Scale*math.Pow(bucketOptions.ExponentialBuckets.GrowthFactor, float64(n-1))), 10)
+	} else if bucketOptions.ExplicitBuckets != nil {
+		bucketBound = strconv.FormatInt(bucketOptions.ExplicitBuckets.Bounds[(n-1)], 10)
+	}
+	return bucketBound
+}
+
 func (e *StackdriverExecutor) createRequest(ctx context.Context, dsInfo *models.DataSource) (*http.Request, error) {
 	u, _ := url.Parse(dsInfo.Url)
 	u.Path = path.Join(u.Path, "render")
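To make the formulas concrete: with the exponential options used in the test fixture below (scale 1, growthFactor 2), the lower bound of bucket n is 1·2^(n-1), and bucket 0 is the underflow bucket. A quick standalone check, independent of the code above:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// Exponential bucket lower bounds for scale=1, growthFactor=2,
	// matching the 11 bucketCounts entries in the test fixture.
	bounds := []string{"0"} // bucket 0 is the underflow bucket
	for n := 1; n <= 10; n++ {
		bounds = append(bounds, strconv.FormatInt(int64(1*math.Pow(2, float64(n-1))), 10))
	}
	fmt.Println(bounds) // [0 1 2 4 8 16 32 64 128 256 512]
}
```

These eleven bounds are exactly the series names the distribution test below asserts.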
@@ -4,6 +4,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
+	"math"
+	"strconv"
 	"testing"
 	"time"
@@ -341,6 +343,46 @@ func TestStackdriver(t *testing.T) {
 				})
 			})
 		})

+		Convey("when data from query is distribution", func() {
+			data, err := loadTestFile("./test-data/3-series-response-distribution.json")
+			So(err, ShouldBeNil)
+			So(len(data.TimeSeries), ShouldEqual, 1)
+
+			res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
+			query := &StackdriverQuery{AliasBy: "{{bucket}}"}
+			err = executor.parseResponse(res, data, query)
+			So(err, ShouldBeNil)
+
+			So(len(res.Series), ShouldEqual, 11)
+			for i := 0; i < 11; i++ {
+				if i == 0 {
+					So(res.Series[i].Name, ShouldEqual, "0")
+				} else {
+					So(res.Series[i].Name, ShouldEqual, strconv.FormatInt(int64(math.Pow(float64(2), float64(i-1))), 10))
+				}
+				So(len(res.Series[i].Points), ShouldEqual, 3)
+			}
+
+			Convey("timestamps should be in ascending order", func() {
+				So(res.Series[0].Points[0][1].Float64, ShouldEqual, 1536668940000)
+				So(res.Series[0].Points[1][1].Float64, ShouldEqual, 1536669000000)
+				So(res.Series[0].Points[2][1].Float64, ShouldEqual, 1536669060000)
+			})
+
+			Convey("value should be correct", func() {
+				So(res.Series[8].Points[0][0].Float64, ShouldEqual, 1)
+				So(res.Series[9].Points[0][0].Float64, ShouldEqual, 1)
+				So(res.Series[10].Points[0][0].Float64, ShouldEqual, 1)
+				So(res.Series[8].Points[1][0].Float64, ShouldEqual, 0)
+				So(res.Series[9].Points[1][0].Float64, ShouldEqual, 0)
+				So(res.Series[10].Points[1][0].Float64, ShouldEqual, 1)
+				So(res.Series[8].Points[2][0].Float64, ShouldEqual, 0)
+				So(res.Series[9].Points[2][0].Float64, ShouldEqual, 1)
+				So(res.Series[10].Points[2][0].Float64, ShouldEqual, 0)
+			})
+		})

 	})

 	Convey("when interpolating filter wildcards", func() {
New file: test-data/3-series-response-distribution.json (referenced by the test above)

@@ -0,0 +1,112 @@
+{
+  "timeSeries": [
+    {
+      "metric": {
+        "type": "loadbalancing.googleapis.com\/https\/backend_latencies"
+      },
+      "resource": {
+        "type": "https_lb_rule",
+        "labels": {
+          "project_id": "grafana-prod"
+        }
+      },
+      "metricKind": "DELTA",
+      "valueType": "DISTRIBUTION",
+      "points": [
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:30:00Z",
+            "endTime": "2018-09-11T12:31:00Z"
+          },
+          "value": {
+            "distributionValue": {
+              "count": "1",
+              "bucketOptions": {
+                "exponentialBuckets": {
+                  "numFiniteBuckets": 10,
+                  "growthFactor": 2,
+                  "scale": 1
+                }
+              },
+              "bucketCounts": [
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "1",
+                "0"
+              ]
+            }
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:29:00Z",
+            "endTime": "2018-09-11T12:30:00Z"
+          },
+          "value": {
+            "distributionValue": {
+              "count": "1",
+              "bucketOptions": {
+                "exponentialBuckets": {
+                  "numFiniteBuckets": 10,
+                  "growthFactor": 2,
+                  "scale": 1
+                }
+              },
+              "bucketCounts": [
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "1"
+              ]
+            }
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:28:00Z",
+            "endTime": "2018-09-11T12:29:00Z"
+          },
+          "value": {
+            "distributionValue": {
+              "count": "3",
+              "bucketOptions": {
+                "exponentialBuckets": {
+                  "numFiniteBuckets": 10,
+                  "growthFactor": 2,
+                  "scale": 1
+                }
+              },
+              "bucketCounts": [
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "0",
+                "1",
+                "1",
+                "1"
+              ]
+            }
+          }
+        }
+      ]
+    }
+  ]
+}
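Cross-checking this fixture against the test above: the three one-minute points cover 12:28–12:31, and after the parser reverses them into ascending order the non-zero counts land as

12:28–12:29 → buckets 8, 9, 10 = 1, 1, 1 (Points[0])
12:29–12:30 → bucket 10 = 1 (Points[1])
12:30–12:31 → bucket 9 = 1 (Points[2])

which is exactly what the "value should be correct" assertions encode.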
@@ -14,6 +14,22 @@ type StackdriverQuery struct {
 	AliasBy string
 }

+type StackdriverBucketOptions struct {
+	LinearBuckets *struct {
+		NumFiniteBuckets int64 `json:"numFiniteBuckets"`
+		Width            int64 `json:"width"`
+		Offset           int64 `json:"offset"`
+	} `json:"linearBuckets"`
+	ExponentialBuckets *struct {
+		NumFiniteBuckets int64   `json:"numFiniteBuckets"`
+		GrowthFactor     float64 `json:"growthFactor"`
+		Scale            float64 `json:"scale"`
+	} `json:"exponentialBuckets"`
+	ExplicitBuckets *struct {
+		Bounds []int64 `json:"bounds"`
+	} `json:"explicitBuckets"`
+}
+
 // StackdriverResponse is the data returned from the external Google Stackdriver API
 type StackdriverResponse struct {
 	TimeSeries []struct {
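Because the three bucket-option variants are pointer fields, only the one present in the API payload is non-nil after unmarshaling, which is what calcBucketBound branches on. A small self-contained check of that behavior (the struct is copied locally so the sketch compiles on its own):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the StackdriverBucketOptions struct above.
type StackdriverBucketOptions struct {
	LinearBuckets *struct {
		NumFiniteBuckets int64 `json:"numFiniteBuckets"`
		Width            int64 `json:"width"`
		Offset           int64 `json:"offset"`
	} `json:"linearBuckets"`
	ExponentialBuckets *struct {
		NumFiniteBuckets int64   `json:"numFiniteBuckets"`
		GrowthFactor     float64 `json:"growthFactor"`
		Scale            float64 `json:"scale"`
	} `json:"exponentialBuckets"`
	ExplicitBuckets *struct {
		Bounds []int64 `json:"bounds"`
	} `json:"explicitBuckets"`
}

func main() {
	raw := `{"exponentialBuckets":{"numFiniteBuckets":10,"growthFactor":2,"scale":1}}`
	var opts StackdriverBucketOptions
	if err := json.Unmarshal([]byte(raw), &opts); err != nil {
		panic(err)
	}
	// Only the variant present in the payload is non-nil.
	fmt.Println(opts.LinearBuckets == nil, opts.ExponentialBuckets != nil) // true true
}
```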
@@ -33,10 +49,26 @@ type StackdriverResponse struct {
 				EndTime   time.Time `json:"endTime"`
 			} `json:"interval"`
 			Value struct {
-				DoubleValue float64 `json:"doubleValue"`
-				StringValue string  `json:"stringValue"`
-				BoolValue   bool    `json:"boolValue"`
-				IntValue    string  `json:"int64Value"`
+				DoubleValue       float64 `json:"doubleValue"`
+				StringValue       string  `json:"stringValue"`
+				BoolValue         bool    `json:"boolValue"`
+				IntValue          string  `json:"int64Value"`
+				DistributionValue struct {
+					Count                 string  `json:"count"`
+					Mean                  float64 `json:"mean"`
+					SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
+					Range                 struct {
+						Min int `json:"min"`
+						Max int `json:"max"`
+					} `json:"range"`
+					BucketOptions StackdriverBucketOptions `json:"bucketOptions"`
+					BucketCounts  []string                 `json:"bucketCounts"`
+					Examplars     []struct {
+						Value     float64 `json:"value"`
+						Timestamp string  `json:"timestamp"`
+						// attachments
+					} `json:"examplars"`
+				} `json:"distributionValue"`
 			} `json:"value"`
 		} `json:"points"`
 	} `json:"timeSeries"`
@@ -19,7 +19,7 @@ export const alignOptions = [
   {
     text: 'delta',
     value: 'ALIGN_DELTA',
-    valueTypes: [ValueTypes.INT64, ValueTypes.DOUBLE, ValueTypes.MONEY],
+    valueTypes: [ValueTypes.INT64, ValueTypes.DOUBLE, ValueTypes.MONEY, ValueTypes.DISTRIBUTION],
     metricKinds: [MetricKind.CUMULATIVE, MetricKind.DELTA],
   },
   {