AzureMonitor: remove duplicate query logic on the frontend (#17198)

* feat: AzureMonitor implements legend key on backend

To be able to remove the duplicated query logic on the
frontend, the backend code needs to implement alias
patterns for legend keys and allow the default list of
allowed time grains to be overridden. Some metrics do not
support all the time grains, and the auto time grain
calculation can be incorrect if the list is not overridden.
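
The alias supports tokens such as {{resourcegroup}}, {{namespace}}, {{resourcename}},
{{metric}}, {{dimensionname}} and {{dimensionvalue}} (see formatLegendKey in the diff
below). A minimal standalone sketch of how such a pattern expands, using the same
{{...}} regex the backend registers; expandLegendAlias and the example values here are
illustrative, not part of this change:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern the backend registers for legend keys: {{ token }}.
var legendKeyFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)

// expandLegendAlias is a simplified stand-in for the backend's formatLegendKey:
// each {{token}} in the alias is replaced with the matching query/response value.
func expandLegendAlias(alias string, values map[string]string) string {
	return string(legendKeyFormat.ReplaceAllFunc([]byte(alias), func(in []byte) []byte {
		token := strings.ToLower(strings.Trim(string(in), "{} "))
		if v, ok := values[token]; ok {
			return []byte(v)
		}
		return in // unknown tokens are left untouched
	}))
}

func main() {
	values := map[string]string{
		"resourcegroup": "grafanastaging",
		"namespace":     "Microsoft.Compute/virtualMachines",
		"resourcename":  "grafana",
		"metric":        "Percentage CPU",
	}
	fmt.Println(expandLegendAlias("custom {{resourcegroup}} {{namespace}} {{resourcename}} {{metric}}", values))
	// custom grafanastaging Microsoft.Compute/virtualMachines grafana Percentage CPU
}
```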

* feat: AzureMonitor - removes duplicate query logic on frontend

* AzureMonitor small refactoring

Extracted method and tidied up the auto time grain
code.

* azuremonitor: support for auto time grains for alerting

Converts the allowed time grains into milliseconds and saves
them in the dashboard JSON. This makes alerting queries with
an auto time grain work the same way as on the frontend.
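
For example, a metric that only supports the 1m and 5m grains would be saved with a
query model like the one printed below. This is a hypothetical sketch; the field
values are illustrative, but the shape of allowedTimeGrainsMs matches the new
buildQueries tests:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative query model as it would be persisted in the dashboard JSON.
	// allowedTimeGrainsMs carries the metric's supported time grains in milliseconds
	// so the backend can pick a valid "auto" interval when alerting.
	model := map[string]interface{}{
		"azureMonitor": map[string]interface{}{
			"queryType":           "Azure Monitor",
			"timeGrain":           "auto",
			"aggregation":         "Average",
			"metricName":          "Percentage CPU",
			"allowedTimeGrainsMs": []int64{60000, 300000}, // 1m and 5m only
		},
	}

	out, _ := json.MarshalIndent(model, "", "  ")
	fmt.Println(string(out))
}
```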

* chore: typings -> implicitAny count down to 3413

* azuremonitor: add more typings
Daniel Lee committed 2019-07-04 22:47:24 +02:00 (committed by GitHub)
parent 55b63905ea · commit 7e95ded164
14 changed files with 397 additions and 699 deletions


@@ -32,7 +32,7 @@ type AzureMonitorDatasource struct {
var (
// 1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d in milliseconds
-allowedIntervalsMS = []int64{60000, 300000, 900000, 1800000, 3600000, 21600000, 43200000, 86400000}
+defaultAllowedIntervalsMS = []int64{60000, 300000, 900000, 1800000, 3600000, 21600000, 43200000, 86400000}
)
// executeTimeSeriesQuery does the following:
@@ -99,13 +99,15 @@ func (e *AzureMonitorDatasource) buildQueries(queries []*tsdb.Query, timeRange *
}
azureURL := ub.Build()
-alias := fmt.Sprintf("%v", azureMonitorTarget["alias"])
+alias := ""
+if val, ok := azureMonitorTarget["alias"]; ok {
+alias = fmt.Sprintf("%v", val)
+}
timeGrain := fmt.Sprintf("%v", azureMonitorTarget["timeGrain"])
+timeGrains := azureMonitorTarget["allowedTimeGrainsMs"]
if timeGrain == "auto" {
-autoInterval := e.findClosestAllowedIntervalMS(query.IntervalMs)
-tg := &TimeGrain{}
-timeGrain, err = tg.createISO8601DurationFromIntervalMS(autoInterval)
+timeGrain, err = e.setAutoTimeGrain(query.IntervalMs, timeGrains)
if err != nil {
return nil, err
}
@@ -120,7 +122,7 @@ func (e *AzureMonitorDatasource) buildQueries(queries []*tsdb.Query, timeRange *
dimension := strings.TrimSpace(fmt.Sprintf("%v", azureMonitorTarget["dimension"]))
dimensionFilter := strings.TrimSpace(fmt.Sprintf("%v", azureMonitorTarget["dimensionFilter"]))
-if azureMonitorTarget["dimension"] != nil && azureMonitorTarget["dimensionFilter"] != nil && len(dimension) > 0 && len(dimensionFilter) > 0 {
+if azureMonitorTarget["dimension"] != nil && azureMonitorTarget["dimensionFilter"] != nil && len(dimension) > 0 && len(dimensionFilter) > 0 && dimension != "None" {
params.Add("$filter", fmt.Sprintf("%s eq '%s'", dimension, dimensionFilter))
}
@@ -143,6 +145,35 @@ func (e *AzureMonitorDatasource) buildQueries(queries []*tsdb.Query, timeRange *
return azureMonitorQueries, nil
}
// setAutoTimeGrain tries to find the closest interval to the query's intervalMs value
// if the metric has a limited set of possible intervals/time grains then use those
// instead of the default list of intervals
func (e *AzureMonitorDatasource) setAutoTimeGrain(intervalMs int64, timeGrains interface{}) (string, error) {
// parses array of numbers from the timeGrains json field
allowedTimeGrains := []int64{}
tgs, ok := timeGrains.([]interface{})
if ok {
for _, v := range tgs {
jsonNumber, ok := v.(json.Number)
if ok {
tg, err := jsonNumber.Int64()
if err == nil {
allowedTimeGrains = append(allowedTimeGrains, tg)
}
}
}
}
autoInterval := e.findClosestAllowedIntervalMS(intervalMs, allowedTimeGrains)
tg := &TimeGrain{}
autoTimeGrain, err := tg.createISO8601DurationFromIntervalMS(autoInterval)
if err != nil {
return "", err
}
return autoTimeGrain, nil
}
func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *AzureMonitorQuery, queries []*tsdb.Query, timeRange *tsdb.TimeRange) (*tsdb.QueryResult, AzureMonitorResponse, error) {
queryResult := &tsdb.QueryResult{Meta: simplejson.New(), RefId: query.RefID}
@@ -257,7 +288,7 @@ func (e *AzureMonitorDatasource) parseResponse(queryRes *tsdb.QueryResult, data
metadataName = series.Metadatavalues[0].Name.LocalizedValue
metadataValue = series.Metadatavalues[0].Value
}
-defaultMetricName := formatLegendKey(query.UrlComponents["resourceName"], data.Value[0].Name.LocalizedValue, metadataName, metadataValue)
+metricName := formatLegendKey(query.Alias, query.UrlComponents["resourceName"], data.Value[0].Name.LocalizedValue, metadataName, metadataValue, data.Namespace, data.Value[0].ID)
for _, point := range series.Data {
var value float64
@@ -279,10 +310,11 @@ func (e *AzureMonitorDatasource) parseResponse(queryRes *tsdb.QueryResult, data
}
queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
-Name: defaultMetricName,
+Name: metricName,
Points: points,
})
}
queryRes.Meta.Set("unit", data.Value[0].Unit)
return nil
}
@@ -290,13 +322,21 @@ func (e *AzureMonitorDatasource) parseResponse(queryRes *tsdb.QueryResult, data
// findClosestAllowedIntervalMs is used for the auto time grain setting.
// It finds the closest time grain from the list of allowed time grains for Azure Monitor
// using the Grafana interval in milliseconds
-func (e *AzureMonitorDatasource) findClosestAllowedIntervalMS(intervalMs int64) int64 {
-closest := allowedIntervalsMS[0]
+// Some metrics only allow a limited list of time grains. The allowedTimeGrains parameter
+// allows overriding the default list of allowed time grains.
+func (e *AzureMonitorDatasource) findClosestAllowedIntervalMS(intervalMs int64, allowedTimeGrains []int64) int64 {
+allowedIntervals := defaultAllowedIntervalsMS
-for i, allowed := range allowedIntervalsMS {
+if len(allowedTimeGrains) > 0 {
+allowedIntervals = allowedTimeGrains
+}
+closest := allowedIntervals[0]
+for i, allowed := range allowedIntervals {
if intervalMs > allowed {
-if i+1 < len(allowedIntervalsMS) {
-closest = allowedIntervalsMS[i+1]
+if i+1 < len(allowedIntervals) {
+closest = allowedIntervals[i+1]
} else {
closest = allowed
}
@@ -306,9 +346,50 @@ func (e *AzureMonitorDatasource) findClosestAllowedIntervalMS(intervalMs int64)
}
// formatLegendKey builds the legend key or timeseries name
-func formatLegendKey(resourceName string, metricName string, metadataName string, metadataValue string) string {
-if len(metadataName) > 0 {
-return fmt.Sprintf("%s{%s=%s}.%s", resourceName, metadataName, metadataValue, metricName)
+// Alias patterns like {{resourcename}} are replaced with the appropriate data values.
+func formatLegendKey(alias string, resourceName string, metricName string, metadataName string, metadataValue string, namespace string, seriesID string) string {
+if alias == "" {
+if len(metadataName) > 0 {
+return fmt.Sprintf("%s{%s=%s}.%s", resourceName, metadataName, metadataValue, metricName)
}
+return fmt.Sprintf("%s.%s", resourceName, metricName)
}
-return fmt.Sprintf("%s.%s", resourceName, metricName)
+startIndex := strings.Index(seriesID, "/resourceGroups/") + 16
+endIndex := strings.Index(seriesID, "/providers")
+resourceGroup := seriesID[startIndex:endIndex]
+result := legendKeyFormat.ReplaceAllFunc([]byte(alias), func(in []byte) []byte {
+metaPartName := strings.Replace(string(in), "{{", "", 1)
+metaPartName = strings.Replace(metaPartName, "}}", "", 1)
+metaPartName = strings.ToLower(strings.TrimSpace(metaPartName))
+if metaPartName == "resourcegroup" {
+return []byte(resourceGroup)
+}
+if metaPartName == "namespace" {
+return []byte(namespace)
+}
+if metaPartName == "resourcename" {
+return []byte(resourceName)
+}
+if metaPartName == "metric" {
+return []byte(metricName)
+}
+if metaPartName == "dimensionname" {
+return []byte(metadataName)
+}
+if metaPartName == "dimensionvalue" {
+return []byte(metadataValue)
+}
+return in
+})
+return string(result)
}
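
To make the auto time grain selection above concrete, here is a standalone sketch of
the same closest-interval walk. closestAllowedIntervalMS is an illustrative stand-in
for findClosestAllowedIntervalMS, which in the change is a method on the datasource:

```go
package main

import "fmt"

// Default candidates: 1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d in milliseconds.
var defaultAllowedIntervalsMS = []int64{60000, 300000, 900000, 1800000, 3600000, 21600000, 43200000, 86400000}

// closestAllowedIntervalMS mirrors the selection logic above: walk the allowed
// list and keep bumping to the next candidate while the requested interval is
// still larger than the current one.
func closestAllowedIntervalMS(intervalMs int64, allowedTimeGrains []int64) int64 {
	allowed := defaultAllowedIntervalsMS
	if len(allowedTimeGrains) > 0 {
		allowed = allowedTimeGrains
	}
	closest := allowed[0]
	for i, a := range allowed {
		if intervalMs > a {
			if i+1 < len(allowed) {
				closest = allowed[i+1]
			} else {
				closest = a
			}
		}
	}
	return closest
}

func main() {
	// Grafana asks for a 400000ms (~6.7m) interval.
	fmt.Println(closestAllowedIntervalMS(400000, nil))                    // 900000 -> PT15M
	fmt.Println(closestAllowedIntervalMS(400000, []int64{60000, 300000})) // 300000 -> PT5M
}
```

The two calls mirror the new tests below: a 400000ms interval resolves to PT15M
against the default list, and to PT5M when the metric only allows the 1m and 5m grains.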


@@ -67,6 +67,49 @@ func TestAzureMonitorDatasource(t *testing.T) {
So(queries[0].Alias, ShouldEqual, "testalias")
})
Convey("and has a time grain set to auto", func() {
tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
"azureMonitor": map[string]interface{}{
"timeGrain": "auto",
"aggregation": "Average",
"resourceGroup": "grafanastaging",
"resourceName": "grafana",
"metricDefinition": "Microsoft.Compute/virtualMachines",
"metricName": "Percentage CPU",
"alias": "testalias",
"queryType": "Azure Monitor",
},
})
tsdbQuery.Queries[0].IntervalMs = 400000
queries, err := datasource.buildQueries(tsdbQuery.Queries, tsdbQuery.TimeRange)
So(err, ShouldBeNil)
So(queries[0].Params["interval"][0], ShouldEqual, "PT15M")
})
Convey("and has a time grain set to auto and the metric has a limited list of allowed time grains", func() {
tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
"azureMonitor": map[string]interface{}{
"timeGrain": "auto",
"aggregation": "Average",
"resourceGroup": "grafanastaging",
"resourceName": "grafana",
"metricDefinition": "Microsoft.Compute/virtualMachines",
"metricName": "Percentage CPU",
"alias": "testalias",
"queryType": "Azure Monitor",
"allowedTimeGrainsMs": []interface{}{"auto", json.Number("60000"), json.Number("300000")},
},
})
tsdbQuery.Queries[0].IntervalMs = 400000
queries, err := datasource.buildQueries(tsdbQuery.Queries, tsdbQuery.TimeRange)
So(err, ShouldBeNil)
So(queries[0].Params["interval"][0], ShouldEqual, "PT5M")
})
Convey("and has a dimension filter", func() {
tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
"azureMonitor": map[string]interface{}{
@@ -89,6 +132,29 @@ func TestAzureMonitorDatasource(t *testing.T) {
So(queries[0].Target, ShouldEqual, "%24filter=blob+eq+%27%2A%27&aggregation=Average&api-version=2018-01-01&interval=PT1M&metricnames=Percentage+CPU&timespan=2018-03-15T13%3A00%3A00Z%2F2018-03-15T13%3A34%3A00Z")
})
Convey("and has a dimension filter set to None", func() {
tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
"azureMonitor": map[string]interface{}{
"timeGrain": "PT1M",
"aggregation": "Average",
"resourceGroup": "grafanastaging",
"resourceName": "grafana",
"metricDefinition": "Microsoft.Compute/virtualMachines",
"metricName": "Percentage CPU",
"alias": "testalias",
"queryType": "Azure Monitor",
"dimension": "None",
"dimensionFilter": "*",
},
})
queries, err := datasource.buildQueries(tsdbQuery.Queries, tsdbQuery.TimeRange)
So(err, ShouldBeNil)
So(queries[0].Target, ShouldEqual, "aggregation=Average&api-version=2018-01-01&interval=PT1M&metricnames=Percentage+CPU&timespan=2018-03-15T13%3A00%3A00Z%2F2018-03-15T13%3A34%3A00Z")
})
})
Convey("Parse AzureMonitor API response in the time series format", func() {
@@ -235,6 +301,48 @@ func TestAzureMonitorDatasource(t *testing.T) {
So(res.Series[2].Name, ShouldEqual, "grafana{blobtype=Azure Data Lake Storage}.Blob Count")
So(res.Series[2].Points[0][0].Float64, ShouldEqual, 0)
})
Convey("when data from query has alias patterns", func() {
data, err := loadTestFile("./test-data/2-azure-monitor-response-total.json")
So(err, ShouldBeNil)
res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
query := &AzureMonitorQuery{
Alias: "custom {{resourcegroup}} {{namespace}} {{resourceName}} {{metric}}",
UrlComponents: map[string]string{
"resourceName": "grafana",
},
Params: url.Values{
"aggregation": {"Total"},
},
}
err = datasource.parseResponse(res, data, query)
So(err, ShouldBeNil)
So(res.Series[0].Name, ShouldEqual, "custom grafanastaging Microsoft.Compute/virtualMachines grafana Percentage CPU")
})
Convey("when data has dimension filters and alias patterns", func() {
data, err := loadTestFile("./test-data/6-azure-monitor-response-multi-dimension.json")
So(err, ShouldBeNil)
res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
query := &AzureMonitorQuery{
Alias: "{{dimensionname}}={{DimensionValue}}",
UrlComponents: map[string]string{
"resourceName": "grafana",
},
Params: url.Values{
"aggregation": {"Average"},
},
}
err = datasource.parseResponse(res, data, query)
So(err, ShouldBeNil)
So(res.Series[0].Name, ShouldEqual, "blobtype=PageBlob")
So(res.Series[1].Name, ShouldEqual, "blobtype=BlockBlob")
So(res.Series[2].Name, ShouldEqual, "blobtype=Azure Data Lake Storage")
})
})
Convey("Find closest allowed interval for auto time grain", func() {
@@ -247,13 +355,16 @@ func TestAzureMonitorDatasource(t *testing.T) {
"2d": 172800000,
}
-closest := datasource.findClosestAllowedIntervalMS(intervals["3m"])
+closest := datasource.findClosestAllowedIntervalMS(intervals["3m"], []int64{})
So(closest, ShouldEqual, intervals["5m"])
-closest = datasource.findClosestAllowedIntervalMS(intervals["10m"])
+closest = datasource.findClosestAllowedIntervalMS(intervals["10m"], []int64{})
So(closest, ShouldEqual, intervals["15m"])
-closest = datasource.findClosestAllowedIntervalMS(intervals["2d"])
+closest = datasource.findClosestAllowedIntervalMS(intervals["2d"], []int64{})
So(closest, ShouldEqual, intervals["1d"])
+closest = datasource.findClosestAllowedIntervalMS(intervals["3m"], []int64{intervals["1d"]})
+So(closest, ShouldEqual, intervals["1d"])
})
})


@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"regexp"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/models"
@@ -11,7 +12,8 @@ import (
)
var (
-azlog log.Logger
+azlog           log.Logger
+legendKeyFormat *regexp.Regexp
)
// AzureMonitorExecutor executes queries for the Azure Monitor datasource - all four services
@@ -36,6 +38,7 @@ func NewAzureMonitorExecutor(dsInfo *models.DataSource) (tsdb.TsdbQueryEndpoint,
func init() {
azlog = log.New("tsdb.azuremonitor")
tsdb.RegisterTsdbQueryEndpoint("grafana-azure-monitor-datasource", NewAzureMonitorExecutor)
legendKeyFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
}
// Query takes in the frontend queries, parses them into the query format
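
For reference, the {{resourcegroup}} token is resolved by slicing the metric series ID
between the "/resourceGroups/" and "/providers" segments (the + 16 in formatLegendKey
is the length of "/resourceGroups/"). A small sketch with an illustrative resource ID:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative Azure resource ID as returned in a metric series' ID field.
	seriesID := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/grafanastaging/providers/Microsoft.Compute/virtualMachines/grafana"

	// Same slicing formatLegendKey uses to resolve {{resourcegroup}}:
	// everything between "/resourceGroups/" (16 characters) and "/providers".
	start := strings.Index(seriesID, "/resourceGroups/") + len("/resourceGroups/")
	end := strings.Index(seriesID, "/providers")
	fmt.Println(seriesID[start:end]) // grafanastaging
}
```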