AzureMonitor: Add errorsource (#92094)

* Add errorsource

* Migrate to individually build metric queries

* Migrate logs queries to be built individually

* Migrate to individually build resource graph queries
Andreas Christou 2024-09-09 10:29:35 +01:00 committed by GitHub
parent 8eb7e55f8f
commit d61530941a
9 changed files with 246 additions and 242 deletions
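
For orientation, a minimal sketch of the per-query pattern this commit migrates to: each query is built and executed on its own, and any failure is recorded against that query's RefID via errorsource.AddErrorToResponse instead of failing the whole request. The buildQuery/executeQuery stubs below are hypothetical stand-ins for the per-datasource implementations changed in the hunks that follow; the errorsource and backend calls mirror the ones used in this commit.

package example

import (
	"context"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
)

// Hypothetical stand-ins for the per-datasource build/execute steps.
func buildQuery(_ context.Context, q backend.DataQuery) (backend.DataQuery, error) {
	return q, nil
}

func executeQuery(_ context.Context, _ backend.DataQuery) (*backend.DataResponse, error) {
	return &backend.DataResponse{}, nil
}

// executeAll builds and runs each query individually; one bad query no longer
// aborts the others, and its error carries an error source for attribution.
func executeAll(ctx context.Context, queries []backend.DataQuery) *backend.QueryDataResponse {
	result := backend.NewQueryDataResponse()
	for _, q := range queries {
		built, err := buildQuery(ctx, q)
		if err != nil {
			errorsource.AddErrorToResponse(q.RefID, result, err)
			continue
		}
		res, err := executeQuery(ctx, built)
		if err != nil {
			errorsource.AddErrorToResponse(q.RefID, result, err)
			continue
		}
		result.Responses[q.RefID] = *res
	}
	return result
}

Compare this with the ExecuteTimeSeriesQuery hunks below for the Log Analytics, Metrics, and Resource Graph datasources.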


@ -6,6 +6,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -18,6 +19,7 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/tracing"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@ -136,15 +138,16 @@ func (e *AzureLogAnalyticsDatasource) GetBasicLogsUsage(ctx context.Context, url
// 3. parses the responses for each query into data frames
func (e *AzureLogAnalyticsDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, fromAlert bool) (*backend.QueryDataResponse, error) {
result := backend.NewQueryDataResponse()
queries, err := e.buildQueries(ctx, originalQueries, dsInfo, fromAlert)
if err != nil {
return nil, err
}
for _, query := range queries {
res, err := e.executeQuery(ctx, query, dsInfo, client, url)
for _, query := range originalQueries {
logsQuery, err := e.buildQuery(ctx, query, dsInfo, fromAlert)
if err != nil {
result.Responses[query.RefID] = backend.DataResponse{Error: err}
errorsource.AddErrorToResponse(query.RefID, result, err)
continue
}
res, err := e.executeQuery(ctx, logsQuery, dsInfo, client, url)
if err != nil {
errorsource.AddErrorToResponse(query.RefID, result, err)
continue
}
result.Responses[query.RefID] = *res
@ -179,6 +182,7 @@ func buildLogAnalyticsQuery(query backend.DataQuery, dsInfo types.DatasourceInfo
if basicLogsQueryFlag {
if meetsBasicLogsCriteria, meetsBasicLogsCriteriaErr := meetsBasicLogsCriteria(resources, fromAlert); meetsBasicLogsCriteriaErr != nil {
// This error is a downstream error
return nil, meetsBasicLogsCriteriaErr
} else {
basicLogsQuery = meetsBasicLogsCriteria
@ -224,45 +228,52 @@ func buildLogAnalyticsQuery(query backend.DataQuery, dsInfo types.DatasourceInfo
}, nil
}
func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, queries []backend.DataQuery, dsInfo types.DatasourceInfo, fromAlert bool) ([]*AzureLogAnalyticsQuery, error) {
azureLogAnalyticsQueries := []*AzureLogAnalyticsQuery{}
func (e *AzureLogAnalyticsDatasource) buildQuery(ctx context.Context, query backend.DataQuery, dsInfo types.DatasourceInfo, fromAlert bool) (*AzureLogAnalyticsQuery, error) {
var azureLogAnalyticsQuery *AzureLogAnalyticsQuery
appInsightsRegExp, err := regexp.Compile("(?i)providers/microsoft.insights/components")
if err != nil {
return nil, fmt.Errorf("failed to compile Application Insights regex")
}
for _, query := range queries {
if query.QueryType == string(dataquery.AzureQueryTypeAzureLogAnalytics) {
azureLogAnalyticsQuery, err := buildLogAnalyticsQuery(query, dsInfo, appInsightsRegExp, fromAlert)
if err != nil {
return nil, fmt.Errorf("failed to build azure log analytics query: %w", err)
if query.QueryType == string(dataquery.AzureQueryTypeAzureLogAnalytics) {
azureLogAnalyticsQuery, err = buildLogAnalyticsQuery(query, dsInfo, appInsightsRegExp, fromAlert)
if err != nil {
errorMessage := fmt.Errorf("failed to build azure log analytics query: %w", err)
var sourceError errorsource.Error
if errors.As(err, &sourceError) {
return nil, errorsource.SourceError(sourceError.Source(), errorMessage, false)
}
azureLogAnalyticsQueries = append(azureLogAnalyticsQueries, azureLogAnalyticsQuery)
}
if query.QueryType == string(dataquery.AzureQueryTypeAzureTraces) || query.QueryType == string(dataquery.AzureQueryTypeTraceql) {
if query.QueryType == string(dataquery.AzureQueryTypeTraceql) {
cfg := backend.GrafanaConfigFromContext(ctx)
hasPromExemplarsToggle := cfg.FeatureToggles().IsEnabled("azureMonitorPrometheusExemplars")
if !hasPromExemplarsToggle {
return nil, fmt.Errorf("query type unsupported as azureMonitorPrometheusExemplars feature toggle is not enabled")
}
}
azureAppInsightsQuery, err := buildAppInsightsQuery(ctx, query, dsInfo, appInsightsRegExp, e.Logger)
if err != nil {
return nil, fmt.Errorf("failed to build azure application insights query: %w", err)
}
azureLogAnalyticsQueries = append(azureLogAnalyticsQueries, azureAppInsightsQuery)
return nil, errorMessage
}
}
return azureLogAnalyticsQueries, nil
if query.QueryType == string(dataquery.AzureQueryTypeAzureTraces) || query.QueryType == string(dataquery.AzureQueryTypeTraceql) {
if query.QueryType == string(dataquery.AzureQueryTypeTraceql) {
cfg := backend.GrafanaConfigFromContext(ctx)
hasPromExemplarsToggle := cfg.FeatureToggles().IsEnabled("azureMonitorPrometheusExemplars")
if !hasPromExemplarsToggle {
return nil, errorsource.DownstreamError(fmt.Errorf("query type unsupported as azureMonitorPrometheusExemplars feature toggle is not enabled"), false)
}
}
azureAppInsightsQuery, err := buildAppInsightsQuery(ctx, query, dsInfo, appInsightsRegExp, e.Logger)
if err != nil {
errorMessage := fmt.Errorf("failed to build azure application insights query: %w", err)
var sourceError errorsource.Error
if errors.As(err, &sourceError) {
return nil, errorsource.SourceError(sourceError.Source(), errorMessage, false)
}
return nil, errorMessage
}
azureLogAnalyticsQuery = azureAppInsightsQuery
}
return azureLogAnalyticsQuery, nil
}
func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *AzureLogAnalyticsQuery, dsInfo types.DatasourceInfo, client *http.Client, url string) (*backend.DataResponse, error) {
// If azureLogAnalyticsSameAs is defined and set to false, return an error
if sameAs, ok := dsInfo.JSONData["azureLogAnalyticsSameAs"]; ok && !sameAs.(bool) {
return nil, fmt.Errorf("credentials for Log Analytics are no longer supported. Go to the data source configuration to update Azure Monitor credentials")
return nil, errorsource.DownstreamError(fmt.Errorf("credentials for Log Analytics are no longer supported. Go to the data source configuration to update Azure Monitor credentials"), false)
}
queryJSONModel := dataquery.AzureMonitorQuery{}
@ -273,7 +284,7 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *A
if query.QueryType == dataquery.AzureQueryTypeAzureTraces {
if query.ResultFormat == dataquery.ResultFormatTrace && query.Query == "" {
return nil, fmt.Errorf("cannot visualise trace events using the trace visualiser")
return nil, errorsource.DownstreamError(fmt.Errorf("cannot visualise trace events using the trace visualiser"), false)
}
}
@ -294,7 +305,7 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *A
res, err := client.Do(req)
if err != nil {
return nil, err
return nil, errorsource.DownstreamError(err, false)
}
defer func() {
@ -611,7 +622,7 @@ func getCorrelationWorkspaces(ctx context.Context, baseResource string, resource
}()
if res.StatusCode/100 != 2 {
return AzureCorrelationAPIResponse{}, fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body))
return AzureCorrelationAPIResponse{}, errorsource.SourceError(backend.ErrorSourceFromHTTPStatus(res.StatusCode), fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body)), false)
}
var data AzureCorrelationAPIResponse
d := json.NewDecoder(bytes.NewReader(body))
@ -675,7 +686,7 @@ func (e *AzureLogAnalyticsDatasource) unmarshalResponse(res *http.Response) (Azu
}()
if res.StatusCode/100 != 2 {
return AzureLogAnalyticsResponse{}, fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body))
return AzureLogAnalyticsResponse{}, errorsource.SourceError(backend.ErrorSourceFromHTTPStatus(res.StatusCode), fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body)), false)
}
var data AzureLogAnalyticsResponse
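
The buildQuery changes above add context to build failures without losing their classification. A rough sketch of that convention, using only the errorsource helpers already imported in this file: if the underlying error is already errorsource-aware, the wrapped error keeps the original source; otherwise the plainly wrapped error is returned. This mirrors the errors.As blocks added around buildLogAnalyticsQuery and buildAppInsightsQuery.

package example

import (
	"errors"
	"fmt"

	"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
)

// wrapBuildError adds context to err while preserving any error source a
// lower layer (for example a downstream validation failure) already attached.
func wrapBuildError(err error) error {
	wrapped := fmt.Errorf("failed to build azure log analytics query: %w", err)
	var sourceError errorsource.Error
	if errors.As(err, &sourceError) {
		return errorsource.SourceError(sourceError.Source(), wrapped, false)
	}
	return wrapped
}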


@ -741,7 +741,7 @@ func Test_exemplarsFeatureToggle(t *testing.T) {
QueryType: string(dataquery.AzureQueryTypeTraceql),
}
_, err := ds.buildQueries(ctx, []backend.DataQuery{query}, dsInfo, false)
_, err := ds.buildQuery(ctx, query, dsInfo, false)
require.NoError(t, err)
})
@ -761,7 +761,7 @@ func Test_exemplarsFeatureToggle(t *testing.T) {
QueryType: string(dataquery.AzureQueryTypeTraceql),
}
_, err := ds.buildQueries(ctx, []backend.DataQuery{query}, dsInfo, false)
_, err := ds.buildQuery(ctx, query, dsInfo, false)
require.Error(t, err, "query type unsupported as azureMonitorPrometheusExemplars feature toggle is not enabled")
})


@ -8,6 +8,7 @@ import (
"time"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
)
@ -45,14 +46,14 @@ func AddConfigLinks(frame data.Frame, dl string, title *string) data.Frame {
// 4. number of selected resources is exactly one
func meetsBasicLogsCriteria(resources []string, fromAlert bool) (bool, error) {
if fromAlert {
return false, fmt.Errorf("basic Logs queries cannot be used for alerts")
return false, errorsource.DownstreamError(fmt.Errorf("basic Logs queries cannot be used for alerts"), false)
}
if len(resources) != 1 {
return false, fmt.Errorf("basic logs queries cannot be run against multiple resources")
return false, errorsource.DownstreamError(fmt.Errorf("basic logs queries cannot be run against multiple resources"), false)
}
if !strings.Contains(strings.ToLower(resources[0]), "microsoft.operationalinsights/workspaces") {
return false, fmt.Errorf("basic Logs queries may only be run against Log Analytics workspaces")
return false, errorsource.DownstreamError(fmt.Errorf("basic Logs queries may only be run against Log Analytics workspaces"), false)
}
return true, nil
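
These validation failures are caused by the user's query rather than by a plugin bug, which is why they are now constructed with errorsource.DownstreamError. A small hedged illustration of what that buys a caller (the inspectSource function and its printed output are illustrative only): the source can be read back, as the wrapping logic in the datasource does, to attribute the failure correctly.

package example

import (
	"errors"
	"fmt"

	"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
)

func inspectSource() {
	// A user-driven validation failure, classified as downstream.
	err := errorsource.DownstreamError(fmt.Errorf("basic Logs queries cannot be used for alerts"), false)

	var sourceError errorsource.Error
	if errors.As(err, &sourceError) {
		// The failure is attributed to the downstream input, not the plugin.
		fmt.Println(sourceError.Source())
	}
}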


@ -17,6 +17,7 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/backend/tracing"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@ -51,15 +52,15 @@ func (e *AzureMonitorDatasource) ResourceRequest(rw http.ResponseWriter, req *ht
func (e *AzureMonitorDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, fromAlert bool) (*backend.QueryDataResponse, error) {
result := backend.NewQueryDataResponse()
queries, err := e.buildQueries(originalQueries, dsInfo)
if err != nil {
return nil, err
}
for _, query := range queries {
res, err := e.executeQuery(ctx, query, dsInfo, client, url)
for _, query := range originalQueries {
azureQuery, err := e.buildQuery(query, dsInfo)
if err != nil {
result.Responses[query.RefID] = backend.DataResponse{Error: err}
errorsource.AddErrorToResponse(query.RefID, result, err)
continue
}
res, err := e.executeQuery(ctx, azureQuery, dsInfo, client, url)
if err != nil {
errorsource.AddErrorToResponse(query.RefID, result, err)
continue
}
result.Responses[query.RefID] = *res
@ -68,151 +69,146 @@ func (e *AzureMonitorDatasource) ExecuteTimeSeriesQuery(ctx context.Context, ori
return result, nil
}
func (e *AzureMonitorDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*types.AzureMonitorQuery, error) {
azureMonitorQueries := []*types.AzureMonitorQuery{}
func (e *AzureMonitorDatasource) buildQuery(query backend.DataQuery, dsInfo types.DatasourceInfo) (*types.AzureMonitorQuery, error) {
var target string
queryJSONModel := dataquery.AzureMonitorQuery{}
err := json.Unmarshal(query.JSON, &queryJSONModel)
if err != nil {
return nil, fmt.Errorf("failed to decode the Azure Monitor query object from JSON: %w", err)
}
for _, query := range queries {
var target string
queryJSONModel := dataquery.AzureMonitorQuery{}
err := json.Unmarshal(query.JSON, &queryJSONModel)
azJSONModel := queryJSONModel.AzureMonitor
// Legacy: If only MetricDefinition is set, use it as namespace
if azJSONModel.MetricDefinition != nil && *azJSONModel.MetricDefinition != "" &&
azJSONModel.MetricNamespace != nil && *azJSONModel.MetricNamespace == "" {
azJSONModel.MetricNamespace = azJSONModel.MetricDefinition
}
azJSONModel.DimensionFilters = MigrateDimensionFilters(azJSONModel.DimensionFilters)
alias := ""
if azJSONModel.Alias != nil {
alias = *azJSONModel.Alias
}
azureURL := ""
if queryJSONModel.Subscription != nil {
azureURL = BuildSubscriptionMetricsURL(*queryJSONModel.Subscription)
}
filterInBody := true
resourceIDs := []string{}
resourceMap := map[string]dataquery.AzureMonitorResource{}
if hasOne, resourceGroup, resourceName := hasOneResource(queryJSONModel); hasOne {
ub := urlBuilder{
ResourceURI: azJSONModel.ResourceUri,
// Alternative, used to reconstruct resource URI if it's not present
DefaultSubscription: &dsInfo.Settings.SubscriptionId,
Subscription: queryJSONModel.Subscription,
ResourceGroup: resourceGroup,
MetricNamespace: azJSONModel.MetricNamespace,
ResourceName: resourceName,
}
// Construct the resourceURI (for legacy query objects pre Grafana 9)
resourceUri, err := ub.buildResourceURI()
if err != nil {
return nil, fmt.Errorf("failed to decode the Azure Monitor query object from JSON: %w", err)
return nil, err
}
azJSONModel := queryJSONModel.AzureMonitor
// Legacy: If only MetricDefinition is set, use it as namespace
if azJSONModel.MetricDefinition != nil && *azJSONModel.MetricDefinition != "" &&
azJSONModel.MetricNamespace != nil && *azJSONModel.MetricNamespace == "" {
azJSONModel.MetricNamespace = azJSONModel.MetricDefinition
// POST requests are only supported at the subscription level
filterInBody = false
if resourceUri != nil {
azureURL = fmt.Sprintf("%s/providers/microsoft.insights/metrics", *resourceUri)
resourceMap[*resourceUri] = dataquery.AzureMonitorResource{ResourceGroup: resourceGroup, ResourceName: resourceName}
}
azJSONModel.DimensionFilters = MigrateDimensionFilters(azJSONModel.DimensionFilters)
alias := ""
if azJSONModel.Alias != nil {
alias = *azJSONModel.Alias
}
azureURL := ""
if queryJSONModel.Subscription != nil {
azureURL = BuildSubscriptionMetricsURL(*queryJSONModel.Subscription)
}
filterInBody := true
resourceIDs := []string{}
resourceMap := map[string]dataquery.AzureMonitorResource{}
if hasOne, resourceGroup, resourceName := hasOneResource(queryJSONModel); hasOne {
} else {
for _, r := range azJSONModel.Resources {
ub := urlBuilder{
ResourceURI: azJSONModel.ResourceUri,
// Alternative, used to reconstruct resource URI if it's not present
DefaultSubscription: &dsInfo.Settings.SubscriptionId,
Subscription: queryJSONModel.Subscription,
ResourceGroup: resourceGroup,
ResourceGroup: r.ResourceGroup,
MetricNamespace: azJSONModel.MetricNamespace,
ResourceName: resourceName,
ResourceName: r.ResourceName,
}
// Construct the resourceURI (for legacy query objects pre Grafana 9)
resourceUri, err := ub.buildResourceURI()
if err != nil {
return nil, err
}
// POST requests are only supported at the subscription level
filterInBody = false
if resourceUri != nil {
azureURL = fmt.Sprintf("%s/providers/microsoft.insights/metrics", *resourceUri)
resourceMap[*resourceUri] = dataquery.AzureMonitorResource{ResourceGroup: resourceGroup, ResourceName: resourceName}
}
} else {
for _, r := range azJSONModel.Resources {
ub := urlBuilder{
DefaultSubscription: &dsInfo.Settings.SubscriptionId,
Subscription: queryJSONModel.Subscription,
ResourceGroup: r.ResourceGroup,
MetricNamespace: azJSONModel.MetricNamespace,
ResourceName: r.ResourceName,
}
resourceUri, err := ub.buildResourceURI()
if err != nil {
return nil, err
}
if resourceUri != nil {
resourceMap[*resourceUri] = r
}
resourceIDs = append(resourceIDs, fmt.Sprintf("Microsoft.ResourceId eq '%s'", *resourceUri))
resourceMap[*resourceUri] = r
}
resourceIDs = append(resourceIDs, fmt.Sprintf("Microsoft.ResourceId eq '%s'", *resourceUri))
}
// old model
dimension := ""
if azJSONModel.Dimension != nil {
dimension = strings.TrimSpace(*azJSONModel.Dimension)
}
dimensionFilter := ""
if azJSONModel.DimensionFilter != nil {
dimensionFilter = strings.TrimSpace(*azJSONModel.DimensionFilter)
}
dimSB := strings.Builder{}
if dimension != "" && dimensionFilter != "" && dimension != "None" && len(azJSONModel.DimensionFilters) == 0 {
dimSB.WriteString(fmt.Sprintf("%s eq '%s'", dimension, dimensionFilter))
} else {
for i, filter := range azJSONModel.DimensionFilters {
if len(filter.Filters) == 0 {
dimSB.WriteString(fmt.Sprintf("%s eq '*'", *filter.Dimension))
} else {
dimSB.WriteString(types.ConstructFiltersString(filter))
}
if i != len(azJSONModel.DimensionFilters)-1 {
dimSB.WriteString(" and ")
}
}
}
filterString := strings.Join(resourceIDs, " or ")
if dimSB.String() != "" {
if filterString != "" {
filterString = fmt.Sprintf("(%s) and (%s)", filterString, dimSB.String())
} else {
filterString = dimSB.String()
}
}
params, err := getParams(azJSONModel, query)
if err != nil {
return nil, err
}
target = params.Encode()
sub := ""
if queryJSONModel.Subscription != nil {
sub = *queryJSONModel.Subscription
}
query := &types.AzureMonitorQuery{
URL: azureURL,
Target: target,
Params: params,
RefID: query.RefID,
Alias: alias,
TimeRange: query.TimeRange,
Dimensions: azJSONModel.DimensionFilters,
Resources: resourceMap,
Subscription: sub,
}
if filterString != "" {
if filterInBody {
query.BodyFilter = filterString
} else {
query.Params.Add("$filter", filterString)
}
}
azureMonitorQueries = append(azureMonitorQueries, query)
}
return azureMonitorQueries, nil
// old model
dimension := ""
if azJSONModel.Dimension != nil {
dimension = strings.TrimSpace(*azJSONModel.Dimension)
}
dimensionFilter := ""
if azJSONModel.DimensionFilter != nil {
dimensionFilter = strings.TrimSpace(*azJSONModel.DimensionFilter)
}
dimSB := strings.Builder{}
if dimension != "" && dimensionFilter != "" && dimension != "None" && len(azJSONModel.DimensionFilters) == 0 {
dimSB.WriteString(fmt.Sprintf("%s eq '%s'", dimension, dimensionFilter))
} else {
for i, filter := range azJSONModel.DimensionFilters {
if len(filter.Filters) == 0 {
dimSB.WriteString(fmt.Sprintf("%s eq '*'", *filter.Dimension))
} else {
dimSB.WriteString(types.ConstructFiltersString(filter))
}
if i != len(azJSONModel.DimensionFilters)-1 {
dimSB.WriteString(" and ")
}
}
}
filterString := strings.Join(resourceIDs, " or ")
if dimSB.String() != "" {
if filterString != "" {
filterString = fmt.Sprintf("(%s) and (%s)", filterString, dimSB.String())
} else {
filterString = dimSB.String()
}
}
params, err := getParams(azJSONModel, query)
if err != nil {
return nil, err
}
target = params.Encode()
sub := ""
if queryJSONModel.Subscription != nil {
sub = *queryJSONModel.Subscription
}
azureQuery := &types.AzureMonitorQuery{
URL: azureURL,
Target: target,
Params: params,
RefID: query.RefID,
Alias: alias,
TimeRange: query.TimeRange,
Dimensions: azJSONModel.DimensionFilters,
Resources: resourceMap,
Subscription: sub,
}
if filterString != "" {
if filterInBody {
azureQuery.BodyFilter = filterString
} else {
azureQuery.Params.Add("$filter", filterString)
}
}
return azureQuery, nil
}
func getParams(azJSONModel *dataquery.AzureMetricQuery, query backend.DataQuery) (url.Values, error) {
@ -288,7 +284,7 @@ func (e *AzureMonitorDatasource) retrieveSubscriptionDetails(cli *http.Client, c
}
if res.StatusCode/100 != 2 {
return "", fmt.Errorf("request failed, status: %s, error: %s", res.Status, string(body))
return "", errorsource.SourceError(backend.ErrorSourceFromHTTPStatus(res.StatusCode), fmt.Errorf("request failed, status: %s, error: %s", res.Status, string(body)), false)
}
var data types.SubscriptionsResponse
@ -325,7 +321,7 @@ func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *types.
res, err := cli.Do(req)
if err != nil {
return nil, err
return nil, errorsource.DownstreamError(err, false)
}
defer func() {
@ -370,7 +366,7 @@ func (e *AzureMonitorDatasource) unmarshalResponse(res *http.Response) (types.Az
}
if res.StatusCode/100 != 2 {
return types.AzureMonitorResponse{}, fmt.Errorf("request failed, status: %s, error: %s", res.Status, string(body))
return types.AzureMonitorResponse{}, errorsource.SourceError(backend.ErrorSourceFromHTTPStatus(res.StatusCode), fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body)), false)
}
var data types.AzureMonitorResponse
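
The request-path changes in this file, and their Log Analytics and Resource Graph counterparts, follow one convention, sketched below with the helpers used throughout the diff: a transport failure from client.Do is treated as downstream, and a non-2xx response takes its error source from the HTTP status code. The doRequest helper is a condensed illustration, not the datasource's actual method.

package example

import (
	"fmt"
	"io"
	"net/http"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
)

func doRequest(client *http.Client, req *http.Request) ([]byte, error) {
	res, err := client.Do(req)
	if err != nil {
		// The request never produced a response: a downstream (network) failure.
		return nil, errorsource.DownstreamError(err, false)
	}
	defer func() {
		_ = res.Body.Close()
	}()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	if res.StatusCode/100 != 2 {
		// Derive the source (downstream vs plugin) from the status code.
		return nil, errorsource.SourceError(
			backend.ErrorSourceFromHTTPStatus(res.StatusCode),
			fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body)),
			false,
		)
	}
	return body, nil
}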


@ -294,7 +294,7 @@ func TestAzureMonitorBuildQueries(t *testing.T) {
},
}
queries, err := datasource.buildQueries(tsdbQuery, dsInfo)
query, err := datasource.buildQuery(tsdbQuery[0], dsInfo)
require.NoError(t, err)
resources := map[string]dataquery.AzureMonitorResource{}
@ -321,12 +321,12 @@ func TestAzureMonitorBuildQueries(t *testing.T) {
Resources: resources,
}
assert.Equal(t, tt.expectedParamFilter, queries[0].Params.Get("$filter"))
assert.Equal(t, tt.expectedParamFilter, query.Params.Get("$filter"))
if azureMonitorQuery.URL == "" {
azureMonitorQuery.URL = "/subscriptions/12345678-aaaa-bbbb-cccc-123456789abc/resourceGroups/grafanastaging/providers/Microsoft.Compute/virtualMachines/grafana/providers/microsoft.insights/metrics"
}
if diff := cmp.Diff(azureMonitorQuery, queries[0], cmpopts.IgnoreUnexported(struct{}{}), cmpopts.IgnoreFields(types.AzureMonitorQuery{}, "Params", "Dimensions")); diff != "" {
if diff := cmp.Diff(azureMonitorQuery, query, cmpopts.IgnoreUnexported(struct{}{}), cmpopts.IgnoreFields(types.AzureMonitorQuery{}, "Params", "Dimensions")); diff != "" {
t.Errorf("Result mismatch (-want +got):\n%s", diff)
}
@ -338,7 +338,7 @@ func TestAzureMonitorBuildQueries(t *testing.T) {
expectedPortalURL = *tt.expectedPortalURL
}
actual, err := getQueryUrl(queries[0], "http://ds", "/subscriptions/12345678-aaaa-bbbb-cccc-123456789abc/resourceGroups/grafanastaging/providers/Microsoft.Compute/virtualMachines/grafana", "grafana")
actual, err := getQueryUrl(query, "http://ds", "/subscriptions/12345678-aaaa-bbbb-cccc-123456789abc/resourceGroups/grafanastaging/providers/Microsoft.Compute/virtualMachines/grafana", "grafana")
require.NoError(t, err)
require.Equal(t, expectedPortalURL, actual)
})
@ -359,10 +359,10 @@ func TestCustomNamespace(t *testing.T) {
},
}
result, err := datasource.buildQueries(q, types.DatasourceInfo{})
result, err := datasource.buildQuery(q[0], types.DatasourceInfo{})
require.NoError(t, err)
expected := "custom/namespace"
require.Equal(t, expected, result[0].Params.Get("metricnamespace"))
require.Equal(t, expected, result.Params.Get("metricnamespace"))
})
}


@ -3,6 +3,8 @@ package metrics
import (
"fmt"
"strings"
"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
)
// urlBuilder builds the URL for calling the Azure Monitor API
@ -33,7 +35,7 @@ func (params *urlBuilder) buildResourceURI() (*string, error) {
if metricNamespace == nil || *metricNamespace == "" {
if params.MetricDefinition == nil || *params.MetricDefinition == "" {
return nil, fmt.Errorf("no metricNamespace or metricDefiniton value provided")
return nil, errorsource.DownstreamError(fmt.Errorf("no metricNamespace or metricDefiniton value provided"), false)
}
metricNamespace = params.MetricDefinition
}
@ -45,7 +47,7 @@ func (params *urlBuilder) buildResourceURI() (*string, error) {
provider = metricNamespaceArray[0]
metricNamespaceArray = metricNamespaceArray[1:]
} else {
return nil, fmt.Errorf("metricNamespace is not in the correct format")
return nil, errorsource.DownstreamError(fmt.Errorf("metricNamespace is not in the correct format"), false)
}
var resourceNameArray []string
@ -76,7 +78,7 @@ func (params *urlBuilder) buildResourceURI() (*string, error) {
if i < len(resourceNameArray) {
urlArray = append(urlArray, namespace, resourceNameArray[i])
} else {
return nil, fmt.Errorf("resourceNameArray does not have enough elements")
return nil, errorsource.DownstreamError(fmt.Errorf("resourceNameArray does not have enough elements"), false)
}
}


@ -15,6 +15,7 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/backend/tracing"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@ -63,15 +64,14 @@ func (e *AzureResourceGraphDatasource) ExecuteTimeSeriesQuery(ctx context.Contex
Responses: map[string]backend.DataResponse{},
}
queries, err := e.buildQueries(originalQueries, dsInfo)
if err != nil {
return nil, err
}
for _, query := range queries {
res, err := e.executeQuery(ctx, query, dsInfo, client, url)
for _, query := range originalQueries {
graphQuery, err := e.buildQuery(query, dsInfo)
if err != nil {
result.Responses[query.RefID] = backend.DataResponse{Error: err}
return nil, err
}
res, err := e.executeQuery(ctx, graphQuery, dsInfo, client, url)
if err != nil {
errorsource.AddErrorToResponse(query.RefID, result, err)
continue
}
result.Responses[query.RefID] = *res
@ -87,38 +87,33 @@ type argJSONQuery struct {
} `json:"azureResourceGraph"`
}
func (e *AzureResourceGraphDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureResourceGraphQuery, error) {
azureResourceGraphQueries := make([]*AzureResourceGraphQuery, len(queries))
for i, query := range queries {
queryJSONModel := argJSONQuery{}
err := json.Unmarshal(query.JSON, &queryJSONModel)
if err != nil {
return nil, fmt.Errorf("failed to decode the Azure Resource Graph query object from JSON: %w", err)
}
azureResourceGraphTarget := queryJSONModel.AzureResourceGraph
resultFormat := azureResourceGraphTarget.ResultFormat
if resultFormat == "" {
resultFormat = "table"
}
interpolatedQuery, err := macros.KqlInterpolate(query, dsInfo, azureResourceGraphTarget.Query)
if err != nil {
return nil, err
}
azureResourceGraphQueries[i] = &AzureResourceGraphQuery{
RefID: query.RefID,
ResultFormat: resultFormat,
JSON: query.JSON,
InterpolatedQuery: interpolatedQuery,
TimeRange: query.TimeRange,
QueryType: query.QueryType,
}
func (e *AzureResourceGraphDatasource) buildQuery(query backend.DataQuery, dsInfo types.DatasourceInfo) (*AzureResourceGraphQuery, error) {
queryJSONModel := argJSONQuery{}
err := json.Unmarshal(query.JSON, &queryJSONModel)
if err != nil {
return nil, fmt.Errorf("failed to decode the Azure Resource Graph query object from JSON: %w", err)
}
return azureResourceGraphQueries, nil
azureResourceGraphTarget := queryJSONModel.AzureResourceGraph
resultFormat := azureResourceGraphTarget.ResultFormat
if resultFormat == "" {
resultFormat = "table"
}
interpolatedQuery, err := macros.KqlInterpolate(query, dsInfo, azureResourceGraphTarget.Query)
if err != nil {
return nil, err
}
return &AzureResourceGraphQuery{
RefID: query.RefID,
ResultFormat: resultFormat,
JSON: query.JSON,
InterpolatedQuery: interpolatedQuery,
TimeRange: query.TimeRange,
QueryType: query.QueryType,
}, nil
}
func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *AzureResourceGraphQuery, dsInfo types.DatasourceInfo, client *http.Client, dsURL string) (*backend.DataResponse, error) {
@ -164,7 +159,7 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *
res, err := client.Do(req)
if err != nil {
return nil, err
return nil, errorsource.DownstreamError(err, false)
}
defer func() {
@ -224,7 +219,7 @@ func (e *AzureResourceGraphDatasource) unmarshalResponse(res *http.Response) (Az
}()
if res.StatusCode/100 != 2 {
return AzureResourceGraphResponse{}, fmt.Errorf("%s. Azure Resource Graph error: %s", res.Status, string(body))
return AzureResourceGraphResponse{}, errorsource.SourceError(backend.ErrorSourceFromHTTPStatus(res.StatusCode), fmt.Errorf("%s. Azure Resource Graph error: %s", res.Status, string(body)), false)
}
var data AzureResourceGraphResponse


@ -25,11 +25,11 @@ func TestBuildingAzureResourceGraphQueries(t *testing.T) {
fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC).In(time.Local)
tests := []struct {
name string
queryModel []backend.DataQuery
timeRange backend.TimeRange
azureResourceGraphQueries []*AzureResourceGraphQuery
Err require.ErrorAssertionFunc
name string
queryModel []backend.DataQuery
timeRange backend.TimeRange
azureResourceGraphQuery AzureResourceGraphQuery
Err require.ErrorAssertionFunc
}{
{
name: "Query with macros should be interpolated",
@ -49,20 +49,18 @@ func TestBuildingAzureResourceGraphQueries(t *testing.T) {
RefID: "A",
},
},
azureResourceGraphQueries: []*AzureResourceGraphQuery{
{
RefID: "A",
ResultFormat: "table",
URL: "",
JSON: []byte(`{
azureResourceGraphQuery: AzureResourceGraphQuery{
RefID: "A",
ResultFormat: "table",
URL: "",
JSON: []byte(`{
"queryType": "Azure Resource Graph",
"azureResourceGraph": {
"query": "resources | where $__contains(name,'res1','res2')",
"resultFormat": "table"
}
}`),
InterpolatedQuery: "resources | where ['name'] in ('res1','res2')",
},
InterpolatedQuery: "resources | where ['name'] in ('res1','res2')",
},
Err: require.NoError,
},
@ -70,9 +68,9 @@ func TestBuildingAzureResourceGraphQueries(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
queries, err := datasource.buildQueries(tt.queryModel, types.DatasourceInfo{})
query, err := datasource.buildQuery(tt.queryModel[0], types.DatasourceInfo{})
tt.Err(t, err)
if diff := cmp.Diff(tt.azureResourceGraphQueries, queries, cmpopts.IgnoreUnexported(struct{}{})); diff != "" {
if diff := cmp.Diff(&tt.azureResourceGraphQuery, query, cmpopts.IgnoreUnexported(struct{}{})); diff != "" {
t.Errorf("Result mismatch (-want +got):\n%s", diff)
}
})


@ -7,6 +7,7 @@ import (
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"
)
// TimeGrain handles conversions between
@ -26,7 +27,7 @@ func CreateISO8601DurationFromIntervalMS(it int64) (string, error) {
timeValueString := formatted[0 : len(formatted)-1]
timeValue, err := strconv.Atoi(timeValueString)
if err != nil {
return "", fmt.Errorf("could not parse interval %q to an ISO 8061 duration: %w", it, err)
return "", errorsource.DownstreamError(fmt.Errorf("could not parse interval %q to an ISO 8061 duration: %w", it, err), false)
}
unit := formatted[len(formatted)-1:]