Mirror of https://github.com/grafana/grafana.git, synced 2025-02-15 01:53:33 -06:00
Azure Monitor: Remove infra logger (#73652)
* removed infra logs
* improve health check
* remove debug and error logs
* feedback
* Update pkg/tsdb/azuremonitor/azuremonitor-resource-handler.go
* Update pkg/tsdb/azuremonitor/loganalytics/azure-log-analytics-datasource.go
* fix close body error
* update test
* resource request should return errors
* go linter
* go linter
This commit is contained in:
parent 26d5afecaf
commit 2a835301c3
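The thrust of the change: the package-level infra logger is removed, the resource proxy and the query executors return errors to their callers instead of logging them, and the health check is split into per-service helpers. The sketch below shows the new shape of the proxy's Do method, which now returns the ResponseWriter and an error. It is a simplified illustration, not the committed Grafana code: the package and type names, header copying, and error wording are assumptions for the example.

package azproxy // illustrative package name, not part of the Grafana tree

import (
    "fmt"
    "io"
    "net/http"
)

// resourceProxy mirrors the signature this commit gives httpServiceProxy.Do:
// failures are written to the ResponseWriter and returned, so the caller
// decides how to report them instead of a package-level logger.
type resourceProxy struct{}

func (s *resourceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
    res, err := cli.Do(req)
    if err != nil {
        rw.WriteHeader(http.StatusInternalServerError)
        if _, werr := rw.Write([]byte(fmt.Sprintf("unexpected error %v", err))); werr != nil {
            return nil, fmt.Errorf("unable to write HTTP response: %v", werr)
        }
        return nil, err
    }
    defer func() { _ = res.Body.Close() }() // the real code reports close failures via backend.Logger

    body, err := io.ReadAll(res.Body)
    if err != nil {
        rw.WriteHeader(http.StatusInternalServerError)
        return nil, err
    }
    // Copy headers before writing the status and body.
    for k, v := range res.Header {
        rw.Header().Set(k, v[0])
    }
    rw.WriteHeader(res.StatusCode)
    if _, err := rw.Write(body); err != nil {
        return nil, fmt.Errorf("unable to write HTTP response: %v", err)
    }
    // The response writer is returned so tests can inspect what was written.
    return rw, nil
}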
@@ -7,6 +7,7 @@ import (
    "net/url"
    "strings"

+   "github.com/grafana/grafana-plugin-sdk-go/backend"
    "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter"

    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
@@ -25,19 +26,19 @@ func getTarget(original string) (target string, err error) {
type httpServiceProxy struct {
}

-func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) http.ResponseWriter {
+func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
    res, err := cli.Do(req)
    if err != nil {
        rw.WriteHeader(http.StatusInternalServerError)
        _, err = rw.Write([]byte(fmt.Sprintf("unexpected error %v", err)))
        if err != nil {
-           logger.Error("Unable to write HTTP response", "error", err)
+           return nil, fmt.Errorf("unable to write HTTP response: %v", err)
        }
-       return nil
+       return nil, err
    }
    defer func() {
        if err := res.Body.Close(); err != nil {
-           logger.Warn("Failed to close response body", "err", err)
+           backend.Logger.Warn("Failed to close response body", "err", err)
        }
    }()

@@ -46,14 +47,14 @@ func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *ht
        rw.WriteHeader(http.StatusInternalServerError)
        _, err = rw.Write([]byte(fmt.Sprintf("unexpected error %v", err)))
        if err != nil {
-           logger.Error("Unable to write HTTP response", "error", err)
+           return nil, fmt.Errorf("unable to write HTTP response: %v", err)
        }
-       return nil
+       return nil, err
    }
    rw.WriteHeader(res.StatusCode)
    _, err = rw.Write(body)
    if err != nil {
-       logger.Error("Unable to write HTTP response", "error", err)
+       return nil, fmt.Errorf("unable to write HTTP response: %v", err)
    }

    for k, v := range res.Header {
@@ -63,7 +64,7 @@ func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *ht
        }
    }
    // Returning the response write for testing purposes
-   return rw
+   return rw, nil
}

func (s *Service) getDataSourceFromHTTPReq(req *http.Request) (types.DatasourceInfo, error) {
@@ -84,13 +85,13 @@ func writeResponse(rw http.ResponseWriter, code int, msg string) {
    rw.WriteHeader(http.StatusBadRequest)
    _, err := rw.Write([]byte(msg))
    if err != nil {
-       logger.Error("Unable to write HTTP response", "error", err)
+       backend.Logger.Error("Unable to write HTTP response", "error", err)
    }
}

func (s *Service) handleResourceReq(subDataSource string) func(rw http.ResponseWriter, req *http.Request) {
    return func(rw http.ResponseWriter, req *http.Request) {
-       logger.Debug("Received resource call", "url", req.URL.String(), "method", req.Method)
+       backend.Logger.Debug("Received resource call", "url", req.URL.String(), "method", req.Method)

        newPath, err := getTarget(req.URL.Path)
        if err != nil {
@@ -114,7 +115,11 @@ func (s *Service) handleResourceReq(subDataSource string) func(rw http.ResponseW
        req.URL.Host = serviceURL.Host
        req.URL.Scheme = serviceURL.Scheme

-       s.executors[subDataSource].ResourceRequest(rw, req, service.HTTPClient)
+       rw, err = s.executors[subDataSource].ResourceRequest(rw, req, service.HTTPClient)
+       if err != nil {
+           writeResponse(rw, http.StatusInternalServerError, fmt.Sprintf("unexpected error %v", err))
+           return
+       }
    }
}
@@ -66,7 +66,10 @@ func Test_proxyRequest(t *testing.T) {
            }
            rw := httptest.NewRecorder()
            proxy := httpServiceProxy{}
-           res := proxy.Do(rw, req, srv.Client())
+           res, err := proxy.Do(rw, req, srv.Client())
+           if err != nil {
+               t.Error(err)
+           }
            if res.Header().Get("foo") != "bar" {
                t.Errorf("Unexpected headers: %v", res.Header())
            }
@@ -90,9 +93,9 @@ type fakeProxy struct {
    requestedURL string
}

-func (s *fakeProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) http.ResponseWriter {
+func (s *fakeProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
    s.requestedURL = req.URL.String()
-   return nil
+   return nil, nil
}

func Test_handleResourceReq(t *testing.T) {
@@ -17,7 +17,6 @@ import (
    "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
    "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/infra/tracing"
    "github.com/grafana/grafana/pkg/services/featuremgmt"
    "github.com/grafana/grafana/pkg/setting"
@@ -27,8 +26,6 @@ import (
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)

-var logger = log.New("tsdb.azuremonitor")
-
func ProvideService(cfg *setting.Cfg, httpClientProvider *httpclient.Provider, features featuremgmt.FeatureToggles, tracer tracing.Tracer) *Service {
    proxy := &httpServiceProxy{}
    executors := map[string]azDatasourceExecutor{
@@ -159,8 +156,8 @@ func getAzureRoutes(cloud string, jsonData json.RawMessage) (map[string]types.Az
}

type azDatasourceExecutor interface {
-   ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error)
-   ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client)
+   ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error)
+   ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error)
}

func (s *Service) getDataSourceFromPluginReq(ctx context.Context, req *backend.QueryDataRequest) (types.DatasourceInfo, error) {
@@ -194,7 +191,7 @@ func (s *Service) newQueryMux() *datasource.QueryTypeMux {
        if !ok {
            return nil, fmt.Errorf("missing service for %s", dst)
        }
-       return executor.ExecuteTimeSeriesQuery(ctx, logger, req.Queries, dsInfo, service.HTTPClient, service.URL, s.tracer)
+       return executor.ExecuteTimeSeriesQuery(ctx, req.Queries, dsInfo, service.HTTPClient, service.URL, s.tracer)
        })
    }
    return mux
@@ -214,7 +211,7 @@ func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext
    return instance, nil
}

-func checkAzureMonitorMetricsHealth(dsInfo types.DatasourceInfo) (*http.Response, error) {
+func queryMetricHealth(dsInfo types.DatasourceInfo) (*http.Response, error) {
    subscriptionsApiVersion := "2020-01-01"
    url := fmt.Sprintf("%v/subscriptions?api-version=%v", dsInfo.Routes["Azure Monitor"].URL, subscriptionsApiVersion)
    request, err := http.NewRequest(http.MethodGet, url, nil)
@@ -298,6 +295,93 @@ func checkAzureMonitorResourceGraphHealth(dsInfo types.DatasourceInfo, subscript
    return res, nil
}

+func metricCheckHealth(dsInfo types.DatasourceInfo) (message string, defaultSubscription string, status backend.HealthStatus) {
+   defaultSubscription = dsInfo.Settings.SubscriptionId
+   metricsRes, err := queryMetricHealth(dsInfo)
+   if err != nil {
+       if ok := errors.Is(err, types.ErrorAzureHealthCheck); ok {
+           return fmt.Sprintf("Error connecting to Azure Monitor endpoint: %s", err.Error()), defaultSubscription, backend.HealthStatusError
+       }
+       return err.Error(), defaultSubscription, backend.HealthStatusError
+   }
+   defer func() {
+       err := metricsRes.Body.Close()
+       if err != nil {
+           message += err.Error()
+           status = backend.HealthStatusError
+       }
+   }()
+   if metricsRes.StatusCode != 200 {
+       body, err := io.ReadAll(metricsRes.Body)
+       if err != nil {
+           return err.Error(), defaultSubscription, backend.HealthStatusError
+       }
+       return fmt.Sprintf("Error connecting to Azure Monitor endpoint: %s", string(body)), defaultSubscription, backend.HealthStatusError
+   }
+   subscriptions, err := parseSubscriptions(metricsRes)
+   if err != nil {
+       return err.Error(), defaultSubscription, backend.HealthStatusError
+   }
+   if defaultSubscription == "" && len(subscriptions) > 0 {
+       defaultSubscription = subscriptions[0]
+   }
+
+   return "Successfully connected to Azure Monitor endpoint.", defaultSubscription, backend.HealthStatusOk
+}
+
+func logAnalyticsCheckHealth(dsInfo types.DatasourceInfo, defaultSubscription string) (message string, status backend.HealthStatus) {
+   logsRes, err := checkAzureLogAnalyticsHealth(dsInfo, defaultSubscription)
+   if err != nil {
+       if err.Error() == "no default workspace found" {
+           return "No Log Analytics workspaces found.", backend.HealthStatusUnknown
+       }
+       if ok := errors.Is(err, types.ErrorAzureHealthCheck); ok {
+           return fmt.Sprintf("Error connecting to Azure Log Analytics endpoint: %s", err.Error()), backend.HealthStatusUnknown
+       }
+       return err.Error(), backend.HealthStatusError
+   }
+   defer func() {
+       err := logsRes.Body.Close()
+       if err != nil {
+           message += err.Error()
+           status = backend.HealthStatusError
+       }
+   }()
+   if logsRes.StatusCode != 200 {
+       body, err := io.ReadAll(logsRes.Body)
+       if err != nil {
+           return err.Error(), backend.HealthStatusError
+       }
+       return fmt.Sprintf("Error connecting to Azure Log Analytics endpoint: %s", string(body)), backend.HealthStatusError
+   }
+   return "Successfully connected to Azure Log Analytics endpoint.", backend.HealthStatusOk
+}
+
+func graphLogHealthCheck(dsInfo types.DatasourceInfo, defaultSubscription string) (message string, status backend.HealthStatus) {
+   resourceGraphRes, err := checkAzureMonitorResourceGraphHealth(dsInfo, defaultSubscription)
+   if err != nil {
+       if ok := errors.Is(err, types.ErrorAzureHealthCheck); ok {
+           return fmt.Sprintf("Error connecting to Azure Resource Graph endpoint: %s", err.Error()), backend.HealthStatusError
+       }
+       return err.Error(), backend.HealthStatusError
+   }
+   defer func() {
+       err := resourceGraphRes.Body.Close()
+       if err != nil {
+           message += err.Error()
+           status = backend.HealthStatusError
+       }
+   }()
+   if resourceGraphRes.StatusCode != 200 {
+       body, err := io.ReadAll(resourceGraphRes.Body)
+       if err != nil {
+           return err.Error(), backend.HealthStatusError
+       }
+       return fmt.Sprintf("Error connecting to Azure Resource Graph endpoint: %s", string(body)), backend.HealthStatusError
+   }
+   return "Successfully connected to Azure Resource Graph endpoint.", backend.HealthStatusOk
+}
+
func parseSubscriptions(res *http.Response) ([]string, error) {
    var target struct {
        Value []struct {
@@ -331,94 +415,22 @@ func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthReque
    }

    status := backend.HealthStatusOk
-   metricsLog := "Successfully connected to Azure Monitor endpoint."
-   logAnalyticsLog := "Successfully connected to Azure Log Analytics endpoint."
-   graphLog := "Successfully connected to Azure Resource Graph endpoint."
-   defaultSubscription := dsInfo.Settings.SubscriptionId
-
-   metricsRes, err := checkAzureMonitorMetricsHealth(dsInfo)
-   if err != nil || metricsRes.StatusCode != 200 {
-       status = backend.HealthStatusError
-       if err != nil {
-           if ok := errors.Is(err, types.ErrorAzureHealthCheck); ok {
-               metricsLog = fmt.Sprintf("Error connecting to Azure Monitor endpoint: %s", err.Error())
-           } else {
-               return nil, err
-           }
-       } else {
-           body, err := io.ReadAll(metricsRes.Body)
-           if err != nil {
-               return nil, err
-           }
-           metricsLog = fmt.Sprintf("Error connecting to Azure Monitor endpoint: %s", string(body))
-       }
-   } else {
-       subscriptions, err := parseSubscriptions(metricsRes)
-       if err != nil {
-           return nil, err
-       }
-       if defaultSubscription == "" && len(subscriptions) > 0 {
-           defaultSubscription = subscriptions[0]
-       }
+   metricsLog, defaultSubscription, metricsStatus := metricCheckHealth(dsInfo)
+   if metricsStatus != backend.HealthStatusOk {
+       status = metricsStatus
    }

-   logsRes, err := checkAzureLogAnalyticsHealth(dsInfo, defaultSubscription)
-   if err != nil || logsRes.StatusCode != 200 {
-       status = backend.HealthStatusError
-       if err != nil {
-           if err.Error() == "no default workspace found" {
-               status = backend.HealthStatusUnknown
-               logAnalyticsLog = "No Log Analytics workspaces found."
-           } else if ok := errors.Is(err, types.ErrorAzureHealthCheck); ok {
-               logAnalyticsLog = fmt.Sprintf("Error connecting to Azure Log Analytics endpoint: %s", err.Error())
-           } else {
-               return nil, err
-           }
-       } else {
-           body, err := io.ReadAll(logsRes.Body)
-           if err != nil {
-               return nil, err
-           }
-           logAnalyticsLog = fmt.Sprintf("Error connecting to Azure Log Analytics endpoint: %s", string(body))
-       }
+   logAnalyticsLog, logAnalyticsStatus := logAnalyticsCheckHealth(dsInfo, defaultSubscription)
+   if logAnalyticsStatus != backend.HealthStatusOk {
+       status = logAnalyticsStatus
    }

-   resourceGraphRes, err := checkAzureMonitorResourceGraphHealth(dsInfo, defaultSubscription)
-   if err != nil || resourceGraphRes.StatusCode != 200 {
-       status = backend.HealthStatusError
-       if err != nil {
-           if ok := errors.Is(err, types.ErrorAzureHealthCheck); ok {
-               graphLog = fmt.Sprintf("Error connecting to Azure Resource Graph endpoint: %s", err.Error())
-           } else {
-               return nil, err
-           }
-       } else {
-           body, err := io.ReadAll(resourceGraphRes.Body)
-           if err != nil {
-               return nil, err
-           }
-           graphLog = fmt.Sprintf("Error connecting to Azure Resource Graph endpoint: %s", string(body))
-       }
+   graphLog, graphStatus := graphLogHealthCheck(dsInfo, defaultSubscription)
+   if graphStatus != backend.HealthStatusOk {
+       status = graphStatus
    }

-   defer func() {
-       if metricsRes != nil {
-           if err := metricsRes.Body.Close(); err != nil {
-               backend.Logger.Error("Failed to close response body", "err", err)
-           }
-       }
-       if logsRes != nil {
-           if err := logsRes.Body.Close(); logsRes != nil && err != nil {
-               backend.Logger.Error("Failed to close response body", "err", err)
-           }
-       }
-       if resourceGraphRes != nil {
-           if err := resourceGraphRes.Body.Close(); resourceGraphRes != nil && err != nil {
-               backend.Logger.Error("Failed to close response body", "err", err)
-           }
-       }
-   }()
-
    if status == backend.HealthStatusOk {
        return &backend.CheckHealthResult{
            Status: status,
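For reference, a sketch of how the refactored CheckHealth composes the three helpers added in the hunk above. This is a fragment in the style of the azuremonitor package, not the literal committed function: the message assembly, the function name, and the surrounding request plumbing are simplified assumptions.

// Sketch only: each helper performs its own request, closes its own response
// body, and returns a message plus a status instead of mutating shared state,
// which is why the old deferred body-close block in CheckHealth disappears.
func checkHealthSketch(dsInfo types.DatasourceInfo) *backend.CheckHealthResult {
    status := backend.HealthStatusOk

    metricsLog, defaultSubscription, metricsStatus := metricCheckHealth(dsInfo)
    if metricsStatus != backend.HealthStatusOk {
        status = metricsStatus
    }

    logAnalyticsLog, logAnalyticsStatus := logAnalyticsCheckHealth(dsInfo, defaultSubscription)
    if logAnalyticsStatus != backend.HealthStatusOk {
        status = logAnalyticsStatus
    }

    graphLog, graphStatus := graphLogHealthCheck(dsInfo, defaultSubscription)
    if graphStatus != backend.HealthStatusOk {
        status = graphStatus
    }

    return &backend.CheckHealthResult{
        Status:  status,
        Message: metricsLog + "\n" + logAnalyticsLog + "\n" + graphLog, // formatting is an assumption
    }
}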
@@ -17,7 +17,6 @@ import (
    "github.com/grafana/grafana-plugin-sdk-go/backend/httpclient"
    "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/infra/tracing"
    "github.com/grafana/grafana/pkg/setting"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
@@ -132,10 +131,11 @@ type fakeExecutor struct {
    expectedURL string
}

-func (f *fakeExecutor) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) {
+func (f *fakeExecutor) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
+   return nil, nil
}

-func (f *fakeExecutor) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
+func (f *fakeExecutor) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
    if client == nil {
        f.t.Errorf("The HTTP client for %s is missing", f.queryType)
    } else {
@@ -21,7 +21,6 @@ import (
    "go.opentelemetry.io/otel/attribute"
    "k8s.io/utils/strings/slices"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/infra/tracing"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/macros"
@@ -51,24 +50,28 @@ type AzureLogAnalyticsQuery struct {
    IntersectTime bool
}

-func (e *AzureLogAnalyticsDatasource) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) {
-   e.Proxy.Do(rw, req, cli)
+func (e *AzureLogAnalyticsDatasource) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
+   return e.Proxy.Do(rw, req, cli)
}

// executeTimeSeriesQuery does the following:
// 1. build the AzureMonitor url and querystring for each query
// 2. executes each query by calling the Azure Monitor API
// 3. parses the responses for each query into data frames
-func (e *AzureLogAnalyticsDatasource) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
+func (e *AzureLogAnalyticsDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
    result := backend.NewQueryDataResponse()
-   ctxLogger := logger.FromContext(ctx)
-   queries, err := e.buildQueries(ctx, ctxLogger, originalQueries, dsInfo, tracer)
+   queries, err := e.buildQueries(ctx, originalQueries, dsInfo, tracer)
    if err != nil {
        return nil, err
    }

    for _, query := range queries {
-       result.Responses[query.RefID] = e.executeQuery(ctx, ctxLogger, query, dsInfo, client, url, tracer)
+       res, err := e.executeQuery(ctx, query, dsInfo, client, url, tracer)
+       if err != nil {
+           result.Responses[query.RefID] = backend.DataResponse{Error: err}
+           continue
+       }
+       result.Responses[query.RefID] = *res
    }

    return result, nil
@@ -88,7 +91,7 @@ func getApiURL(resourceOrWorkspace string, isAppInsightsQuery bool) string {
    }
}

-func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger log.Logger, queries []backend.DataQuery, dsInfo types.DatasourceInfo, tracer tracing.Tracer) ([]*AzureLogAnalyticsQuery, error) {
+func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, queries []backend.DataQuery, dsInfo types.DatasourceInfo, tracer tracing.Tracer) ([]*AzureLogAnalyticsQuery, error) {
    azureLogAnalyticsQueries := []*AzureLogAnalyticsQuery{}
    appInsightsRegExp, err := regexp.Compile("providers/Microsoft.Insights/components")
    if err != nil {
@@ -113,7 +116,6 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger l
        }

        azureLogAnalyticsTarget := queryJSONModel.AzureLogAnalytics
-       logger.Debug("AzureLogAnalytics", "target", azureLogAnalyticsTarget)

        if azureLogAnalyticsTarget.ResultFormat != nil {
            resultFormat = *azureLogAnalyticsTarget.ResultFormat
@@ -154,7 +156,6 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger l
        }

        azureTracesTarget := queryJSONModel.AzureTraces
-       logger.Debug("AzureTraces", "target", azureTracesTarget)

        if azureTracesTarget.ResultFormat == nil {
            resultFormat = types.Table
@@ -180,7 +181,7 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger l
        operationId := ""
        if queryJSONModel.AzureTraces.OperationId != nil && *queryJSONModel.AzureTraces.OperationId != "" {
            operationId = *queryJSONModel.AzureTraces.OperationId
-           resourcesMap, err = getCorrelationWorkspaces(ctx, logger, resourceOrWorkspace, resourcesMap, dsInfo, operationId, tracer)
+           resourcesMap, err = getCorrelationWorkspaces(ctx, resourceOrWorkspace, resourcesMap, dsInfo, operationId, tracer)
            if err != nil {
                return nil, fmt.Errorf("failed to retrieve correlation resources for operation ID - %s: %s", operationId, err)
            }
@@ -204,15 +205,15 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger l
                traceParentExploreQuery = buildTracesQuery(operationId, &parentSpanIdVariable, queryJSONModel.AzureTraces.TraceTypes, queryJSONModel.AzureTraces.Filters, &resultFormat, queryResources)
                traceLogsExploreQuery = buildTracesLogsQuery(operationId, queryResources)
            }
-           traceExploreQuery, err = macros.KqlInterpolate(logger, query, dsInfo, traceExploreQuery, "TimeGenerated")
+           traceExploreQuery, err = macros.KqlInterpolate(query, dsInfo, traceExploreQuery, "TimeGenerated")
            if err != nil {
                return nil, fmt.Errorf("failed to create traces explore query: %s", err)
            }
-           traceParentExploreQuery, err = macros.KqlInterpolate(logger, query, dsInfo, traceParentExploreQuery, "TimeGenerated")
+           traceParentExploreQuery, err = macros.KqlInterpolate(query, dsInfo, traceParentExploreQuery, "TimeGenerated")
            if err != nil {
                return nil, fmt.Errorf("failed to create parent span traces explore query: %s", err)
            }
-           traceLogsExploreQuery, err = macros.KqlInterpolate(logger, query, dsInfo, traceLogsExploreQuery, "TimeGenerated")
+           traceLogsExploreQuery, err = macros.KqlInterpolate(query, dsInfo, traceLogsExploreQuery, "TimeGenerated")
            if err != nil {
                return nil, fmt.Errorf("failed to create traces logs explore query: %s", err)
            }
@@ -222,7 +223,7 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger l

        apiURL := getApiURL(resourceOrWorkspace, appInsightsQuery)

-       rawQuery, err := macros.KqlInterpolate(logger, query, dsInfo, queryString, "TimeGenerated")
+       rawQuery, err := macros.KqlInterpolate(query, dsInfo, queryString, "TimeGenerated")
        if err != nil {
            return nil, err
        }
@@ -247,45 +248,27 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(ctx context.Context, logger l
    return azureLogAnalyticsQueries, nil
}

-func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, logger log.Logger, query *AzureLogAnalyticsQuery, dsInfo types.DatasourceInfo, client *http.Client,
-   url string, tracer tracing.Tracer) backend.DataResponse {
-   dataResponse := backend.DataResponse{}
-
-   dataResponseErrorWithExecuted := func(err error) backend.DataResponse {
-       dataResponse.Error = err
-       dataResponse.Frames = data.Frames{
-           &data.Frame{
-               RefID: query.RefID,
-               Meta: &data.FrameMeta{
-                   ExecutedQueryString: query.Query,
-               },
-           },
-       }
-       return dataResponse
-   }
-
+func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *AzureLogAnalyticsQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.DataResponse, error) {
    // If azureLogAnalyticsSameAs is defined and set to false, return an error
    if sameAs, ok := dsInfo.JSONData["azureLogAnalyticsSameAs"]; ok && !sameAs.(bool) {
-       return dataResponseErrorWithExecuted(fmt.Errorf("credentials for Log Analytics are no longer supported. Go to the data source configuration to update Azure Monitor credentials"))
+       return nil, fmt.Errorf("credentials for Log Analytics are no longer supported. Go to the data source configuration to update Azure Monitor credentials")
    }

    queryJSONModel := dataquery.AzureMonitorQuery{}
    err := json.Unmarshal(query.JSON, &queryJSONModel)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    if query.QueryType == string(dataquery.AzureQueryTypeAzureTraces) {
        if dataquery.ResultFormat(query.ResultFormat) == (dataquery.ResultFormatTrace) && query.Query == "" {
-           return dataResponseErrorWithExecuted(fmt.Errorf("cannot visualise trace events using the trace visualiser"))
+           return nil, fmt.Errorf("cannot visualise trace events using the trace visualiser")
        }
    }

-   req, err := e.createRequest(ctx, logger, url, query)
+   req, err := e.createRequest(ctx, url, query)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    ctx, span := tracer.Start(ctx, "azure log analytics query")
@@ -299,42 +282,39 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, logger l

    tracer.Inject(ctx, req.Header, span)

-   logger.Debug("AzureLogAnalytics", "Request ApiURL", req.URL.String())
    res, err := client.Do(req)
    if err != nil {
-       return dataResponseErrorWithExecuted(err)
+       return nil, err
    }

    defer func() {
        err := res.Body.Close()
-       if err != nil {
-           logger.Warn("failed to close response body", "error", err)
-       }
+       backend.Logger.Error("Failed to close response body", "err", err)
    }()

-   logResponse, err := e.unmarshalResponse(logger, res)
+   logResponse, err := e.unmarshalResponse(res)
    if err != nil {
-       return dataResponseErrorWithExecuted(err)
+       return nil, err
    }

    t, err := logResponse.GetPrimaryResultTable()
    if err != nil {
-       return dataResponseErrorWithExecuted(err)
+       return nil, err
    }

    frame, err := ResponseTableToFrame(t, query.RefID, query.Query, dataquery.AzureQueryType(query.QueryType), dataquery.ResultFormat(query.ResultFormat))
    if err != nil {
-       return dataResponseErrorWithExecuted(err)
+       return nil, err
    }
    frame = appendErrorNotice(frame, logResponse.Error)
    if frame == nil {
-       return dataResponse
+       dataResponse := backend.DataResponse{}
+       return &dataResponse, nil
    }

    azurePortalBaseUrl, err := GetAzurePortalUrl(dsInfo.Cloud)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    if query.QueryType == string(dataquery.AzureQueryTypeAzureTraces) && query.ResultFormat == string(dataquery.ResultFormatTrace) {
@@ -359,22 +339,19 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, logger l

    queryUrl, err := getQueryUrl(query.Query, query.Resources, azurePortalBaseUrl, query.TimeRange)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    if query.QueryType == string(dataquery.AzureQueryTypeAzureTraces) {
        tracesUrl, err := getTracesQueryUrl(query.Resources, azurePortalBaseUrl)
        if err != nil {
-           dataResponse.Error = err
-           return dataResponse
+           return nil, err
        }

        queryJSONModel := dataquery.AzureMonitorQuery{}
        err = json.Unmarshal(query.JSON, &queryJSONModel)
        if err != nil {
-           dataResponse.Error = err
-           return dataResponse
+           return nil, err
        }
        traceIdVariable := "${__data.fields.traceID}"
        resultFormat := dataquery.ResultFormatTrace
@@ -434,9 +411,8 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, logger l
    } else {
        AddConfigLinks(*frame, queryUrl, nil)
    }

-   dataResponse.Frames = data.Frames{frame}
-   return dataResponse
+   dataResponse := backend.DataResponse{Frames: data.Frames{frame}}
+   return &dataResponse, nil
}

func appendErrorNotice(frame *data.Frame, err *AzureLogAnalyticsAPIError) *data.Frame {
@@ -450,7 +426,7 @@ func appendErrorNotice(frame *data.Frame, err *AzureLogAnalyticsAPIError) *data.
    return frame
}

-func (e *AzureLogAnalyticsDatasource) createRequest(ctx context.Context, logger log.Logger, queryURL string, query *AzureLogAnalyticsQuery) (*http.Request, error) {
+func (e *AzureLogAnalyticsDatasource) createRequest(ctx context.Context, queryURL string, query *AzureLogAnalyticsQuery) (*http.Request, error) {
    body := map[string]interface{}{
        "query": query.Query,
    }
@@ -475,7 +451,6 @@ func (e *AzureLogAnalyticsDatasource) createRequest(ctx context.Context, logger

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, queryURL, bytes.NewBuffer(jsonValue))
    if err != nil {
-       logger.Debug("Failed to create request", "error", err)
        return nil, fmt.Errorf("%v: %w", "failed to create request", err)
    }
    req.URL.Path = "/"
@@ -552,14 +527,13 @@ func getTracesQueryUrl(resources []string, azurePortalUrl string) (string, error
    return portalUrl, nil
}

-func getCorrelationWorkspaces(ctx context.Context, logger log.Logger, baseResource string, resourcesMap map[string]bool, dsInfo types.DatasourceInfo, operationId string, tracer tracing.Tracer) (map[string]bool, error) {
+func getCorrelationWorkspaces(ctx context.Context, baseResource string, resourcesMap map[string]bool, dsInfo types.DatasourceInfo, operationId string, tracer tracing.Tracer) (map[string]bool, error) {
    azMonService := dsInfo.Services["Azure Monitor"]
    correlationUrl := azMonService.URL + fmt.Sprintf("%s/providers/microsoft.insights/transactions/%s", baseResource, operationId)

    callCorrelationAPI := func(url string) (AzureCorrelationAPIResponse, error) {
        req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer([]byte{}))
        if err != nil {
-           logger.Debug("Failed to create request", "error", err)
            return AzureCorrelationAPIResponse{}, fmt.Errorf("%v: %w", "failed to create request", err)
        }
        req.URL.Path = url
@@ -578,7 +552,6 @@ func getCorrelationWorkspaces(ctx context.Context, logger log.Logger, baseResour

        tracer.Inject(ctx, req.Header, span)

-       logger.Debug("AzureLogAnalytics", "Traces Correlation ApiURL", req.URL.String())
        res, err := azMonService.HTTPClient.Do(req)
        if err != nil {
            return AzureCorrelationAPIResponse{}, err
@@ -587,15 +560,14 @@ func getCorrelationWorkspaces(ctx context.Context, logger log.Logger, baseResour
        if err != nil {
            return AzureCorrelationAPIResponse{}, err
        }

        defer func() {
-           err := res.Body.Close()
-           if err != nil {
-               logger.Warn("failed to close response body", "error", err)
+           if err := res.Body.Close(); err != nil {
+               backend.Logger.Error("Failed to close response body", "err", err)
            }
        }()

        if res.StatusCode/100 != 2 {
-           logger.Debug("Request failed", "status", res.Status, "body", string(body))
            return AzureCorrelationAPIResponse{}, fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body))
        }
        var data AzureCorrelationAPIResponse
@@ -603,7 +575,6 @@ func getCorrelationWorkspaces(ctx context.Context, logger log.Logger, baseResour
        d.UseNumber()
        err = d.Decode(&data)
        if err != nil {
-           logger.Debug("Failed to unmarshal Azure Traces correlation API response", "error", err, "status", res.Status, "body", string(body))
            return AzureCorrelationAPIResponse{}, err
        }

@@ -689,19 +660,17 @@ func (ar *AzureLogAnalyticsResponse) GetPrimaryResultTable() (*types.AzureRespon
    return nil, fmt.Errorf("no data as PrimaryResult table is missing from the response")
}

-func (e *AzureLogAnalyticsDatasource) unmarshalResponse(logger log.Logger, res *http.Response) (AzureLogAnalyticsResponse, error) {
+func (e *AzureLogAnalyticsDatasource) unmarshalResponse(res *http.Response) (AzureLogAnalyticsResponse, error) {
    body, err := io.ReadAll(res.Body)
    if err != nil {
        return AzureLogAnalyticsResponse{}, err
    }
    defer func() {
-       if err := res.Body.Close(); err != nil {
-           logger.Warn("Failed to close response body", "err", err)
-       }
+       err := res.Body.Close()
+       backend.Logger.Error("Failed to close response body", "err", err)
    }()

    if res.StatusCode/100 != 2 {
-       logger.Debug("Request failed", "status", res.Status, "body", string(body))
        return AzureLogAnalyticsResponse{}, fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body))
    }

@@ -710,7 +679,6 @@ func (e *AzureLogAnalyticsDatasource) unmarshalResponse(logger log.Logger, res *
    d.UseNumber()
    err = d.Decode(&data)
    if err != nil {
-       logger.Debug("Failed to unmarshal Azure Log Analytics response", "error", err, "status", res.Status, "body", string(body))
        return AzureLogAnalyticsResponse{}, err
    }
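The same error-propagation pattern appears in every datasource's ExecuteTimeSeriesQuery: executeQuery now returns (*backend.DataResponse, error), and a failure is attached to its own RefID rather than packed into a placeholder frame. A condensed sketch of that loop follows; the method name runQueries is illustrative (in the source it is simply the body of ExecuteTimeSeriesQuery), and it relies on the AzureLogAnalyticsQuery type and executeQuery shown above.

// Illustrative fragment of the loganalytics package, not the committed code.
func (e *AzureLogAnalyticsDatasource) runQueries(ctx context.Context, queries []*AzureLogAnalyticsQuery, dsInfo types.DatasourceInfo,
    client *http.Client, url string, tracer tracing.Tracer) *backend.QueryDataResponse {
    result := backend.NewQueryDataResponse()
    for _, query := range queries {
        res, err := e.executeQuery(ctx, query, dsInfo, client, url, tracer)
        if err != nil {
            // One failing query no longer aborts the whole request; its error
            // is reported on its own RefID and the remaining queries still run.
            result.Responses[query.RefID] = backend.DataResponse{Error: err}
            continue
        }
        result.Responses[query.RefID] = *res
    }
    return result
}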
@@ -16,14 +16,11 @@ import (
    "github.com/grafana/grafana-plugin-sdk-go/backend/httpclient"
    "github.com/stretchr/testify/require"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/infra/tracing"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)

-var logger = log.New("test")
-
func TestBuildingAzureLogAnalyticsQueries(t *testing.T) {
    datasource := &AzureLogAnalyticsDatasource{}
    fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC).In(time.Local)
@@ -1396,7 +1393,7 @@ func TestBuildingAzureLogAnalyticsQueries(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-           queries, err := datasource.buildQueries(ctx, logger, tt.queryModel, dsInfo, tracer)
+           queries, err := datasource.buildQueries(ctx, tt.queryModel, dsInfo, tracer)
            tt.Err(t, err)
            if diff := cmp.Diff(tt.azureLogAnalyticsQueries[0], queries[0]); diff != "" {
                t.Errorf("Result mismatch (-want +got): \n%s", diff)
@@ -1411,7 +1408,7 @@ func TestLogAnalyticsCreateRequest(t *testing.T) {

    t.Run("creates a request", func(t *testing.T) {
        ds := AzureLogAnalyticsDatasource{}
-       req, err := ds.createRequest(ctx, logger, url, &AzureLogAnalyticsQuery{
+       req, err := ds.createRequest(ctx, url, &AzureLogAnalyticsQuery{
            Resources:     []string{"r"},
            Query:         "Perf",
            IntersectTime: false,
@@ -1435,7 +1432,7 @@ func TestLogAnalyticsCreateRequest(t *testing.T) {

    t.Run("creates a request with timespan", func(t *testing.T) {
        ds := AzureLogAnalyticsDatasource{}
-       req, err := ds.createRequest(ctx, logger, url, &AzureLogAnalyticsQuery{
+       req, err := ds.createRequest(ctx, url, &AzureLogAnalyticsQuery{
            Resources:     []string{"r"},
            Query:         "Perf",
            IntersectTime: true,
@@ -1459,7 +1456,7 @@ func TestLogAnalyticsCreateRequest(t *testing.T) {

    t.Run("creates a request with multiple resources", func(t *testing.T) {
        ds := AzureLogAnalyticsDatasource{}
-       req, err := ds.createRequest(ctx, logger, url, &AzureLogAnalyticsQuery{
+       req, err := ds.createRequest(ctx, url, &AzureLogAnalyticsQuery{
            Resources: []string{"/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.OperationalInsights/workspaces/r1", "/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.OperationalInsights/workspaces/r2"},
            Query:     "Perf",
            QueryType: string(dataquery.AzureQueryTypeAzureLogAnalytics),
@@ -1479,7 +1476,7 @@ func TestLogAnalyticsCreateRequest(t *testing.T) {
        ds := AzureLogAnalyticsDatasource{}
        from := time.Now()
        to := from.Add(3 * time.Hour)
-       req, err := ds.createRequest(ctx, logger, url, &AzureLogAnalyticsQuery{
+       req, err := ds.createRequest(ctx, url, &AzureLogAnalyticsQuery{
            Resources: []string{"/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.OperationalInsights/workspaces/r1", "/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.OperationalInsights/workspaces/r2"},
            Query:     "Perf",
            QueryType: string(dataquery.AzureQueryTypeAzureLogAnalytics),
@@ -1503,7 +1500,7 @@ func TestLogAnalyticsCreateRequest(t *testing.T) {
        ds := AzureLogAnalyticsDatasource{}
        from := time.Now()
        to := from.Add(3 * time.Hour)
-       req, err := ds.createRequest(ctx, logger, url, &AzureLogAnalyticsQuery{
+       req, err := ds.createRequest(ctx, url, &AzureLogAnalyticsQuery{
            Resources: []string{"/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Insights/components/r1", "/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Insights/components/r2"},
            QueryType: string(dataquery.AzureQueryTypeAzureTraces),
            TimeRange: backend.TimeRange{
@@ -1538,11 +1535,8 @@ func Test_executeQueryErrorWithDifferentLogAnalyticsCreds(t *testing.T) {
        TimeRange: backend.TimeRange{},
    }
    tracer := tracing.InitializeTracerForTest()
-   res := ds.executeQuery(ctx, logger, query, dsInfo, &http.Client{}, dsInfo.Services["Azure Log Analytics"].URL, tracer)
-   if res.Error == nil {
-       t.Fatal("expecting an error")
-   }
-   if !strings.Contains(res.Error.Error(), "credentials for Log Analytics are no longer supported") {
+   _, err := ds.executeQuery(ctx, query, dsInfo, &http.Client{}, dsInfo.Services["Azure Log Analytics"].URL, tracer)
+   if !strings.Contains(err.Error(), "credentials for Log Analytics are no longer supported") {
        t.Error("expecting the error to inform of bad credentials")
    }
}
@@ -9,7 +9,6 @@ import (

    "github.com/grafana/grafana-plugin-sdk-go/backend"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
    "github.com/grafana/grafana/pkg/tsdb/intervalv2"
@@ -34,17 +33,17 @@ type kqlMacroEngine struct {
// - $__escapeMulti('\\vm\eth0\Total','\\vm\eth2\Total') -> @'\\vm\eth0\Total',@'\\vm\eth2\Total'

// KqlInterpolate interpolates macros for Kusto Query Language (KQL) queries
-func KqlInterpolate(logger log.Logger, query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField ...string) (string, error) {
+func KqlInterpolate(query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField ...string) (string, error) {
    engine := kqlMacroEngine{}

    defaultTimeFieldForAllDatasources := "timestamp"
    if len(defaultTimeField) > 0 && query.QueryType != string(dataquery.AzureQueryTypeAzureTraces) {
        defaultTimeFieldForAllDatasources = defaultTimeField[0]
    }
-   return engine.Interpolate(logger, query, dsInfo, kql, defaultTimeFieldForAllDatasources)
+   return engine.Interpolate(query, dsInfo, kql, defaultTimeFieldForAllDatasources)
}

-func (m *kqlMacroEngine) Interpolate(logger log.Logger, query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField string) (string, error) {
+func (m *kqlMacroEngine) Interpolate(query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField string) (string, error) {
    m.timeRange = query.TimeRange
    m.query = query
    rExp, _ := regexp.Compile(sExpr)
@@ -74,7 +73,7 @@ func (m *kqlMacroEngine) Interpolate(logger log.Logger, query backend.DataQuery,
        for i, arg := range args {
            args[i] = strings.Trim(arg, " ")
        }
-       res, err := m.evaluateMacro(logger, groups[1], defaultTimeField, args, dsInfo)
+       res, err := m.evaluateMacro(groups[1], defaultTimeField, args, dsInfo)
        if err != nil && macroError == nil {
            macroError = err
            return "macro_error()"
@@ -94,7 +93,7 @@ type interval struct {
    Interval string
}

-func (m *kqlMacroEngine) evaluateMacro(logger log.Logger, name string, defaultTimeField string, args []string, dsInfo types.DatasourceInfo) (string, error) {
+func (m *kqlMacroEngine) evaluateMacro(name string, defaultTimeField string, args []string, dsInfo types.DatasourceInfo) (string, error) {
    switch name {
    case "timeFilter":
        timeColumn := defaultTimeField
@@ -118,7 +117,6 @@ func (m *kqlMacroEngine) evaluateMacro(logger log.Logger, name string, defaultTi
        var queryInterval interval
        err := json.Unmarshal(m.query.JSON, &queryInterval)
        if err != nil {
-           logger.Warn("Unable to parse model from query", "JSON", m.query.JSON)
            it = defaultInterval
        } else {
            var (
@@ -130,7 +128,6 @@ func (m *kqlMacroEngine) evaluateMacro(logger log.Logger, name string, defaultTi
            }
            it, err = intervalv2.GetIntervalFrom(dsInterval, queryInterval.Interval, queryInterval.IntervalMs, defaultInterval)
            if err != nil {
-               logger.Warn("Unable to get interval from query", "model", queryInterval)
                it = defaultInterval
            }
        }
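Callers of the KQL macro engine simply drop the logger argument. A minimal caller-side sketch of the new macros.KqlInterpolate signature; the query text and time field are examples, not taken from the repository.

// Illustrative usage only; assumes the macros, backend and types packages are imported.
func interpolateExample(query backend.DataQuery, dsInfo types.DatasourceInfo) (string, error) {
    return macros.KqlInterpolate(query, dsInfo, "Perf | where $__timeFilter()", "TimeGenerated")
}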
@@ -9,7 +9,6 @@ import (
    "github.com/grafana/grafana-plugin-sdk-go/backend"
    "github.com/stretchr/testify/require"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)

@@ -138,7 +137,7 @@ func TestAzureLogAnalyticsMacros(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            defaultTimeField := "TimeGenerated"
-           rawQuery, err := KqlInterpolate(log.New("test"), tt.query, types.DatasourceInfo{}, tt.kql, defaultTimeField)
+           rawQuery, err := KqlInterpolate(tt.query, types.DatasourceInfo{}, tt.kql, defaultTimeField)
            tt.Err(t, err)
            if diff := cmp.Diff(tt.expected, rawQuery, cmpopts.EquateNaNs()); diff != "" {
                t.Errorf("Result mismatch (-want +got):\n%s", diff)
@@ -17,10 +17,8 @@ import (
    "github.com/grafana/grafana-plugin-sdk-go/data"
    "go.opentelemetry.io/otel/attribute"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/infra/tracing"
    "github.com/grafana/grafana/pkg/services/featuremgmt"
    "github.com/grafana/grafana/pkg/setting"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/loganalytics"
    azTime "github.com/grafana/grafana/pkg/tsdb/azuremonitor/time"
@@ -41,31 +39,35 @@ var (

const AzureMonitorAPIVersion = "2021-05-01"

-func (e *AzureMonitorDatasource) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) {
-   e.Proxy.Do(rw, req, cli)
+func (e *AzureMonitorDatasource) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
+   return e.Proxy.Do(rw, req, cli)
}

// executeTimeSeriesQuery does the following:
// 1. build the AzureMonitor url and querystring for each query
// 2. executes each query by calling the Azure Monitor API
// 3. parses the responses for each query into data frames
-func (e *AzureMonitorDatasource) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
+func (e *AzureMonitorDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
    result := backend.NewQueryDataResponse()
-   ctxLogger := logger.FromContext(ctx)

-   queries, err := e.buildQueries(ctxLogger, originalQueries, dsInfo)
+   queries, err := e.buildQueries(originalQueries, dsInfo)
    if err != nil {
        return nil, err
    }

    for _, query := range queries {
-       result.Responses[query.RefID] = e.executeQuery(ctx, ctxLogger, query, dsInfo, client, url, tracer)
+       res, err := e.executeQuery(ctx, query, dsInfo, client, url, tracer)
+       if err != nil {
+           result.Responses[query.RefID] = backend.DataResponse{Error: err}
+           continue
+       }
+       result.Responses[query.RefID] = *res
    }

    return result, nil
}

-func (e *AzureMonitorDatasource) buildQueries(logger log.Logger, queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*types.AzureMonitorQuery, error) {
+func (e *AzureMonitorDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*types.AzureMonitorQuery, error) {
    azureMonitorQueries := []*types.AzureMonitorQuery{}

    for _, query := range queries {
@@ -173,10 +175,6 @@ func (e *AzureMonitorDatasource) buildQueries(logger log.Logger, queries []backe
        }
        target = params.Encode()

-       if setting.Env == setting.Dev {
-           logger.Debug("Azuremonitor request", "params", params)
-       }
-
        sub := ""
        if queryJSONModel.Subscription != nil {
            sub = *queryJSONModel.Subscription
@@ -245,8 +243,8 @@ func getParams(azJSONModel *dataquery.AzureMetricQuery, query backend.DataQuery)
    return params, nil
}

-func (e *AzureMonitorDatasource) retrieveSubscriptionDetails(cli *http.Client, ctx context.Context, logger log.Logger, tracer tracing.Tracer, subscriptionId string, baseUrl string, dsId int64, orgId int64) (string, error) {
-   req, err := e.createRequest(ctx, logger, fmt.Sprintf("%s/subscriptions/%s", baseUrl, subscriptionId))
+func (e *AzureMonitorDatasource) retrieveSubscriptionDetails(cli *http.Client, ctx context.Context, tracer tracing.Tracer, subscriptionId string, baseUrl string, dsId int64, orgId int64) (string, error) {
+   req, err := e.createRequest(ctx, fmt.Sprintf("%s/subscriptions/%s", baseUrl, subscriptionId))
    if err != nil {
        return "", fmt.Errorf("failed to retrieve subscription details for subscription %s: %s", subscriptionId, err)
    }
@@ -261,15 +259,14 @@ func (e *AzureMonitorDatasource) retrieveSubscriptionDetails(cli *http.Client, c

    defer span.End()
    tracer.Inject(ctx, req.Header, span)
-   logger.Debug("AzureMonitor", "Subscription Details Req")
    res, err := cli.Do(req)
    if err != nil {
        return "", fmt.Errorf("failed to request subscription details: %s", err)
    }

    defer func() {
-       if err := res.Body.Close(); err != nil {
-           logger.Warn("failed to close response body", "err", err)
-       }
+       err := res.Body.Close()
+       backend.Logger.Error("Failed to close response body", "err", err)
    }()

    body, err := io.ReadAll(res.Body)
@@ -290,14 +287,11 @@ func (e *AzureMonitorDatasource) retrieveSubscriptionDetails(cli *http.Client, c
    return data.DisplayName, nil
}

-func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, logger log.Logger, query *types.AzureMonitorQuery, dsInfo types.DatasourceInfo, cli *http.Client,
-   url string, tracer tracing.Tracer) backend.DataResponse {
-   dataResponse := backend.DataResponse{}
-
-   req, err := e.createRequest(ctx, logger, url)
+func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *types.AzureMonitorQuery, dsInfo types.DatasourceInfo, cli *http.Client,
+   url string, tracer tracing.Tracer) (*backend.DataResponse, error) {
+   req, err := e.createRequest(ctx, url)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    req.URL.Path = path.Join(req.URL.Path, query.URL)
@@ -317,49 +311,43 @@ func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, logger log.Lo
    defer span.End()
    tracer.Inject(ctx, req.Header, span)

-   logger.Debug("AzureMonitor", "Request ApiURL", req.URL.String())
-   logger.Debug("AzureMonitor", "Target", query.Target)
    res, err := cli.Do(req)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    defer func() {
-       if err := res.Body.Close(); err != nil {
-           logger.Warn("failed to close response body", "err", err)
-       }
+       err := res.Body.Close()
+       backend.Logger.Error("Failed to close response body", "err", err)
    }()

-   data, err := e.unmarshalResponse(logger, res)
+   data, err := e.unmarshalResponse(res)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

    azurePortalUrl, err := loganalytics.GetAzurePortalUrl(dsInfo.Cloud)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

-   subscription, err := e.retrieveSubscriptionDetails(cli, ctx, logger, tracer, query.Subscription, dsInfo.Routes["Azure Monitor"].URL, dsInfo.DatasourceID, dsInfo.OrgID)
+   subscription, err := e.retrieveSubscriptionDetails(cli, ctx, tracer, query.Subscription, dsInfo.Routes["Azure Monitor"].URL, dsInfo.DatasourceID, dsInfo.OrgID)
    if err != nil {
-       logger.Warn(err.Error())
+       return nil, err
    }

-   dataResponse.Frames, err = e.parseResponse(data, query, azurePortalUrl, subscription)
+   frames, err := e.parseResponse(data, query, azurePortalUrl, subscription)
    if err != nil {
-       dataResponse.Error = err
-       return dataResponse
+       return nil, err
    }

-   return dataResponse
+   dataResponse := backend.DataResponse{Frames: frames}
+   return &dataResponse, nil
}

-func (e *AzureMonitorDatasource) createRequest(ctx context.Context, logger log.Logger, url string) (*http.Request, error) {
+func (e *AzureMonitorDatasource) createRequest(ctx context.Context, url string) (*http.Request, error) {
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
-       logger.Debug("failed to create request", "error", err)
        return nil, fmt.Errorf("%v: %w", "failed to create request", err)
    }
    req.Header.Set("Content-Type", "application/json")
@@ -367,21 +355,19 @@ func (e *AzureMonitorDatasource) createRequest(ctx context.Context, logger log.L
    return req, nil
}

-func (e *AzureMonitorDatasource) unmarshalResponse(logger log.Logger, res *http.Response) (types.AzureMonitorResponse, error) {
+func (e *AzureMonitorDatasource) unmarshalResponse(res *http.Response) (types.AzureMonitorResponse, error) {
    body, err := io.ReadAll(res.Body)
    if err != nil {
        return types.AzureMonitorResponse{}, err
    }

    if res.StatusCode/100 != 2 {
-       logger.Debug("Request failed", "status", res.Status, "body", string(body))
        return types.AzureMonitorResponse{}, fmt.Errorf("request failed, status: %s, error: %s", res.Status, string(body))
    }

    var data types.AzureMonitorResponse
    err = json.Unmarshal(body, &data)
    if err != nil {
-       logger.Debug("failed to unmarshal AzureMonitor response", "error", err, "status", res.Status, "body", string(body))
        return types.AzureMonitorResponse{}, err
    }
@@ -19,7 +19,6 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

-   "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
    "github.com/grafana/grafana/pkg/tsdb/azuremonitor/testdata"
    azTime "github.com/grafana/grafana/pkg/tsdb/azuremonitor/time"
@@ -294,7 +293,7 @@ func TestAzureMonitorBuildQueries(t *testing.T) {
        },
    }

-   queries, err := datasource.buildQueries(log.New("test"), tsdbQuery, dsInfo)
+   queries, err := datasource.buildQueries(tsdbQuery, dsInfo)
    require.NoError(t, err)

    resources := map[string]dataquery.AzureMonitorResource{}
@@ -359,7 +358,7 @@ func TestCustomNamespace(t *testing.T) {
        },
    }

-   result, err := datasource.buildQueries(log.New("test"), q, types.DatasourceInfo{})
+   result, err := datasource.buildQueries(q, types.DatasourceInfo{})
    require.NoError(t, err)
    expected := "custom/namespace"
    require.Equal(t, expected, result[0].Params.Get("metricnamespace"))
@@ -690,7 +689,7 @@ func TestAzureMonitorCreateRequest(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            ds := AzureMonitorDatasource{}
-           req, err := ds.createRequest(ctx, log.New("test"), url)
+           req, err := ds.createRequest(ctx, url)
            tt.Err(t, err)
            if req.URL.String() != tt.expectedURL {
                t.Errorf("Expecting %s, got %s", tt.expectedURL, req.URL.String())
@ -15,7 +15,6 @@ import (
	"github.com/grafana/grafana-plugin-sdk-go/data"
	"go.opentelemetry.io/otel/attribute"

	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/infra/tracing"
	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/loganalytics"

@ -48,27 +47,31 @@ type AzureResourceGraphQuery struct {
const ArgAPIVersion = "2021-06-01-preview"
const argQueryProviderName = "/providers/Microsoft.ResourceGraph/resources"

func (e *AzureResourceGraphDatasource) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) {
	e.Proxy.Do(rw, req, cli)
func (e *AzureResourceGraphDatasource) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
	return e.Proxy.Do(rw, req, cli)
}

// executeTimeSeriesQuery does the following:
// 1. builds the AzureMonitor url and querystring for each query
// 2. executes each query by calling the Azure Monitor API
// 3. parses the responses for each query into data frames
func (e *AzureResourceGraphDatasource) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
func (e *AzureResourceGraphDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
	result := &backend.QueryDataResponse{
		Responses: map[string]backend.DataResponse{},
	}
	ctxLogger := logger.FromContext(ctx)

	queries, err := e.buildQueries(ctxLogger, originalQueries, dsInfo)
	queries, err := e.buildQueries(originalQueries, dsInfo)
	if err != nil {
		return nil, err
	}

	for _, query := range queries {
		result.Responses[query.RefID] = e.executeQuery(ctx, ctxLogger, query, dsInfo, client, url, tracer)
		res, err := e.executeQuery(ctx, query, dsInfo, client, url, tracer)
		if err != nil {
			result.Responses[query.RefID] = backend.DataResponse{Error: err}
			continue
		}
		result.Responses[query.RefID] = *res
	}

	return result, nil
@ -81,7 +84,7 @@ type argJSONQuery struct {
	} `json:"azureResourceGraph"`
}

func (e *AzureResourceGraphDatasource) buildQueries(logger log.Logger, queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureResourceGraphQuery, error) {
func (e *AzureResourceGraphDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureResourceGraphQuery, error) {
	var azureResourceGraphQueries []*AzureResourceGraphQuery

	for _, query := range queries {

@ -92,14 +95,13 @@ func (e *AzureResourceGraphDatasource) buildQueries(logger log.Logger, queries [
		}

		azureResourceGraphTarget := queryJSONModel.AzureResourceGraph
		logger.Debug("AzureResourceGraph", "target", azureResourceGraphTarget)

		resultFormat := azureResourceGraphTarget.ResultFormat
		if resultFormat == "" {
			resultFormat = "table"
		}

		interpolatedQuery, err := macros.KqlInterpolate(logger, query, dsInfo, azureResourceGraphTarget.Query)
		interpolatedQuery, err := macros.KqlInterpolate(query, dsInfo, azureResourceGraphTarget.Query)

		if err != nil {
			return nil, err

@ -118,31 +120,15 @@ func (e *AzureResourceGraphDatasource) buildQueries(logger log.Logger, queries [
	return azureResourceGraphQueries, nil
}
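For orientation, a minimal caller-side sketch of the refactored flow (not part of this commit): the query JSON follows the argJSONQuery model above, imports and the ds, dsInfo, cli, dsURL and tracer values are assumed from the surrounding plugin code, and per-query failures now surface as DataResponse errors keyed by RefID instead of being logged.

// querySketch is a hypothetical helper showing the caller-side contract of the
// refactored datasource: a non-nil error means the whole batch failed (for
// example buildQueries), while individual query failures arrive per RefID.
func querySketch(ctx context.Context, ds *AzureResourceGraphDatasource, dsInfo types.DatasourceInfo, cli *http.Client, dsURL string, tracer tracing.Tracer) error {
	q := backend.DataQuery{
		RefID: "A",
		// Field names mirror the argJSONQuery model above; values are placeholders.
		JSON: []byte(`{"azureResourceGraph": {"query": "Resources | project name | limit 5", "resultFormat": "table"}}`),
	}
	resp, err := ds.ExecuteTimeSeriesQuery(ctx, []backend.DataQuery{q}, dsInfo, cli, dsURL, tracer)
	if err != nil {
		return err // building the queries failed for the whole batch
	}
	if r := resp.Responses["A"]; r.Error != nil {
		return fmt.Errorf("query A failed: %w", r.Error)
	}
	return nil
}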

func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, logger log.Logger, query *AzureResourceGraphQuery, dsInfo types.DatasourceInfo, client *http.Client,
	dsURL string, tracer tracing.Tracer) backend.DataResponse {
	dataResponse := backend.DataResponse{}

func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *AzureResourceGraphQuery, dsInfo types.DatasourceInfo, client *http.Client,
	dsURL string, tracer tracing.Tracer) (*backend.DataResponse, error) {
	params := url.Values{}
	params.Add("api-version", ArgAPIVersion)

	dataResponseErrorWithExecuted := func(err error) backend.DataResponse {
		dataResponse = backend.DataResponse{Error: err}
		frames := data.Frames{
			&data.Frame{
				RefID: query.RefID,
				Meta: &data.FrameMeta{
					ExecutedQueryString: query.InterpolatedQuery,
				},
			},
		}
		dataResponse.Frames = frames
		return dataResponse
	}
	var model dataquery.AzureMonitorQuery
	err := json.Unmarshal(query.JSON, &model)
	if err != nil {
		dataResponse.Error = err
		return dataResponse
		return nil, err
	}

	reqBody, err := json.Marshal(map[string]interface{}{

@ -152,15 +138,13 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, logger
	})

	if err != nil {
		dataResponse.Error = err
		return dataResponse
		return nil, err
	}

	req, err := e.createRequest(ctx, logger, reqBody, dsURL)
	req, err := e.createRequest(ctx, reqBody, dsURL)

	if err != nil {
		dataResponse.Error = err
		return dataResponse
		return nil, err
	}

	req.URL.Path = path.Join(req.URL.Path, argQueryProviderName)

@ -177,36 +161,34 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, logger
	tracer.Inject(ctx, req.Header, span)

	logger.Debug("AzureResourceGraph", "Request ApiURL", req.URL.String())
	res, err := client.Do(req)
	if err != nil {
		return dataResponseErrorWithExecuted(err)
		return nil, err
	}

	defer func() {
		err := res.Body.Close()
		if err != nil {
			logger.Warn("failed to close response body", "error", err)
		}
		backend.Logger.Error("Failed to close response body", "err", err)
	}()

	argResponse, err := e.unmarshalResponse(logger, res)
	argResponse, err := e.unmarshalResponse(res)
	if err != nil {
		return dataResponseErrorWithExecuted(err)
		return nil, err
	}

	frame, err := loganalytics.ResponseTableToFrame(&argResponse.Data, query.RefID, query.InterpolatedQuery, dataquery.AzureQueryType(query.QueryType), dataquery.ResultFormat(query.ResultFormat))
	if err != nil {
		return dataResponseErrorWithExecuted(err)
		return nil, err
	}
	if frame == nil {
		// empty response
		return dataResponse
		dataResponse := backend.DataResponse{}
		return &dataResponse, nil
	}

	azurePortalUrl, err := loganalytics.GetAzurePortalUrl(dsInfo.Cloud)
	if err != nil {
		return dataResponseErrorWithExecuted(err)
		return nil, err
	}

	url := azurePortalUrl + "/#blade/HubsExtension/ArgQueryBlade/query/" + url.PathEscape(query.InterpolatedQuery)

@ -216,14 +198,14 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, logger
	}
	frameWithLink.Meta.ExecutedQueryString = req.URL.RawQuery

	dataResponse := backend.DataResponse{}
	dataResponse.Frames = data.Frames{&frameWithLink}
	return dataResponse
	return &dataResponse, nil
}
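The deep link built above is plain string assembly; a standalone sketch with a placeholder portal URL and KQL text (the real values come from loganalytics.GetAzurePortalUrl and the interpolated query) shows the role of url.PathEscape:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Placeholder values, not taken from the datasource.
	portal := "https://portal.azure.com"
	kql := "Resources | project name, type | limit 5"
	// PathEscape makes the KQL safe to append as a single path segment.
	deepLink := portal + "/#blade/HubsExtension/ArgQueryBlade/query/" + url.PathEscape(kql)
	fmt.Println(deepLink)
}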

func (e *AzureResourceGraphDatasource) createRequest(ctx context.Context, logger log.Logger, reqBody []byte, url string) (*http.Request, error) {
func (e *AzureResourceGraphDatasource) createRequest(ctx context.Context, reqBody []byte, url string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(reqBody))
	if err != nil {
		logger.Debug("Failed to create request", "error", err)
		return nil, fmt.Errorf("%v: %w", "failed to create request", err)
	}
	req.URL.Path = "/"

@ -232,19 +214,18 @@ func (e *AzureResourceGraphDatasource) createRequest(ctx context.Context, logger
	return req, nil
}

func (e *AzureResourceGraphDatasource) unmarshalResponse(logger log.Logger, res *http.Response) (AzureResourceGraphResponse, error) {
func (e *AzureResourceGraphDatasource) unmarshalResponse(res *http.Response) (AzureResourceGraphResponse, error) {
	body, err := io.ReadAll(res.Body)
	if err != nil {
		return AzureResourceGraphResponse{}, err
	}

	defer func() {
		if err := res.Body.Close(); err != nil {
			logger.Warn("Failed to close response body", "err", err)
		}
		err := res.Body.Close()
		backend.Logger.Error("Failed to close response body", "err", err)
	}()

	if res.StatusCode/100 != 2 {
		logger.Debug("Request failed", "status", res.Status, "body", string(body))
		return AzureResourceGraphResponse{}, fmt.Errorf("%s. Azure Resource Graph error: %s", res.Status, string(body))
	}

@ -253,7 +234,6 @@ func (e *AzureResourceGraphDatasource) unmarshalResponse(logger log.Logger, res
	d.UseNumber()
	err = d.Decode(&data)
	if err != nil {
		logger.Debug("Failed to unmarshal azure resource graph response", "error", err, "status", res.Status, "body", string(body))
		return AzureResourceGraphResponse{}, err
}
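The decoder above calls UseNumber before decoding; a self-contained sketch with a made-up payload shows why: without it, large integer columns returned by Resource Graph would be forced through float64 and lose precision.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"count": 9007199254740993}`) // larger than float64 can represent exactly

	var asFloat map[string]interface{}
	_ = json.Unmarshal(payload, &asFloat) // default decoding goes through float64

	d := json.NewDecoder(bytes.NewReader(payload))
	d.UseNumber() // keep numeric values as json.Number instead
	var asNumber map[string]interface{}
	_ = d.Decode(&asNumber)

	fmt.Println(asFloat["count"])  // 9.007199254740992e+15 (precision lost)
	fmt.Println(asNumber["count"]) // 9007199254740993
}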

@ -17,13 +17,10 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/loganalytics"
	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)

var logger = log.New("test")

func TestBuildingAzureResourceGraphQueries(t *testing.T) {
	datasource := &AzureResourceGraphDatasource{}
	fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC).In(time.Local)

@ -74,7 +71,7 @@ func TestBuildingAzureResourceGraphQueries(t *testing.T) {

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			queries, err := datasource.buildQueries(logger, tt.queryModel, types.DatasourceInfo{})
			queries, err := datasource.buildQueries(tt.queryModel, types.DatasourceInfo{})
			tt.Err(t, err)
			if diff := cmp.Diff(tt.azureResourceGraphQueries, queries, cmpopts.IgnoreUnexported(struct{}{})); diff != "" {
				t.Errorf("Result mismatch (-want +got):\n%s", diff)

@ -106,7 +103,7 @@ func TestAzureResourceGraphCreateRequest(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ds := AzureResourceGraphDatasource{}
			req, err := ds.createRequest(ctx, logger, []byte{}, url)
			req, err := ds.createRequest(ctx, []byte{}, url)
			tt.Err(t, err)
			if req.URL.String() != tt.expectedURL {
				t.Errorf("Expecting %s, got %s", tt.expectedURL, req.URL.String())

@ -158,7 +155,7 @@ func TestGetAzurePortalUrl(t *testing.T) {

func TestUnmarshalResponse400(t *testing.T) {
	datasource := &AzureResourceGraphDatasource{}
	res, err := datasource.unmarshalResponse(logger, &http.Response{
	res, err := datasource.unmarshalResponse(&http.Response{
		StatusCode: 400,
		Status: "400 Bad Request",
		Body: io.NopCloser(strings.NewReader(("Azure Error Message"))),

@ -172,7 +169,7 @@ func TestUnmarshalResponse400(t *testing.T) {

func TestUnmarshalResponse200Invalid(t *testing.T) {
	datasource := &AzureResourceGraphDatasource{}
	res, err := datasource.unmarshalResponse(logger, &http.Response{
	res, err := datasource.unmarshalResponse(&http.Response{
		StatusCode: 200,
		Status: "OK",
		Body: io.NopCloser(strings.NewReader(("Azure Data"))),

@ -187,7 +184,7 @@ func TestUnmarshalResponse200Invalid(t *testing.T) {

func TestUnmarshalResponse200(t *testing.T) {
	datasource := &AzureResourceGraphDatasource{}
	res, err2 := datasource.unmarshalResponse(logger, &http.Response{
	res, err2 := datasource.unmarshalResponse(&http.Response{
		StatusCode: 200,
		Status: "OK",
		Body: io.NopCloser(strings.NewReader("{}")),

@ -190,7 +190,7 @@ type MetricVisualization struct {
}

type ServiceProxy interface {
	Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) http.ResponseWriter
	Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error)
}
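With Do now returning an error alongside the writer, a ServiceProxy test double only has to decide which of the two to populate. An illustrative sketch, assuming the surrounding package's net/http import; fakeProxy is hypothetical and not part of this change:

// fakeProxy is a hypothetical ServiceProxy implementation for tests.
type fakeProxy struct {
	err error
}

func (f *fakeProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) (http.ResponseWriter, error) {
	if f.err != nil {
		// Mirrors the updated contract: failures are returned to the caller
		// instead of being logged and swallowed.
		return nil, f.err
	}
	rw.WriteHeader(http.StatusOK)
	return rw, nil
}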

type LogAnalyticsWorkspaceFeatures struct {