Chore: Update cloud monitoring and Azure data sources to support contextual logs (#57844)

* update cloud monitoring to use log from context
* update azure monitor to use contextual logger
This commit is contained in:
Yuriy Tseretyan 2022-11-04 09:28:38 -04:00 committed by GitHub
parent 72d0c6b428
commit ff5cc3e640
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 176 additions and 177 deletions

View File

@ -1,23 +0,0 @@
package azlog
import "github.com/grafana/grafana/pkg/infra/log"
// azlog is the package-level logger shared by the azuremonitor helpers below.
var azlog = log.New("tsdb.azuremonitor")
// Warn logs msg with the given key/value pairs at warn level using the
// package logger. The variadic args must be re-expanded with ... — passing
// the slice bare would log it as a single argument and break key/value
// pairing in the underlying logger.
func Warn(msg string, args ...interface{}) {
	azlog.Warn(msg, args...)
}
// Debug logs msg with the given key/value pairs at debug level using the
// package logger. args is spread with ... so each key/value is forwarded
// individually instead of as one nested slice.
func Debug(msg string, args ...interface{}) {
	azlog.Debug(msg, args...)
}
// Error logs msg with the given key/value pairs at error level using the
// package logger. args is spread with ... so each key/value is forwarded
// individually instead of as one nested slice.
func Error(msg string, args ...interface{}) {
	azlog.Error(msg, args...)
}
// Info logs msg with the given key/value pairs at info level using the
// package logger. args is spread with ... so each key/value is forwarded
// individually instead of as one nested slice.
func Info(msg string, args ...interface{}) {
	azlog.Info(msg, args...)
}

View File

@ -8,7 +8,7 @@ import (
"strings"
"github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/azlog"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)
@ -22,7 +22,8 @@ func getTarget(original string) (target string, err error) {
return
}
type httpServiceProxy struct{}
type httpServiceProxy struct {
}
func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *http.Client) http.ResponseWriter {
res, err := cli.Do(req)
@ -30,13 +31,13 @@ func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *ht
rw.WriteHeader(http.StatusInternalServerError)
_, err = rw.Write([]byte(fmt.Sprintf("unexpected error %v", err)))
if err != nil {
azlog.Error("Unable to write HTTP response", "error", err)
logger.Error("Unable to write HTTP response", "error", err)
}
return nil
}
defer func() {
if err := res.Body.Close(); err != nil {
azlog.Warn("Failed to close response body", "err", err)
logger.Warn("Failed to close response body", "err", err)
}
}()
@ -45,14 +46,14 @@ func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *ht
rw.WriteHeader(http.StatusInternalServerError)
_, err = rw.Write([]byte(fmt.Sprintf("unexpected error %v", err)))
if err != nil {
azlog.Error("Unable to write HTTP response", "error", err)
logger.Error("Unable to write HTTP response", "error", err)
}
return nil
}
rw.WriteHeader(res.StatusCode)
_, err = rw.Write(body)
if err != nil {
azlog.Error("Unable to write HTTP response", "error", err)
logger.Error("Unable to write HTTP response", "error", err)
}
for k, v := range res.Header {
@ -83,13 +84,13 @@ func writeResponse(rw http.ResponseWriter, code int, msg string) {
rw.WriteHeader(http.StatusBadRequest)
_, err := rw.Write([]byte(msg))
if err != nil {
azlog.Error("Unable to write HTTP response", "error", err)
logger.Error("Unable to write HTTP response", "error", err)
}
}
func (s *Service) handleResourceReq(subDataSource string) func(rw http.ResponseWriter, req *http.Request) {
return func(rw http.ResponseWriter, req *http.Request) {
azlog.Debug("Received resource call", "url", req.URL.String(), "method", req.Method)
logger.Debug("Received resource call", "url", req.URL.String(), "method", req.Method)
newPath, err := getTarget(req.URL.Path)
if err != nil {

View File

@ -16,7 +16,9 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/httpclient"
"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
"github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/loganalytics"
@ -25,6 +27,8 @@ import (
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)
var logger = log.New("tsdb.azuremonitor")
func ProvideService(cfg *setting.Cfg, httpClientProvider *httpclient.Provider, tracer tracing.Tracer) *Service {
proxy := &httpServiceProxy{}
executors := map[string]azDatasourceExecutor{
@ -159,7 +163,7 @@ func getAzureRoutes(cloud string, jsonData json.RawMessage) (map[string]types.Az
}
type azDatasourceExecutor interface {
ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error)
ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error)
ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client)
}
@ -191,7 +195,7 @@ func (s *Service) newQueryMux() *datasource.QueryTypeMux {
if !ok {
return nil, fmt.Errorf("missing service for %s", dst)
}
return executor.ExecuteTimeSeriesQuery(ctx, req.Queries, dsInfo, service.HTTPClient, service.URL, s.tracer)
return executor.ExecuteTimeSeriesQuery(ctx, logger, req.Queries, dsInfo, service.HTTPClient, service.URL, s.tracer)
})
}
return mux

View File

@ -17,6 +17,7 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/httpclient"
"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
@ -134,8 +135,7 @@ type fakeExecutor struct {
func (f *fakeExecutor) ResourceRequest(rw http.ResponseWriter, req *http.Request, cli *http.Client) {
}
func (f *fakeExecutor) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client,
url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
func (f *fakeExecutor) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
if client == nil {
f.t.Errorf("The HTTP client for %s is missing", f.queryType)
} else {

View File

@ -18,8 +18,8 @@ import (
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/azlog"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/macros"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)
@ -49,17 +49,16 @@ func (e *AzureLogAnalyticsDatasource) ResourceRequest(rw http.ResponseWriter, re
// 1. build the AzureMonitor url and querystring for each query
// 2. executes each query by calling the Azure Monitor API
// 3. parses the responses for each query into data frames
func (e *AzureLogAnalyticsDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client,
url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
func (e *AzureLogAnalyticsDatasource) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
result := backend.NewQueryDataResponse()
queries, err := e.buildQueries(originalQueries, dsInfo)
ctxLogger := logger.FromContext(ctx)
queries, err := e.buildQueries(ctxLogger, originalQueries, dsInfo)
if err != nil {
return nil, err
}
for _, query := range queries {
result.Responses[query.RefID] = e.executeQuery(ctx, query, dsInfo, client, url, tracer)
result.Responses[query.RefID] = e.executeQuery(ctx, ctxLogger, query, dsInfo, client, url, tracer)
}
return result, nil
@ -88,7 +87,7 @@ func getApiURL(queryJSONModel types.LogJSONQuery) string {
}
}
func (e *AzureLogAnalyticsDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureLogAnalyticsQuery, error) {
func (e *AzureLogAnalyticsDatasource) buildQueries(logger log.Logger, queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureLogAnalyticsQuery, error) {
azureLogAnalyticsQueries := []*AzureLogAnalyticsQuery{}
for _, query := range queries {
@ -99,7 +98,7 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(queries []backend.DataQuery,
}
azureLogAnalyticsTarget := queryJSONModel.AzureLogAnalytics
azlog.Debug("AzureLogAnalytics", "target", azureLogAnalyticsTarget)
logger.Debug("AzureLogAnalytics", "target", azureLogAnalyticsTarget)
resultFormat := azureLogAnalyticsTarget.ResultFormat
if resultFormat == "" {
@ -109,7 +108,7 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(queries []backend.DataQuery,
apiURL := getApiURL(queryJSONModel)
params := url.Values{}
rawQuery, err := macros.KqlInterpolate(query, dsInfo, azureLogAnalyticsTarget.Query, "TimeGenerated")
rawQuery, err := macros.KqlInterpolate(logger, query, dsInfo, azureLogAnalyticsTarget.Query, "TimeGenerated")
if err != nil {
return nil, err
}
@ -129,7 +128,7 @@ func (e *AzureLogAnalyticsDatasource) buildQueries(queries []backend.DataQuery,
return azureLogAnalyticsQueries, nil
}
func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *AzureLogAnalyticsQuery, dsInfo types.DatasourceInfo, client *http.Client,
func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, logger log.Logger, query *AzureLogAnalyticsQuery, dsInfo types.DatasourceInfo, client *http.Client,
url string, tracer tracing.Tracer) backend.DataResponse {
dataResponse := backend.DataResponse{}
@ -151,7 +150,7 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *A
return dataResponseErrorWithExecuted(fmt.Errorf("credentials for Log Analytics are no longer supported. Go to the data source configuration to update Azure Monitor credentials"))
}
req, err := e.createRequest(ctx, dsInfo, url)
req, err := e.createRequest(ctx, logger, url)
if err != nil {
dataResponse.Error = err
return dataResponse
@ -171,13 +170,13 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *A
tracer.Inject(ctx, req.Header, span)
azlog.Debug("AzureLogAnalytics", "Request ApiURL", req.URL.String())
logger.Debug("AzureLogAnalytics", "Request ApiURL", req.URL.String())
res, err := client.Do(req)
if err != nil {
return dataResponseErrorWithExecuted(err)
}
logResponse, err := e.unmarshalResponse(res)
logResponse, err := e.unmarshalResponse(logger, res)
if err != nil {
return dataResponseErrorWithExecuted(err)
}
@ -204,7 +203,7 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *A
model.Get("azureLogAnalytics").Get("workspace").MustString())
if err != nil {
frame.AppendNotices(data.Notice{Severity: data.NoticeSeverityWarning, Text: "could not add custom metadata: " + err.Error()})
azlog.Warn("failed to add custom metadata to azure log analytics response", err)
logger.Warn("failed to add custom metadata to azure log analytics response", err)
}
if query.ResultFormat == types.TimeSeries {
@ -229,10 +228,10 @@ func appendErrorNotice(frame *data.Frame, err *AzureLogAnalyticsAPIError) {
}
}
func (e *AzureLogAnalyticsDatasource) createRequest(ctx context.Context, dsInfo types.DatasourceInfo, url string) (*http.Request, error) {
func (e *AzureLogAnalyticsDatasource) createRequest(ctx context.Context, logger log.Logger, url string) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
azlog.Debug("Failed to create request", "error", err)
logger.Debug("Failed to create request", "error", err)
return nil, fmt.Errorf("%v: %w", "failed to create request", err)
}
req.URL.Path = "/"
@ -279,19 +278,19 @@ func (ar *AzureLogAnalyticsResponse) GetPrimaryResultTable() (*types.AzureRespon
return nil, fmt.Errorf("no data as PrimaryResult table is missing from the response")
}
func (e *AzureLogAnalyticsDatasource) unmarshalResponse(res *http.Response) (AzureLogAnalyticsResponse, error) {
func (e *AzureLogAnalyticsDatasource) unmarshalResponse(logger log.Logger, res *http.Response) (AzureLogAnalyticsResponse, error) {
body, err := io.ReadAll(res.Body)
if err != nil {
return AzureLogAnalyticsResponse{}, err
}
defer func() {
if err := res.Body.Close(); err != nil {
azlog.Warn("Failed to close response body", "err", err)
logger.Warn("Failed to close response body", "err", err)
}
}()
if res.StatusCode/100 != 2 {
azlog.Debug("Request failed", "status", res.Status, "body", string(body))
logger.Debug("Request failed", "status", res.Status, "body", string(body))
return AzureLogAnalyticsResponse{}, fmt.Errorf("request failed, status: %s, body: %s", res.Status, string(body))
}
@ -300,7 +299,7 @@ func (e *AzureLogAnalyticsDatasource) unmarshalResponse(res *http.Response) (Azu
d.UseNumber()
err = d.Decode(&data)
if err != nil {
azlog.Debug("Failed to unmarshal Azure Log Analytics response", "error", err, "status", res.Status, "body", string(body))
logger.Debug("Failed to unmarshal Azure Log Analytics response", "error", err, "status", res.Status, "body", string(body))
return AzureLogAnalyticsResponse{}, err
}

View File

@ -11,11 +11,15 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
"github.com/stretchr/testify/require"
)
var logger = log.New("test")
func TestBuildingAzureLogAnalyticsQueries(t *testing.T) {
datasource := &AzureLogAnalyticsDatasource{}
fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC).In(time.Local)
@ -172,7 +176,7 @@ func TestBuildingAzureLogAnalyticsQueries(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
queries, err := datasource.buildQueries(tt.queryModel, types.DatasourceInfo{})
queries, err := datasource.buildQueries(logger, tt.queryModel, types.DatasourceInfo{})
tt.Err(t, err)
if diff := cmp.Diff(tt.azureLogAnalyticsQueries[0], queries[0]); diff != "" {
t.Errorf("Result mismatch (-want +got):\n%s", diff)
@ -184,7 +188,6 @@ func TestBuildingAzureLogAnalyticsQueries(t *testing.T) {
func TestLogAnalyticsCreateRequest(t *testing.T) {
ctx := context.Background()
url := "http://ds"
dsInfo := types.DatasourceInfo{}
tests := []struct {
name string
@ -203,7 +206,7 @@ func TestLogAnalyticsCreateRequest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ds := AzureLogAnalyticsDatasource{}
req, err := ds.createRequest(ctx, dsInfo, url)
req, err := ds.createRequest(ctx, logger, url)
tt.Err(t, err)
if req.URL.String() != tt.expectedURL {
t.Errorf("Expecting %s, got %s", tt.expectedURL, req.URL.String())
@ -231,7 +234,7 @@ func Test_executeQueryErrorWithDifferentLogAnalyticsCreds(t *testing.T) {
TimeRange: backend.TimeRange{},
}
tracer := tracing.InitializeTracerForTest()
res := ds.executeQuery(ctx, query, dsInfo, &http.Client{}, dsInfo.Services["Azure Log Analytics"].URL, tracer)
res := ds.executeQuery(ctx, logger, query, dsInfo, &http.Client{}, dsInfo.Services["Azure Log Analytics"].URL, tracer)
if res.Error == nil {
t.Fatal("expecting an error")
}

View File

@ -9,8 +9,8 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/datasources"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/azlog"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
"github.com/grafana/grafana/pkg/tsdb/legacydata/interval"
)
@ -34,17 +34,17 @@ type kqlMacroEngine struct {
// - $__escapeMulti('\\vm\eth0\Total','\\vm\eth2\Total') -> @'\\vm\eth0\Total',@'\\vm\eth2\Total'
// KqlInterpolate interpolates macros for Kusto Query Language (KQL) queries
func KqlInterpolate(query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField ...string) (string, error) {
func KqlInterpolate(logger log.Logger, query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField ...string) (string, error) {
engine := kqlMacroEngine{}
defaultTimeFieldForAllDatasources := "timestamp"
if len(defaultTimeField) > 0 {
defaultTimeFieldForAllDatasources = defaultTimeField[0]
}
return engine.Interpolate(query, dsInfo, kql, defaultTimeFieldForAllDatasources)
return engine.Interpolate(logger, query, dsInfo, kql, defaultTimeFieldForAllDatasources)
}
func (m *kqlMacroEngine) Interpolate(query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField string) (string, error) {
func (m *kqlMacroEngine) Interpolate(logger log.Logger, query backend.DataQuery, dsInfo types.DatasourceInfo, kql string, defaultTimeField string) (string, error) {
m.timeRange = query.TimeRange
m.query = query
rExp, _ := regexp.Compile(sExpr)
@ -74,7 +74,7 @@ func (m *kqlMacroEngine) Interpolate(query backend.DataQuery, dsInfo types.Datas
for i, arg := range args {
args[i] = strings.Trim(arg, " ")
}
res, err := m.evaluateMacro(groups[1], defaultTimeField, args, dsInfo)
res, err := m.evaluateMacro(logger, groups[1], defaultTimeField, args, dsInfo)
if err != nil && macroError == nil {
macroError = err
return "macro_error()"
@ -89,7 +89,7 @@ func (m *kqlMacroEngine) Interpolate(query backend.DataQuery, dsInfo types.Datas
return kql, nil
}
func (m *kqlMacroEngine) evaluateMacro(name string, defaultTimeField string, args []string, dsInfo types.DatasourceInfo) (string, error) {
func (m *kqlMacroEngine) evaluateMacro(logger log.Logger, name string, defaultTimeField string, args []string, dsInfo types.DatasourceInfo) (string, error) {
switch name {
case "timeFilter":
timeColumn := defaultTimeField
@ -112,14 +112,14 @@ func (m *kqlMacroEngine) evaluateMacro(name string, defaultTimeField string, arg
defaultInterval := time.Duration((to - from) / 60)
model, err := simplejson.NewJson(m.query.JSON)
if err != nil {
azlog.Warn("Unable to parse model from query", "JSON", m.query.JSON)
logger.Warn("Unable to parse model from query", "JSON", m.query.JSON)
it = defaultInterval
} else {
it, err = interval.GetIntervalFrom(&datasources.DataSource{
JsonData: simplejson.NewFromAny(dsInfo.JSONData),
}, model, defaultInterval)
if err != nil {
azlog.Warn("Unable to get interval from query", "model", model)
logger.Warn("Unable to get interval from query", "model", model)
it = defaultInterval
}
}

View File

@ -7,8 +7,10 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)
func TestAzureLogAnalyticsMacros(t *testing.T) {
@ -126,7 +128,7 @@ func TestAzureLogAnalyticsMacros(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defaultTimeField := "TimeGenerated"
rawQuery, err := KqlInterpolate(tt.query, types.DatasourceInfo{}, tt.kql, defaultTimeField)
rawQuery, err := KqlInterpolate(log.New("test"), tt.query, types.DatasourceInfo{}, tt.kql, defaultTimeField)
tt.Err(t, err)
if diff := cmp.Diff(tt.expected, rawQuery, cmpopts.EquateNaNs()); diff != "" {
t.Errorf("Result mismatch (-want +got):\n%s", diff)

View File

@ -17,9 +17,9 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/azlog"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/resourcegraph"
azTime "github.com/grafana/grafana/pkg/tsdb/azuremonitor/time"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
@ -46,23 +46,23 @@ func (e *AzureMonitorDatasource) ResourceRequest(rw http.ResponseWriter, req *ht
// 1. build the AzureMonitor url and querystring for each query
// 2. executes each query by calling the Azure Monitor API
// 3. parses the responses for each query into data frames
func (e *AzureMonitorDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client,
url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
func (e *AzureMonitorDatasource) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
result := backend.NewQueryDataResponse()
ctxLogger := logger.FromContext(ctx)
queries, err := e.buildQueries(originalQueries, dsInfo)
queries, err := e.buildQueries(ctxLogger, originalQueries, dsInfo)
if err != nil {
return nil, err
}
for _, query := range queries {
result.Responses[query.RefID] = e.executeQuery(ctx, query, dsInfo, client, url, tracer)
result.Responses[query.RefID] = e.executeQuery(ctx, ctxLogger, query, dsInfo, client, url, tracer)
}
return result, nil
}
func (e *AzureMonitorDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*types.AzureMonitorQuery, error) {
func (e *AzureMonitorDatasource) buildQueries(logger log.Logger, queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*types.AzureMonitorQuery, error) {
azureMonitorQueries := []*types.AzureMonitorQuery{}
for _, query := range queries {
@ -171,7 +171,7 @@ func (e *AzureMonitorDatasource) buildQueries(queries []backend.DataQuery, dsInf
target = params.Encode()
if setting.Env == setting.Dev {
azlog.Debug("Azuremonitor request", "params", params)
logger.Debug("Azuremonitor request", "params", params)
}
azureMonitorQueries = append(azureMonitorQueries, &types.AzureMonitorQuery{
@ -188,11 +188,11 @@ func (e *AzureMonitorDatasource) buildQueries(queries []backend.DataQuery, dsInf
return azureMonitorQueries, nil
}
func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *types.AzureMonitorQuery, dsInfo types.DatasourceInfo, cli *http.Client,
func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, logger log.Logger, query *types.AzureMonitorQuery, dsInfo types.DatasourceInfo, cli *http.Client,
url string, tracer tracing.Tracer) backend.DataResponse {
dataResponse := backend.DataResponse{}
req, err := e.createRequest(ctx, dsInfo, url)
req, err := e.createRequest(ctx, logger, url)
if err != nil {
dataResponse.Error = err
return dataResponse
@ -215,8 +215,8 @@ func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *types.
defer span.End()
tracer.Inject(ctx, req.Header, span)
azlog.Debug("AzureMonitor", "Request ApiURL", req.URL.String())
azlog.Debug("AzureMonitor", "Target", query.Target)
logger.Debug("AzureMonitor", "Request ApiURL", req.URL.String())
logger.Debug("AzureMonitor", "Target", query.Target)
res, err := cli.Do(req)
if err != nil {
dataResponse.Error = err
@ -224,11 +224,11 @@ func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *types.
}
defer func() {
if err := res.Body.Close(); err != nil {
azlog.Warn("Failed to close response body", "err", err)
logger.Warn("Failed to close response body", "err", err)
}
}()
data, err := e.unmarshalResponse(res)
data, err := e.unmarshalResponse(logger, res)
if err != nil {
dataResponse.Error = err
return dataResponse
@ -249,10 +249,10 @@ func (e *AzureMonitorDatasource) executeQuery(ctx context.Context, query *types.
return dataResponse
}
func (e *AzureMonitorDatasource) createRequest(ctx context.Context, dsInfo types.DatasourceInfo, url string) (*http.Request, error) {
func (e *AzureMonitorDatasource) createRequest(ctx context.Context, logger log.Logger, url string) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
azlog.Debug("Failed to create request", "error", err)
logger.Debug("Failed to create request", "error", err)
return nil, fmt.Errorf("%v: %w", "Failed to create request", err)
}
req.Header.Set("Content-Type", "application/json")
@ -260,21 +260,21 @@ func (e *AzureMonitorDatasource) createRequest(ctx context.Context, dsInfo types
return req, nil
}
func (e *AzureMonitorDatasource) unmarshalResponse(res *http.Response) (types.AzureMonitorResponse, error) {
func (e *AzureMonitorDatasource) unmarshalResponse(logger log.Logger, res *http.Response) (types.AzureMonitorResponse, error) {
body, err := io.ReadAll(res.Body)
if err != nil {
return types.AzureMonitorResponse{}, err
}
if res.StatusCode/100 != 2 {
azlog.Debug("Request failed", "status", res.Status, "body", string(body))
logger.Debug("Request failed", "status", res.Status, "body", string(body))
return types.AzureMonitorResponse{}, fmt.Errorf("request failed, status: %s", res.Status)
}
var data types.AzureMonitorResponse
err = json.Unmarshal(body, &data)
if err != nil {
azlog.Debug("Failed to unmarshal AzureMonitor response", "error", err, "status", res.Status, "body", string(body))
logger.Debug("Failed to unmarshal AzureMonitor response", "error", err, "status", res.Status, "body", string(body))
return types.AzureMonitorResponse{}, err
}

View File

@ -15,11 +15,13 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/components/simplejson"
azTime "github.com/grafana/grafana/pkg/tsdb/azuremonitor/time"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
"github.com/stretchr/testify/require"
ptr "github.com/xorcare/pointer"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
azTime "github.com/grafana/grafana/pkg/tsdb/azuremonitor/time"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)
func TestAzureMonitorBuildQueries(t *testing.T) {
@ -279,7 +281,7 @@ func TestAzureMonitorBuildQueries(t *testing.T) {
azureMonitorQuery.URL = "/subscriptions/12345678-aaaa-bbbb-cccc-123456789abc/resourceGroups/grafanastaging/providers/Microsoft.Compute/virtualMachines/grafana/providers/microsoft.insights/metrics"
}
queries, err := datasource.buildQueries(tsdbQuery, dsInfo)
queries, err := datasource.buildQueries(log.New("test"), tsdbQuery, dsInfo)
require.NoError(t, err)
if diff := cmp.Diff(azureMonitorQuery, queries[0], cmpopts.IgnoreUnexported(simplejson.Json{}), cmpopts.IgnoreFields(types.AzureMonitorQuery{}, "Params")); diff != "" {
t.Errorf("Result mismatch (-want +got):\n%s", diff)
@ -310,7 +312,7 @@ func TestCustomNamespace(t *testing.T) {
},
}
result, err := datasource.buildQueries(q, types.DatasourceInfo{})
result, err := datasource.buildQueries(log.New("test"), q, types.DatasourceInfo{})
require.NoError(t, err)
expected := "custom/namespace"
require.Equal(t, expected, result[0].Params.Get("metricnamespace"))
@ -737,7 +739,6 @@ func loadTestFile(t *testing.T, name string) types.AzureMonitorResponse {
func TestAzureMonitorCreateRequest(t *testing.T) {
ctx := context.Background()
dsInfo := types.DatasourceInfo{}
url := "http://ds/"
tests := []struct {
@ -759,7 +760,7 @@ func TestAzureMonitorCreateRequest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ds := AzureMonitorDatasource{}
req, err := ds.createRequest(ctx, dsInfo, url)
req, err := ds.createRequest(ctx, log.New("test"), url)
tt.Err(t, err)
if req.URL.String() != tt.expectedURL {
t.Errorf("Expecting %s, got %s", tt.expectedURL, req.URL.String())

View File

@ -17,9 +17,9 @@ import (
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/azlog"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/loganalytics"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/macros"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
@ -57,19 +57,19 @@ func (e *AzureResourceGraphDatasource) ResourceRequest(rw http.ResponseWriter, r
// 1. builds the AzureMonitor url and querystring for each query
// 2. executes each query by calling the Azure Monitor API
// 3. parses the responses for each query into data frames
func (e *AzureResourceGraphDatasource) ExecuteTimeSeriesQuery(ctx context.Context, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client,
url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
func (e *AzureResourceGraphDatasource) ExecuteTimeSeriesQuery(ctx context.Context, logger log.Logger, originalQueries []backend.DataQuery, dsInfo types.DatasourceInfo, client *http.Client, url string, tracer tracing.Tracer) (*backend.QueryDataResponse, error) {
result := &backend.QueryDataResponse{
Responses: map[string]backend.DataResponse{},
}
ctxLogger := logger.FromContext(ctx)
queries, err := e.buildQueries(originalQueries, dsInfo)
queries, err := e.buildQueries(ctxLogger, originalQueries, dsInfo)
if err != nil {
return nil, err
}
for _, query := range queries {
result.Responses[query.RefID] = e.executeQuery(ctx, query, dsInfo, client, url, tracer)
result.Responses[query.RefID] = e.executeQuery(ctx, ctxLogger, query, dsInfo, client, url, tracer)
}
return result, nil
@ -82,7 +82,7 @@ type argJSONQuery struct {
} `json:"azureResourceGraph"`
}
func (e *AzureResourceGraphDatasource) buildQueries(queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureResourceGraphQuery, error) {
func (e *AzureResourceGraphDatasource) buildQueries(logger log.Logger, queries []backend.DataQuery, dsInfo types.DatasourceInfo) ([]*AzureResourceGraphQuery, error) {
var azureResourceGraphQueries []*AzureResourceGraphQuery
for _, query := range queries {
@ -93,14 +93,14 @@ func (e *AzureResourceGraphDatasource) buildQueries(queries []backend.DataQuery,
}
azureResourceGraphTarget := queryJSONModel.AzureResourceGraph
azlog.Debug("AzureResourceGraph", "target", azureResourceGraphTarget)
logger.Debug("AzureResourceGraph", "target", azureResourceGraphTarget)
resultFormat := azureResourceGraphTarget.ResultFormat
if resultFormat == "" {
resultFormat = "table"
}
interpolatedQuery, err := macros.KqlInterpolate(query, dsInfo, azureResourceGraphTarget.Query)
interpolatedQuery, err := macros.KqlInterpolate(logger, query, dsInfo, azureResourceGraphTarget.Query)
if err != nil {
return nil, err
@ -118,7 +118,7 @@ func (e *AzureResourceGraphDatasource) buildQueries(queries []backend.DataQuery,
return azureResourceGraphQueries, nil
}
func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *AzureResourceGraphQuery, dsInfo types.DatasourceInfo, client *http.Client,
func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, logger log.Logger, query *AzureResourceGraphQuery, dsInfo types.DatasourceInfo, client *http.Client,
dsURL string, tracer tracing.Tracer) backend.DataResponse {
dataResponse := backend.DataResponse{}
@ -156,7 +156,7 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *
return dataResponse
}
req, err := e.createRequest(ctx, dsInfo, reqBody, dsURL)
req, err := e.createRequest(ctx, logger, reqBody, dsURL)
if err != nil {
dataResponse.Error = err
@ -177,13 +177,13 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *
tracer.Inject(ctx, req.Header, span)
azlog.Debug("AzureResourceGraph", "Request ApiURL", req.URL.String())
logger.Debug("AzureResourceGraph", "Request ApiURL", req.URL.String())
res, err := client.Do(req)
if err != nil {
return dataResponseErrorWithExecuted(err)
}
argResponse, err := e.unmarshalResponse(res)
argResponse, err := e.unmarshalResponse(logger, res)
if err != nil {
return dataResponseErrorWithExecuted(err)
}
@ -224,10 +224,10 @@ func AddConfigLinks(frame data.Frame, dl string) data.Frame {
return frame
}
func (e *AzureResourceGraphDatasource) createRequest(ctx context.Context, dsInfo types.DatasourceInfo, reqBody []byte, url string) (*http.Request, error) {
func (e *AzureResourceGraphDatasource) createRequest(ctx context.Context, logger log.Logger, reqBody []byte, url string) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(reqBody))
if err != nil {
azlog.Debug("Failed to create request", "error", err)
logger.Debug("Failed to create request", "error", err)
return nil, fmt.Errorf("%v: %w", "failed to create request", err)
}
req.URL.Path = "/"
@ -237,19 +237,19 @@ func (e *AzureResourceGraphDatasource) createRequest(ctx context.Context, dsInfo
return req, nil
}
func (e *AzureResourceGraphDatasource) unmarshalResponse(res *http.Response) (AzureResourceGraphResponse, error) {
func (e *AzureResourceGraphDatasource) unmarshalResponse(logger log.Logger, res *http.Response) (AzureResourceGraphResponse, error) {
body, err := io.ReadAll(res.Body)
if err != nil {
return AzureResourceGraphResponse{}, err
}
defer func() {
if err := res.Body.Close(); err != nil {
azlog.Warn("Failed to close response body", "err", err)
logger.Warn("Failed to close response body", "err", err)
}
}()
if res.StatusCode/100 != 2 {
azlog.Debug("Request failed", "status", res.Status, "body", string(body))
logger.Debug("Request failed", "status", res.Status, "body", string(body))
return AzureResourceGraphResponse{}, fmt.Errorf("%s. Azure Resource Graph error: %s", res.Status, string(body))
}
@ -258,7 +258,7 @@ func (e *AzureResourceGraphDatasource) unmarshalResponse(res *http.Response) (Az
d.UseNumber()
err = d.Decode(&data)
if err != nil {
azlog.Debug("Failed to unmarshal azure resource graph response", "error", err, "status", res.Status, "body", string(body))
logger.Debug("Failed to unmarshal azure resource graph response", "error", err, "status", res.Status, "body", string(body))
return AzureResourceGraphResponse{}, err
}

View File

@ -14,12 +14,16 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
)
var logger = log.New("test")
func TestBuildingAzureResourceGraphQueries(t *testing.T) {
datasource := &AzureResourceGraphDatasource{}
fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC).In(time.Local)
@ -70,7 +74,7 @@ func TestBuildingAzureResourceGraphQueries(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
queries, err := datasource.buildQueries(tt.queryModel, types.DatasourceInfo{})
queries, err := datasource.buildQueries(logger, tt.queryModel, types.DatasourceInfo{})
tt.Err(t, err)
if diff := cmp.Diff(tt.azureResourceGraphQueries, queries, cmpopts.IgnoreUnexported(simplejson.Json{})); diff != "" {
t.Errorf("Result mismatch (-want +got):\n%s", diff)
@ -82,7 +86,6 @@ func TestBuildingAzureResourceGraphQueries(t *testing.T) {
func TestAzureResourceGraphCreateRequest(t *testing.T) {
ctx := context.Background()
url := "http://ds"
dsInfo := types.DatasourceInfo{}
tests := []struct {
name string
@ -104,7 +107,7 @@ func TestAzureResourceGraphCreateRequest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ds := AzureResourceGraphDatasource{}
req, err := ds.createRequest(ctx, dsInfo, []byte{}, url)
req, err := ds.createRequest(ctx, logger, []byte{}, url)
tt.Err(t, err)
if req.URL.String() != tt.expectedURL {
t.Errorf("Expecting %s, got %s", tt.expectedURL, req.URL.String())
@ -157,7 +160,7 @@ func TestGetAzurePortalUrl(t *testing.T) {
func TestUnmarshalResponse400(t *testing.T) {
datasource := &AzureResourceGraphDatasource{}
res, err := datasource.unmarshalResponse(&http.Response{
res, err := datasource.unmarshalResponse(logger, &http.Response{
StatusCode: 400,
Status: "400 Bad Request",
Body: io.NopCloser(strings.NewReader(("Azure Error Message"))),
@ -171,7 +174,7 @@ func TestUnmarshalResponse400(t *testing.T) {
func TestUnmarshalResponse200Invalid(t *testing.T) {
datasource := &AzureResourceGraphDatasource{}
res, err := datasource.unmarshalResponse(&http.Response{
res, err := datasource.unmarshalResponse(logger, &http.Response{
StatusCode: 200,
Status: "OK",
Body: io.NopCloser(strings.NewReader(("Azure Data"))),
@ -186,7 +189,7 @@ func TestUnmarshalResponse200Invalid(t *testing.T) {
func TestUnmarshalResponse200(t *testing.T) {
datasource := &AzureResourceGraphDatasource{}
res, err2 := datasource.unmarshalResponse(&http.Response{
res, err2 := datasource.unmarshalResponse(logger, &http.Response{
StatusCode: 200,
Status: "OK",
Body: io.NopCloser(strings.NewReader("{}")),

View File

@ -8,6 +8,8 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/infra/log"
)
type annotationEvent struct {
@ -17,11 +19,11 @@ type annotationEvent struct {
Text string
}
func (s *Service) executeAnnotationQuery(ctx context.Context, req *backend.QueryDataRequest, dsInfo datasourceInfo) (
func (s *Service) executeAnnotationQuery(ctx context.Context, logger log.Logger, req *backend.QueryDataRequest, dsInfo datasourceInfo) (
*backend.QueryDataResponse, error) {
resp := backend.NewQueryDataResponse()
queries, err := s.buildQueryExecutors(req)
queries, err := s.buildQueryExecutors(logger, req)
if err != nil {
return resp, err
}
@ -60,7 +62,7 @@ func (timeSeriesQuery cloudMonitoringTimeSeriesQuery) transformAnnotationToFrame
frame.AppendRow(a.Time, a.Title, a.Tags, a.Text)
}
result.Frames = append(result.Frames, frame)
slog.Info("anno", "len", len(annotations))
timeSeriesQuery.logger.Info("anno", "len", len(annotations))
}
func formatAnnotationText(annotationText string, pointValue string, metricType string, metricLabels map[string]string, resourceLabels map[string]string) string {

View File

@ -222,6 +222,7 @@ func newInstanceSettings(httpClientProvider httpclient.Provider) datasource.Inst
// QueryData takes in the frontend queries, parses them into the CloudMonitoring query format
// executes the queries against the CloudMonitoring API and parses the response into data frames
func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
logger := slog.FromContext(ctx)
resp := backend.NewQueryDataResponse()
if len(req.Queries) == 0 {
return resp, fmt.Errorf("query contains no queries")
@ -240,20 +241,20 @@ func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest)
switch model.Type {
case "annotationQuery":
resp, err = s.executeAnnotationQuery(ctx, req, *dsInfo)
resp, err = s.executeAnnotationQuery(ctx, logger, req, *dsInfo)
case "timeSeriesQuery":
fallthrough
default:
resp, err = s.executeTimeSeriesQuery(ctx, req, *dsInfo)
resp, err = s.executeTimeSeriesQuery(ctx, logger, req, *dsInfo)
}
return resp, err
}
func (s *Service) executeTimeSeriesQuery(ctx context.Context, req *backend.QueryDataRequest, dsInfo datasourceInfo) (
func (s *Service) executeTimeSeriesQuery(ctx context.Context, logger log.Logger, req *backend.QueryDataRequest, dsInfo datasourceInfo) (
*backend.QueryDataResponse, error) {
resp := backend.NewQueryDataResponse()
queryExecutors, err := s.buildQueryExecutors(req)
queryExecutors, err := s.buildQueryExecutors(logger, req)
if err != nil {
return resp, err
}
@ -304,7 +305,7 @@ func queryModel(query backend.DataQuery) (grafanaQuery, error) {
return q, nil
}
func (s *Service) buildQueryExecutors(req *backend.QueryDataRequest) ([]cloudMonitoringQueryExecutor, error) {
func (s *Service) buildQueryExecutors(logger log.Logger, req *backend.QueryDataRequest) ([]cloudMonitoringQueryExecutor, error) {
var cloudMonitoringQueryExecutors []cloudMonitoringQueryExecutor
startTime := req.Queries[0].TimeRange.From
endTime := req.Queries[0].TimeRange.To
@ -326,6 +327,7 @@ func (s *Service) buildQueryExecutors(req *backend.QueryDataRequest) ([]cloudMon
cmtsf := &cloudMonitoringTimeSeriesFilter{
RefID: query.RefID,
GroupBys: []string{},
logger: logger,
}
switch q.QueryType {
case metricQueryType:
@ -369,7 +371,7 @@ func (s *Service) buildQueryExecutors(req *backend.QueryDataRequest) ([]cloudMon
cmtsf.Params = params
if setting.Env == setting.Dev {
slog.Debug("CloudMonitoring request", "params", params)
logger.Debug("CloudMonitoring request", "params", params)
}
cloudMonitoringQueryExecutors = append(cloudMonitoringQueryExecutors, queryInterface)
@ -606,7 +608,7 @@ func calcBucketBound(bucketOptions cloudMonitoringBucketOptions, n int) string {
return bucketBound
}
func (s *Service) createRequest(ctx context.Context, dsInfo *datasourceInfo, proxyPass string, body io.Reader) (*http.Request, error) {
func (s *Service) createRequest(logger log.Logger, dsInfo *datasourceInfo, proxyPass string, body io.Reader) (*http.Request, error) {
u, err := url.Parse(dsInfo.url)
if err != nil {
return nil, err
@ -619,7 +621,7 @@ func (s *Service) createRequest(ctx context.Context, dsInfo *datasourceInfo, pro
}
req, err := http.NewRequest(method, dsInfo.services[cloudMonitor].url, body)
if err != nil {
slog.Error("Failed to create request", "error", err)
logger.Error("Failed to create request", "error", err)
return nil, fmt.Errorf("failed to create request: %w", err)
}
@ -636,7 +638,7 @@ func (s *Service) getDefaultProject(ctx context.Context, dsInfo datasourceInfo)
return dsInfo.defaultProject, nil
}
func unmarshalResponse(res *http.Response) (cloudMonitoringResponse, error) {
func unmarshalResponse(logger log.Logger, res *http.Response) (cloudMonitoringResponse, error) {
body, err := io.ReadAll(res.Body)
if err != nil {
return cloudMonitoringResponse{}, err
@ -644,19 +646,19 @@ func unmarshalResponse(res *http.Response) (cloudMonitoringResponse, error) {
defer func() {
if err := res.Body.Close(); err != nil {
slog.Warn("Failed to close response body", "err", err)
logger.Warn("Failed to close response body", "err", err)
}
}()
if res.StatusCode/100 != 2 {
slog.Error("Request failed", "status", res.Status, "body", string(body))
logger.Error("Request failed", "status", res.Status, "body", string(body))
return cloudMonitoringResponse{}, fmt.Errorf("query failed: %s", string(body))
}
var data cloudMonitoringResponse
err = json.Unmarshal(body, &data)
if err != nil {
slog.Error("Failed to unmarshal CloudMonitoring response", "error", err, "status", res.Status, "body", string(body))
logger.Error("Failed to unmarshal CloudMonitoring response", "error", err, "status", res.Status, "body", string(body))
return cloudMonitoringResponse{}, fmt.Errorf("failed to unmarshal query response: %w", err)
}

View File

@ -23,7 +23,7 @@ func TestCloudMonitoring(t *testing.T) {
t.Run("Parse migrated queries from frontend and build Google Cloud Monitoring API queries", func(t *testing.T) {
t.Run("and query has no aggregation set", func(t *testing.T) {
qes, err := service.buildQueryExecutors(baseReq())
qes, err := service.buildQueryExecutors(slog, baseReq())
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -64,7 +64,7 @@ func TestCloudMonitoring(t *testing.T) {
"filters": ["key", "=", "value", "AND", "key2", "=", "value2", "AND", "resource.type", "=", "another/resource/type"]
}`)
qes, err := service.buildQueryExecutors(query)
qes, err := service.buildQueryExecutors(slog, query)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, 1, len(queries))
@ -96,7 +96,7 @@ func TestCloudMonitoring(t *testing.T) {
"filters": ["key", "=", "value", "AND", "key2", "=", "value2"]
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+1000s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -124,7 +124,7 @@ func TestCloudMonitoring(t *testing.T) {
"filters": ["key", "=", "value", "AND", "key2", "=", "value2"]
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+60s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -158,7 +158,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "cloud-monitoring-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+60s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -173,7 +173,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "cloud-monitoring-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+60s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -188,7 +188,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "cloud-monitoring-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+300s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -203,7 +203,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "cloud-monitoring-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+3600s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -222,7 +222,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "stackdriver-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+60s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -252,7 +252,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "stackdriver-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+60s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -282,7 +282,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "stackdriver-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+300s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -312,7 +312,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "stackdriver-auto"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+3600s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -342,7 +342,7 @@ func TestCloudMonitoring(t *testing.T) {
"alignmentPeriod": "+600s"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `+600s`, queries[0].Params["aggregation.alignmentPeriod"][0])
@ -372,7 +372,7 @@ func TestCloudMonitoring(t *testing.T) {
"view": "FULL"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -416,7 +416,7 @@ func TestCloudMonitoring(t *testing.T) {
"view": "FULL"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -476,7 +476,7 @@ func TestCloudMonitoring(t *testing.T) {
},
}
t.Run("and query type is metrics", func(t *testing.T) {
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -523,7 +523,7 @@ func TestCloudMonitoring(t *testing.T) {
"sloQuery": {}
}`)
qes, err = service.buildQueryExecutors(req)
qes, err = service.buildQueryExecutors(slog, req)
require.NoError(t, err)
tqueries := make([]*cloudMonitoringTimeSeriesQuery, 0)
for _, qi := range qes {
@ -554,7 +554,7 @@ func TestCloudMonitoring(t *testing.T) {
"metricQuery": {}
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -582,7 +582,7 @@ func TestCloudMonitoring(t *testing.T) {
"metricQuery": {}
}`)
qes, err = service.buildQueryExecutors(req)
qes, err = service.buildQueryExecutors(slog, req)
require.NoError(t, err)
qqueries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, "ALIGN_NEXT_OLDER", qqueries[0].Params["aggregation.perSeriesAligner"][0])
@ -605,7 +605,7 @@ func TestCloudMonitoring(t *testing.T) {
"metricQuery": {}
}`)
qes, err = service.buildQueryExecutors(req)
qes, err = service.buildQueryExecutors(slog, req)
require.NoError(t, err)
qqqueries := getCloudMonitoringQueriesFromInterface(t, qes)
assert.Equal(t, `aggregation.alignmentPeriod=%2B60s&aggregation.perSeriesAligner=ALIGN_NEXT_OLDER&filter=select_slo_burn_rate%28%22projects%2Ftest-proj%2Fservices%2Ftest-service%2FserviceLevelObjectives%2Ftest-slo%22%2C+%221h%22%29&interval.endTime=2018-03-15T13%3A34%3A00Z&interval.startTime=2018-03-15T13%3A00%3A00Z`, qqqueries[0].Target)
@ -710,7 +710,7 @@ func TestCloudMonitoring(t *testing.T) {
"view": "FULL"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -738,7 +738,7 @@ func TestCloudMonitoring(t *testing.T) {
"preprocessor": "none"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -766,7 +766,7 @@ func TestCloudMonitoring(t *testing.T) {
"preprocessor": "rate"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -792,7 +792,7 @@ func TestCloudMonitoring(t *testing.T) {
"preprocessor": "rate"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -820,7 +820,7 @@ func TestCloudMonitoring(t *testing.T) {
"preprocessor": "delta"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)
@ -846,7 +846,7 @@ func TestCloudMonitoring(t *testing.T) {
"preprocessor": "delta"
}`)
qes, err := service.buildQueryExecutors(req)
qes, err := service.buildQueryExecutors(slog, req)
require.NoError(t, err)
queries := getCloudMonitoringQueriesFromInterface(t, qes)

View File

@ -26,7 +26,7 @@ func (timeSeriesFilter *cloudMonitoringTimeSeriesFilter) doRequestFilterPage(ctx
return cloudMonitoringResponse{}, err
}
dnext, err := unmarshalResponse(res)
dnext, err := unmarshalResponse(timeSeriesFilter.logger, res)
if err != nil {
return cloudMonitoringResponse{}, err
}
@ -45,9 +45,9 @@ func (timeSeriesFilter *cloudMonitoringTimeSeriesFilter) run(ctx context.Context
dr.Error = err
return dr, cloudMonitoringResponse{}, "", nil
}
slog.Info("No project name set on query, using project name from datasource", "projectName", projectName)
timeSeriesFilter.logger.Info("No project name set on query, using project name from datasource", "projectName", projectName)
}
r, err := s.createRequest(ctx, &dsInfo, path.Join("/v3/projects", projectName, "timeSeries"), nil)
r, err := s.createRequest(timeSeriesFilter.logger, &dsInfo, path.Join("/v3/projects", projectName, "timeSeries"), nil)
if err != nil {
dr.Error = err
return dr, cloudMonitoringResponse{}, "", nil

View File

@ -17,6 +17,7 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/tsdb/intervalv2"
)
@ -35,7 +36,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) appendGraphPeriod(req *ba
return ""
}
func doRequestQueryPage(requestBody map[string]interface{}, r *http.Request, dsInfo datasourceInfo) (cloudMonitoringResponse, error) {
func doRequestQueryPage(log log.Logger, requestBody map[string]interface{}, r *http.Request, dsInfo datasourceInfo) (cloudMonitoringResponse, error) {
buf, err := json.Marshal(requestBody)
if err != nil {
return cloudMonitoringResponse{}, err
@ -46,7 +47,7 @@ func doRequestQueryPage(requestBody map[string]interface{}, r *http.Request, dsI
return cloudMonitoringResponse{}, err
}
dnext, err := unmarshalResponse(res)
dnext, err := unmarshalResponse(log, res)
if err != nil {
return cloudMonitoringResponse{}, err
}
@ -65,7 +66,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) run(ctx context.Context,
dr.Error = err
return dr, cloudMonitoringResponse{}, "", nil
}
slog.Info("No project name set on query, using project name from datasource", "projectName", projectName)
timeSeriesQuery.logger.Info("No project name set on query, using project name from datasource", "projectName", projectName)
}
timeSeriesQuery.Query += timeSeriesQuery.appendGraphPeriod(req)
@ -84,7 +85,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) run(ctx context.Context,
requestBody := map[string]interface{}{
"query": timeSeriesQuery.Query,
}
r, err := s.createRequest(ctx, &dsInfo, p, bytes.NewBuffer([]byte{}))
r, err := s.createRequest(timeSeriesQuery.logger, &dsInfo, p, bytes.NewBuffer([]byte{}))
if err != nil {
dr.Error = err
return dr, cloudMonitoringResponse{}, "", nil
@ -92,7 +93,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) run(ctx context.Context,
tracer.Inject(ctx, r.Header, span)
r = r.WithContext(ctx)
d, err := doRequestQueryPage(requestBody, r, dsInfo)
d, err := doRequestQueryPage(timeSeriesQuery.logger, requestBody, r, dsInfo)
if err != nil {
dr.Error = err
return dr, cloudMonitoringResponse{}, "", nil
@ -102,7 +103,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) run(ctx context.Context,
"query": timeSeriesQuery.Query,
"pageToken": d.NextPageToken,
}
nextPage, err := doRequestQueryPage(requestBody, r, dsInfo)
nextPage, err := doRequestQueryPage(timeSeriesQuery.logger, requestBody, r, dsInfo)
if err != nil {
dr.Error = err
return dr, cloudMonitoringResponse{}, "", nil
@ -184,7 +185,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) parseResponse(queryRes *b
metricName := formatLegendKeys(d.Key, defaultMetricName, seriesLabels, nil,
&cloudMonitoringTimeSeriesFilter{
ProjectName: timeSeriesQuery.ProjectName, AliasBy: timeSeriesQuery.AliasBy,
ProjectName: timeSeriesQuery.ProjectName, AliasBy: timeSeriesQuery.AliasBy, logger: timeSeriesQuery.logger,
})
dataField := frame.Fields[1]
dataField.Name = metricName
@ -218,7 +219,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) parseResponse(queryRes *b
timeField := data.NewField(data.TimeSeriesTimeFieldName, nil, []time.Time{})
valueField := data.NewField(data.TimeSeriesValueFieldName, nil, []float64{})
frameName := formatLegendKeys(d.Key, defaultMetricName, nil, additionalLabels, &cloudMonitoringTimeSeriesFilter{ProjectName: timeSeriesQuery.ProjectName, AliasBy: timeSeriesQuery.AliasBy})
frameName := formatLegendKeys(d.Key, defaultMetricName, nil, additionalLabels, &cloudMonitoringTimeSeriesFilter{ProjectName: timeSeriesQuery.ProjectName, AliasBy: timeSeriesQuery.AliasBy, logger: timeSeriesQuery.logger})
valueField.Name = frameName
valueField.Labels = seriesLabels
setDisplayNameAsFieldName(valueField)
@ -246,7 +247,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) parseResponse(queryRes *b
additionalLabels := data.Labels{"bucket": bucketBound}
timeField := data.NewField(data.TimeSeriesTimeFieldName, nil, []time.Time{})
valueField := data.NewField(data.TimeSeriesValueFieldName, nil, []float64{})
frameName := formatLegendKeys(d.Key, defaultMetricName, seriesLabels, additionalLabels, &cloudMonitoringTimeSeriesFilter{ProjectName: timeSeriesQuery.ProjectName, AliasBy: timeSeriesQuery.AliasBy})
frameName := formatLegendKeys(d.Key, defaultMetricName, seriesLabels, additionalLabels, &cloudMonitoringTimeSeriesFilter{ProjectName: timeSeriesQuery.ProjectName, AliasBy: timeSeriesQuery.AliasBy, logger: timeSeriesQuery.logger})
valueField.Name = frameName
valueField.Labels = seriesLabels
setDisplayNameAsFieldName(valueField)
@ -340,7 +341,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) parseToAnnotations(queryR
func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) buildDeepLink() string {
u, err := url.Parse("https://console.cloud.google.com/monitoring/metrics-explorer")
if err != nil {
slog.Error("Failed to generate deep link: unable to parse metrics explorer URL", "projectName", timeSeriesQuery.ProjectName, "query", timeSeriesQuery.RefID)
timeSeriesQuery.logger.Error("Failed to generate deep link: unable to parse metrics explorer URL", "projectName", timeSeriesQuery.ProjectName, "query", timeSeriesQuery.RefID)
return ""
}
@ -373,7 +374,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) buildDeepLink() string {
blob, err := json.Marshal(pageState)
if err != nil {
slog.Error("Failed to generate deep link", "pageState", pageState, "ProjectName", timeSeriesQuery.ProjectName, "query", timeSeriesQuery.RefID)
timeSeriesQuery.logger.Error("Failed to generate deep link", "pageState", pageState, "ProjectName", timeSeriesQuery.ProjectName, "query", timeSeriesQuery.RefID)
return ""
}
@ -382,7 +383,7 @@ func (timeSeriesQuery *cloudMonitoringTimeSeriesQuery) buildDeepLink() string {
accountChooserURL, err := url.Parse("https://accounts.google.com/AccountChooser")
if err != nil {
slog.Error("Failed to generate deep link: unable to parse account chooser URL", "ProjectName", timeSeriesQuery.ProjectName, "query", timeSeriesQuery.RefID)
timeSeriesQuery.logger.Error("Failed to generate deep link: unable to parse account chooser URL", "ProjectName", timeSeriesQuery.ProjectName, "query", timeSeriesQuery.RefID)
return ""
}
accountChooserQuery := accountChooserURL.Query()

View File

@ -6,6 +6,8 @@ import (
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
)
@ -30,6 +32,7 @@ type (
Selector string
Service string
Slo string
logger log.Logger
}
// Used to build MQL queries
@ -41,6 +44,7 @@ type (
AliasBy string
timeRange backend.TimeRange
GraphPeriod string
logger log.Logger
}
metricQuery struct {