CloudWatch: Remove simplejson in favor of 'encoding/json' (#51062)
parent: eb6d6d0d2b
commit: 05cdef5004
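The change swaps Grafana's dynamic simplejson wrapper for standard-library decoding into typed models, and the pattern repeats across every file touched below. As orientation before the hunks, here is a minimal, self-contained sketch of that pattern; the trimmed field set and sample payload are illustrative, not taken from the diff:

package main

import (
	"encoding/json"
	"fmt"
)

// dataQueryModel is a trimmed, illustrative stand-in for the typed
// DataQueryJson/LogQueryJson/QueryJson models introduced in this
// commit; the real structs carry more fields.
type dataQueryModel struct {
	QueryType string  `json:"type,omitempty"`
	Region    string  `json:"region"`
	Namespace string  `json:"namespace"`
	Statistic *string `json:"statistic"` // pointer so "absent" differs from ""
}

func main() {
	raw := []byte(`{"type":"timeSeriesQuery","region":"us-east-1","namespace":"ec2","statistic":"Average"}`)

	// Before: model, _ := simplejson.NewJson(raw), then
	// model.Get("region").MustString("") at every use site.
	// After: decode once, then use plain field access.
	var model dataQueryModel
	if err := json.Unmarshal(raw, &model); err != nil {
		panic(err)
	}

	statistic := ""
	if model.Statistic != nil {
		statistic = *model.Statistic
	}
	fmt.Println(model.QueryType, model.Region, model.Namespace, statistic)
}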
@@ -3,13 +3,13 @@ package cloudwatch
 import (
 	"errors"
 	"fmt"
+	"strconv"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
 	"github.com/grafana/grafana-plugin-sdk-go/backend"
 	"github.com/grafana/grafana-plugin-sdk-go/data"
-	"github.com/grafana/grafana/pkg/components/simplejson"
 )

 type annotationEvent struct {
@@ -19,29 +19,37 @@ type annotationEvent struct {
 	Text string
 }

-func (e *cloudWatchExecutor) executeAnnotationQuery(pluginCtx backend.PluginContext, model *simplejson.Json, query backend.DataQuery) (*backend.QueryDataResponse, error) {
+func (e *cloudWatchExecutor) executeAnnotationQuery(pluginCtx backend.PluginContext, model DataQueryJson, query backend.DataQuery) (*backend.QueryDataResponse, error) {
 	result := backend.NewQueryDataResponse()
+	statistic := ""

-	usePrefixMatch := model.Get("prefixMatching").MustBool(false)
-	region := model.Get("region").MustString("")
-	namespace := model.Get("namespace").MustString("")
-	metricName := model.Get("metricName").MustString("")
-	dimensions := model.Get("dimensions").MustMap()
-	statistic := model.Get("statistic").MustString()
-	period := int64(model.Get("period").MustInt(0))
-	if period == 0 && !usePrefixMatch {
+	if model.Statistic != nil {
+		statistic = *model.Statistic
+	}
+
+	var period int64
+	if model.Period != "" {
+		p, err := strconv.ParseInt(model.Period, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		period = p
+	}
+
+	if period == 0 && !model.PrefixMatching {
 		period = 300
 	}
-	actionPrefix := model.Get("actionPrefix").MustString("")
-	alarmNamePrefix := model.Get("alarmNamePrefix").MustString("")
-
-	cli, err := e.getCWClient(pluginCtx, region)
+	actionPrefix := model.ActionPrefix
+	alarmNamePrefix := model.AlarmNamePrefix
+
+	cli, err := e.getCWClient(pluginCtx, model.Region)
 	if err != nil {
 		return nil, err
 	}

 	var alarmNames []*string
-	if usePrefixMatch {
+	if model.PrefixMatching {
 		params := &cloudwatch.DescribeAlarmsInput{
 			MaxRecords:   aws.Int64(100),
 			ActionPrefix: aws.String(actionPrefix),
@@ -51,14 +59,14 @@ func (e *cloudWatchExecutor) executeAnnotationQuery(pluginCtx backend.PluginCont
 		if err != nil {
 			return nil, fmt.Errorf("%v: %w", "failed to call cloudwatch:DescribeAlarms", err)
 		}
-		alarmNames = filterAlarms(resp, namespace, metricName, dimensions, statistic, period)
+		alarmNames = filterAlarms(resp, model.Namespace, model.MetricName, model.Dimensions, statistic, period)
 	} else {
-		if region == "" || namespace == "" || metricName == "" || statistic == "" {
+		if model.Region == "" || model.Namespace == "" || model.MetricName == "" || statistic == "" {
 			return result, errors.New("invalid annotations query")
 		}

 		var qd []*cloudwatch.Dimension
-		for k, v := range dimensions {
+		for k, v := range model.Dimensions {
 			if vv, ok := v.([]interface{}); ok {
 				for _, vvv := range vv {
 					if vvvv, ok := vvv.(string); ok {
@@ -71,8 +79,8 @@ func (e *cloudWatchExecutor) executeAnnotationQuery(pluginCtx backend.PluginCont
 			}
 		}
 		params := &cloudwatch.DescribeAlarmsForMetricInput{
-			Namespace:  aws.String(namespace),
-			MetricName: aws.String(metricName),
+			Namespace:  aws.String(model.Namespace),
+			MetricName: aws.String(model.MetricName),
 			Dimensions: qd,
 			Statistic:  aws.String(statistic),
 			Period:     aws.Int64(period),
@@ -25,7 +25,6 @@ import (
 	"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
 	"github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter"
 	"github.com/grafana/grafana-plugin-sdk-go/data"
-	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/infra/httpclient"
 	"github.com/grafana/grafana/pkg/infra/log"
 	"github.com/grafana/grafana/pkg/services/featuremgmt"
@@ -49,6 +48,20 @@ type datasourceInfo struct {
 	HTTPClient *http.Client
 }

+type DataQueryJson struct {
+	QueryType       string `json:"type,omitempty"`
+	QueryMode       string
+	PrefixMatching  bool
+	Region          string
+	Namespace       string
+	MetricName      string
+	Dimensions      map[string]interface{}
+	Statistic       *string
+	Period          string
+	ActionPrefix    string
+	AlarmNamePrefix string
+}
+
 const (
 	cloudWatchTSFormat = "2006-01-02 15:04:05.000"
 	defaultRegion      = "default"
@@ -59,10 +72,12 @@ const (

 	alertMaxAttempts = 8
 	alertPollPeriod  = 1000 * time.Millisecond
+	logsQueryMode    = "Logs"
 )

 var plog = log.New("tsdb.cloudwatch")
 var aliasFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
+var baseLimit = int64(1)

 func ProvideService(cfg *setting.Cfg, httpClientProvider httpclient.Provider, features featuremgmt.FeatureToggles) *CloudWatchService {
 	plog.Debug("initing")
@@ -188,7 +203,12 @@ func (e *cloudWatchExecutor) checkHealthLogs(ctx context.Context, pluginCtx back
 	if err != nil {
 		return err
 	}
-	_, err = e.handleDescribeLogGroups(ctx, logsClient, simplejson.NewFromAny(map[string]interface{}{"limit": "1"}))
+
+	parameters := LogQueryJson{
+		Limit: &baseLimit,
+	}
+
+	_, err = e.handleDescribeLogGroups(ctx, logsClient, parameters)
 	return err
 }

@@ -282,16 +302,16 @@ func (e *cloudWatchExecutor) getRGTAClient(pluginCtx backend.PluginContext, regi
 }

 func (e *cloudWatchExecutor) alertQuery(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	queryContext backend.DataQuery, model *simplejson.Json) (*cloudwatchlogs.GetQueryResultsOutput, error) {
+	queryContext backend.DataQuery, model LogQueryJson) (*cloudwatchlogs.GetQueryResultsOutput, error) {
 	startQueryOutput, err := e.executeStartQuery(ctx, logsClient, model, queryContext.TimeRange)
 	if err != nil {
 		return nil, err
 	}

-	requestParams := simplejson.NewFromAny(map[string]interface{}{
-		"region":  model.Get("region").MustString(""),
-		"queryId": *startQueryOutput.QueryId,
-	})
+	requestParams := LogQueryJson{
+		Region:  model.Region,
+		QueryId: *startQueryOutput.QueryId,
+	}

 	ticker := time.NewTicker(alertPollPeriod)
 	defer ticker.Stop()
@@ -324,18 +344,19 @@ func (e *cloudWatchExecutor) QueryData(ctx context.Context, req *backend.QueryDa
 	   frontend, but because alerts are executed on the backend the logic needs to be reimplemented here.
 	*/
 	q := req.Queries[0]
-	model, err := simplejson.NewJson(q.JSON)
+	var model DataQueryJson
+	err := json.Unmarshal(q.JSON, &model)
 	if err != nil {
 		return nil, err
 	}
 	_, fromAlert := req.Headers["FromAlert"]
-	isLogAlertQuery := fromAlert && model.Get("queryMode").MustString("") == "Logs"
+	isLogAlertQuery := fromAlert && model.QueryMode == logsQueryMode

 	if isLogAlertQuery {
 		return e.executeLogAlertQuery(ctx, req)
 	}

-	queryType := model.Get("type").MustString("")
+	queryType := model.QueryType

 	var result *backend.QueryDataResponse
 	switch queryType {
@@ -356,21 +377,22 @@ func (e *cloudWatchExecutor) executeLogAlertQuery(ctx context.Context, req *back
 	resp := backend.NewQueryDataResponse()

 	for _, q := range req.Queries {
-		model, err := simplejson.NewJson(q.JSON)
+		var model LogQueryJson
+		err := json.Unmarshal(q.JSON, &model)
 		if err != nil {
 			continue
 		}

-		model.Set("subtype", "StartQuery")
-		model.Set("queryString", model.Get("expression").MustString(""))
+		model.Subtype = "StartQuery"
+		model.QueryString = model.Expression

-		region := model.Get("region").MustString(defaultRegion)
-		if region == defaultRegion {
+		region := model.Region
+		if model.Region == "" || region == defaultRegion {
 			dsInfo, err := e.getDSInfo(req.PluginContext)
 			if err != nil {
 				return nil, err
 			}
-			model.Set("region", dsInfo.region)
+			model.Region = dsInfo.region
 		}

 		logsClient, err := e.getCWLogsClient(req.PluginContext, region)
@@ -389,10 +411,8 @@ func (e *cloudWatchExecutor) executeLogAlertQuery(ctx context.Context, req *back
 		}

 		var frames []*data.Frame
-
-		statsGroups := model.Get("statsGroups").MustStringArray()
-		if len(statsGroups) > 0 && len(dataframe.Fields) > 0 {
-			frames, err = groupResults(dataframe, statsGroups)
+		if len(model.StatsGroups) > 0 && len(dataframe.Fields) > 0 {
+			frames, err = groupResults(dataframe, model.StatsGroups)
 			if err != nil {
 				return nil, err
 			}
@@ -2,6 +2,7 @@ package cloudwatch

 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"math"
@@ -13,13 +14,13 @@ import (
 	"github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface"
 	"github.com/grafana/grafana-plugin-sdk-go/backend"
 	"github.com/grafana/grafana-plugin-sdk-go/data"
-	"github.com/grafana/grafana/pkg/components/simplejson"
 	"golang.org/x/sync/errgroup"
 )

 const (
-	LimitExceededException = "LimitExceededException"
-	defaultLimit           = 10
+	limitExceededException = "LimitExceededException"
+	defaultLimit           = int64(10)
+	logGroupDefaultLimit   = int64(50)
 )

 type AWSError struct {
@@ -28,6 +29,26 @@ type AWSError struct {
 	Payload map[string]string
 }

+type LogQueryJson struct {
+	LogType            string `json:"type"`
+	SubType            string
+	Limit              *int64
+	Time               int64
+	StartTime          int64
+	EndTime            int64
+	LogGroupName       string
+	LogGroupNames      []string
+	LogGroupNamePrefix string
+	LogStreamName      string
+	StartFromHead      bool
+	Region             string
+	QueryString        string
+	QueryId            string
+	StatsGroups        []string
+	Subtype            string
+	Expression         string
+}
+
 func (e *AWSError) Error() string {
 	return fmt.Sprintf("%s: %s", e.Code, e.Message)
 }
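A detail worth noting in LogQueryJson above: Limit is a *int64 rather than int64, so handlers can distinguish "no limit sent" (nil) from an explicit value; checkHealthLogs earlier passes &baseLimit for the same reason. A minimal standalone sketch of that convention follows — the type and function names here are hypothetical, only the nil-check mirrors the handlers below:

package main

import (
	"encoding/json"
	"fmt"
)

// logQueryModel trims LogQueryJson down to the one optional field.
type logQueryModel struct {
	Limit *int64 `json:"limit"`
}

// effectiveLimit applies the same rule as the handlers in this diff:
// use the client's limit when present and positive, else a default.
func effectiveLimit(q logQueryModel, def int64) int64 {
	if q.Limit != nil && *q.Limit > 0 {
		return *q.Limit
	}
	return def // field absent (nil) or non-positive: fall back
}

func main() {
	var withLimit, noLimit logQueryModel
	_ = json.Unmarshal([]byte(`{"limit": 12}`), &withLimit) // sketch: errors ignored
	_ = json.Unmarshal([]byte(`{}`), &noLimit)
	fmt.Println(effectiveLimit(withLimit, 10), effectiveLimit(noLimit, 10)) // 12 10
}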
@@ -39,7 +60,8 @@ func (e *cloudWatchExecutor) executeLogActions(ctx context.Context, req *backend
 	eg, ectx := errgroup.WithContext(ctx)

 	for _, query := range req.Queries {
-		model, err := simplejson.NewJson(query.JSON)
+		var model LogQueryJson
+		err := json.Unmarshal(query.JSON, &model)
 		if err != nil {
 			return nil, err
 		}
@@ -58,7 +80,7 @@ func (e *cloudWatchExecutor) executeLogActions(ctx context.Context, req *backend
 				return err
 			}

-			groupedFrames, err := groupResponseFrame(dataframe, model.Get("statsGroups").MustStringArray())
+			groupedFrames, err := groupResponseFrame(dataframe, model.StatsGroups)
 			if err != nil {
 				return err
 			}
@@ -86,25 +108,24 @@ func (e *cloudWatchExecutor) executeLogActions(ctx context.Context, req *backend
 	return resp, nil
 }

-func (e *cloudWatchExecutor) executeLogAction(ctx context.Context, model *simplejson.Json, query backend.DataQuery, pluginCtx backend.PluginContext) (*data.Frame, error) {
-	subType := model.Get("subtype").MustString()
-
+func (e *cloudWatchExecutor) executeLogAction(ctx context.Context, model LogQueryJson, query backend.DataQuery, pluginCtx backend.PluginContext) (*data.Frame, error) {
 	dsInfo, err := e.getDSInfo(pluginCtx)
 	if err != nil {
 		return nil, err
 	}

-	defaultRegion := dsInfo.region
+	region := dsInfo.region
+	if model.Region != "" {
+		region = model.Region
+	}

-	region := model.Get("region").MustString(defaultRegion)
 	logsClient, err := e.getCWLogsClient(pluginCtx, region)
 	if err != nil {
 		return nil, err
 	}

 	var data *data.Frame = nil

-	switch subType {
+	switch model.SubType {
 	case "DescribeLogGroups":
 		data, err = e.handleDescribeLogGroups(ctx, logsClient, model)
 	case "GetLogGroupFields":
@@ -119,38 +140,36 @@ func (e *cloudWatchExecutor) executeLogAction(ctx context.Context, model *simple
 		data, err = e.handleGetLogEvents(ctx, logsClient, model)
 	}
 	if err != nil {
-		return nil, fmt.Errorf("failed to execute log action with subtype: %s: %w", subType, err)
+		return nil, fmt.Errorf("failed to execute log action with subtype: %s: %w", model.SubType, err)
 	}

 	return data, nil
 }

 func (e *cloudWatchExecutor) handleGetLogEvents(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json) (*data.Frame, error) {
-	queryRequest := &cloudwatchlogs.GetLogEventsInput{
-		Limit:         aws.Int64(parameters.Get("limit").MustInt64(defaultLimit)),
-		StartFromHead: aws.Bool(parameters.Get("startFromHead").MustBool(false)),
+	parameters LogQueryJson) (*data.Frame, error) {
+	limit := defaultLimit
+	if parameters.Limit != nil && *parameters.Limit > 0 {
+		limit = *parameters.Limit
+	}
+
+	queryRequest := &cloudwatchlogs.GetLogEventsInput{
+		Limit:         aws.Int64(limit),
+		StartFromHead: aws.Bool(parameters.StartFromHead),
 	}

-	logGroupName, err := parameters.Get("logGroupName").String()
-	if err != nil {
+	if parameters.LogGroupName == "" {
 		return nil, fmt.Errorf("Error: Parameter 'logGroupName' is required")
 	}
-	queryRequest.SetLogGroupName(logGroupName)
+	queryRequest.SetLogGroupName(parameters.LogGroupName)

-	logStreamName, err := parameters.Get("logStreamName").String()
-	if err != nil {
-		return nil, fmt.Errorf("Error: Parameter 'logStream' is required")
+	if parameters.LogStreamName == "" {
+		return nil, fmt.Errorf("Error: Parameter 'logStreamName' is required")
 	}
-	queryRequest.SetLogStreamName(logStreamName)
+	queryRequest.SetLogStreamName(parameters.LogStreamName)

-	if startTime, err := parameters.Get("startTime").Int64(); err == nil {
-		queryRequest.SetStartTime(startTime)
-	}
-
-	if endTime, err := parameters.Get("endTime").Int64(); err == nil {
-		queryRequest.SetEndTime(endTime)
-	}
+	queryRequest.SetStartTime(parameters.StartTime)
+	queryRequest.SetEndTime(parameters.EndTime)

 	logEvents, err := logsClient.GetLogEventsWithContext(ctx, queryRequest)
 	if err != nil {
@@ -178,19 +197,22 @@ func (e *cloudWatchExecutor) handleGetLogEvents(ctx context.Context, logsClient
 }

 func (e *cloudWatchExecutor) handleDescribeLogGroups(ctx context.Context,
-	logsClient cloudwatchlogsiface.CloudWatchLogsAPI, parameters *simplejson.Json) (*data.Frame, error) {
-	logGroupNamePrefix := parameters.Get("logGroupNamePrefix").MustString("")
+	logsClient cloudwatchlogsiface.CloudWatchLogsAPI, parameters LogQueryJson) (*data.Frame, error) {
+	logGroupLimit := logGroupDefaultLimit
+	if parameters.Limit != nil && *parameters.Limit != 0 {
+		logGroupLimit = *parameters.Limit
+	}

 	var response *cloudwatchlogs.DescribeLogGroupsOutput = nil
 	var err error
-	if len(logGroupNamePrefix) == 0 {
+	if len(parameters.LogGroupNamePrefix) == 0 {
 		response, err = logsClient.DescribeLogGroupsWithContext(ctx, &cloudwatchlogs.DescribeLogGroupsInput{
-			Limit: aws.Int64(parameters.Get("limit").MustInt64(50)),
+			Limit: aws.Int64(logGroupLimit),
 		})
 	} else {
 		response, err = logsClient.DescribeLogGroupsWithContext(ctx, &cloudwatchlogs.DescribeLogGroupsInput{
-			Limit:              aws.Int64(parameters.Get("limit").MustInt64(50)),
-			LogGroupNamePrefix: aws.String(logGroupNamePrefix),
+			Limit:              aws.Int64(logGroupLimit),
+			LogGroupNamePrefix: aws.String(parameters.LogGroupNamePrefix),
 		})
 	}
 	if err != nil || response == nil {
@@ -209,7 +231,7 @@ func (e *cloudWatchExecutor) handleDescribeLogGroups(ctx context.Context,
 }

 func (e *cloudWatchExecutor) executeStartQuery(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json, timeRange backend.TimeRange) (*cloudwatchlogs.StartQueryOutput, error) {
+	parameters LogQueryJson, timeRange backend.TimeRange) (*cloudwatchlogs.StartQueryOutput, error) {
 	startTime := timeRange.From
 	endTime := timeRange.To

@@ -222,7 +244,7 @@ func (e *cloudWatchExecutor) executeStartQuery(ctx context.Context, logsClient c
 	// The usage of ltrim around the @log/@logStream fields is a necessary workaround, as without it,
 	// CloudWatch wouldn't consider a query using a non-alised @log/@logStream valid.
 	modifiedQueryString := "fields @timestamp,ltrim(@log) as " + logIdentifierInternal + ",ltrim(@logStream) as " +
-		logStreamIdentifierInternal + "|" + parameters.Get("queryString").MustString("")
+		logStreamIdentifierInternal + "|" + parameters.QueryString

 	startQueryInput := &cloudwatchlogs.StartQueryInput{
 		StartTime: aws.Int64(startTime.Unix()),
@@ -232,25 +254,25 @@ func (e *cloudWatchExecutor) executeStartQuery(ctx context.Context, logsClient c
 		// and also a little bit more but as CW logs accept only seconds as integers there is not much to do about
 		// that.
 		EndTime:       aws.Int64(int64(math.Ceil(float64(endTime.UnixNano()) / 1e9))),
-		LogGroupNames: aws.StringSlice(parameters.Get("logGroupNames").MustStringArray()),
+		LogGroupNames: aws.StringSlice(parameters.LogGroupNames),
 		QueryString:   aws.String(modifiedQueryString),
 	}

-	if resultsLimit, err := parameters.Get("limit").Int64(); err == nil {
-		startQueryInput.Limit = aws.Int64(resultsLimit)
+	if parameters.Limit != nil {
+		startQueryInput.Limit = aws.Int64(*parameters.Limit)
 	}

 	return logsClient.StartQueryWithContext(ctx, startQueryInput)
 }

 func (e *cloudWatchExecutor) handleStartQuery(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	model *simplejson.Json, timeRange backend.TimeRange, refID string) (*data.Frame, error) {
+	model LogQueryJson, timeRange backend.TimeRange, refID string) (*data.Frame, error) {
 	startQueryResponse, err := e.executeStartQuery(ctx, logsClient, model, timeRange)
 	if err != nil {
 		var awsErr awserr.Error
 		if errors.As(err, &awsErr) && awsErr.Code() == "LimitExceededException" {
 			plog.Debug("executeStartQuery limit exceeded", "err", awsErr)
-			return nil, &AWSError{Code: LimitExceededException, Message: err.Error()}
+			return nil, &AWSError{Code: limitExceededException, Message: err.Error()}
 		}
 		return nil, err
 	}
@@ -258,11 +280,14 @@ func (e *cloudWatchExecutor) handleStartQuery(ctx context.Context, logsClient cl
 	dataFrame := data.NewFrame(refID, data.NewField("queryId", nil, []string{*startQueryResponse.QueryId}))
 	dataFrame.RefID = refID

-	clientRegion := model.Get("region").MustString("default")
+	region := "default"
+	if model.Region != "" {
+		region = model.Region
+	}

 	dataFrame.Meta = &data.FrameMeta{
 		Custom: map[string]interface{}{
-			"Region": clientRegion,
+			"Region": region,
 		},
 	}

@@ -270,9 +295,9 @@ func (e *cloudWatchExecutor) handleStartQuery(ctx context.Context, logsClient cl
 }

 func (e *cloudWatchExecutor) executeStopQuery(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json) (*cloudwatchlogs.StopQueryOutput, error) {
+	parameters LogQueryJson) (*cloudwatchlogs.StopQueryOutput, error) {
 	queryInput := &cloudwatchlogs.StopQueryInput{
-		QueryId: aws.String(parameters.Get("queryId").MustString()),
+		QueryId: aws.String(parameters.QueryId),
 	}

 	response, err := logsClient.StopQueryWithContext(ctx, queryInput)
@@ -291,7 +316,7 @@ func (e *cloudWatchExecutor) executeStopQuery(ctx context.Context, logsClient cl
 }

 func (e *cloudWatchExecutor) handleStopQuery(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json) (*data.Frame, error) {
+	parameters LogQueryJson) (*data.Frame, error) {
 	response, err := e.executeStopQuery(ctx, logsClient, parameters)
 	if err != nil {
 		return nil, err
@@ -302,16 +327,16 @@ func (e *cloudWatchExecutor) handleStopQuery(ctx context.Context, logsClient clo
 }

 func (e *cloudWatchExecutor) executeGetQueryResults(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json) (*cloudwatchlogs.GetQueryResultsOutput, error) {
+	parameters LogQueryJson) (*cloudwatchlogs.GetQueryResultsOutput, error) {
 	queryInput := &cloudwatchlogs.GetQueryResultsInput{
-		QueryId: aws.String(parameters.Get("queryId").MustString()),
+		QueryId: aws.String(parameters.QueryId),
 	}

 	return logsClient.GetQueryResultsWithContext(ctx, queryInput)
 }

 func (e *cloudWatchExecutor) handleGetQueryResults(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json, refID string) (*data.Frame, error) {
+	parameters LogQueryJson, refID string) (*data.Frame, error) {
 	getQueryResultsOutput, err := e.executeGetQueryResults(ctx, logsClient, parameters)
 	if err != nil {
 		return nil, err
@@ -329,10 +354,10 @@ func (e *cloudWatchExecutor) handleGetQueryResults(ctx context.Context, logsClie
 }

 func (e *cloudWatchExecutor) handleGetLogGroupFields(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI,
-	parameters *simplejson.Json, refID string) (*data.Frame, error) {
+	parameters LogQueryJson, refID string) (*data.Frame, error) {
 	queryInput := &cloudwatchlogs.GetLogGroupFieldsInput{
-		LogGroupName: aws.String(parameters.Get("logGroupName").MustString()),
-		Time:         aws.Int64(parameters.Get("time").MustInt64()),
+		LogGroupName: aws.String(parameters.LogGroupName),
+		Time:         aws.Int64(parameters.Time),
 	}

 	getLogGroupFieldsOutput, err := logsClient.GetLogGroupFieldsWithContext(ctx, queryInput)
@@ -406,33 +406,6 @@ func Test_executeStartQuery(t *testing.T) {
 		}, cli.calls.startQueryWithContext)
 	})

-	t.Run("cannot parse limit as float", func(t *testing.T) {
-		cli = fakeCWLogsClient{}
-		im := datasource.NewInstanceManager(func(s backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
-			return datasourceInfo{}, nil
-		})
-		executor := newExecutor(im, newTestConfig(), &fakeSessionCache{}, featuremgmt.WithFeatures())
-
-		_, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
-			PluginContext: backend.PluginContext{DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{}},
-			Queries: []backend.DataQuery{
-				{
-					RefID:     "A",
-					TimeRange: backend.TimeRange{From: time.Unix(0, 0), To: time.Unix(1, 0)},
-					JSON: json.RawMessage(`{
-						"type": "logAction",
-						"subtype": "StartQuery",
-						"limit": 12.0
-					}`),
-				},
-			},
-		})
-
-		assert.NoError(t, err)
-		require.Len(t, cli.calls.startQueryWithContext, 1)
-		assert.Nil(t, cli.calls.startQueryWithContext[0].Limit)
-	})
-
 	t.Run("does not populate StartQueryInput.limit when no limit provided", func(t *testing.T) {
 		cli = fakeCWLogsClient{}
 		im := datasource.NewInstanceManager(func(s backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
@@ -1,6 +1,7 @@
 package cloudwatch

 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"math"
@@ -12,32 +13,60 @@ import (

 	"github.com/google/uuid"
 	"github.com/grafana/grafana-plugin-sdk-go/backend"
-	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/services/featuremgmt"
 )

 var validMetricDataID = regexp.MustCompile(`^[a-z][a-zA-Z0-9_]*$`)

+type QueryJson struct {
+	Datasource        map[string]string      `json:"datasource,omitempty"`
+	Dimensions        map[string]interface{} `json:"dimensions,omitempty"`
+	Expression        string                 `json:"expression,omitempty"`
+	Id                string                 `json:"id,omitempty"`
+	Label             *string                `json:"label,omitempty"`
+	MatchExact        *bool                  `json:"matchExact,omitempty"`
+	MaxDataPoints     int                    `json:"maxDataPoints,omitempty"`
+	MetricEditorMode  *int                   `json:"metricEditorMode,omitempty"`
+	MetricName        string                 `json:"metricName,omitempty"`
+	MetricQueryType   metricQueryType        `json:"metricQueryType,omitempty"`
+	Namespace         string                 `json:"namespace,omitempty"`
+	Period            string                 `json:"period,omitempty"`
+	RefId             string                 `json:"refId,omitempty"`
+	Region            string                 `json:"region,omitempty"`
+	SqlExpression     string                 `json:"sqlExpression,omitempty"`
+	Statistic         *string                `json:"statistic,omitempty"`
+	Statistics        []*string              `json:"statistics,omitempty"`
+	TimezoneUTCOffset string                 `json:"timezoneUTCOffset,omitempty"`
+	QueryType         string                 `json:"queryType,omitempty"`
+	Hide              *bool                  `json:"hide,omitempty"`
+	Alias             *string                `json:"alias,omitempty"`
+}
+
 // parseQueries parses the json queries and returns a map of cloudWatchQueries by region. The cloudWatchQuery has a 1 to 1 mapping to a query editor row
 func (e *cloudWatchExecutor) parseQueries(queries []backend.DataQuery, startTime time.Time, endTime time.Time) (map[string][]*cloudWatchQuery, error) {
 	requestQueries := make(map[string][]*cloudWatchQuery)

 	migratedQueries, err := migrateLegacyQuery(queries, e.features.IsEnabled(featuremgmt.FlagCloudWatchDynamicLabels))
 	if err != nil {
 		return nil, err
 	}

 	for _, query := range migratedQueries {
-		model, err := simplejson.NewJson(query.JSON)
+		var model QueryJson
+		err := json.Unmarshal(query.JSON, &model)
 		if err != nil {
 			return nil, &queryError{err: err, RefID: query.RefID}
 		}

-		queryType := model.Get("type").MustString()
+		queryType := model.QueryType
 		if queryType != "timeSeriesQuery" && queryType != "" {
 			continue
 		}

+		if model.MatchExact == nil {
+			trueBooleanValue := true
+			model.MatchExact = &trueBooleanValue
+		}
+
 		refID := query.RefID
 		query, err := parseRequestQuery(model, refID, startTime, endTime)
 		if err != nil {
@@ -58,7 +87,8 @@ func migrateLegacyQuery(queries []backend.DataQuery, dynamicLabelsEnabled bool)
 	migratedQueries := []*backend.DataQuery{}
 	for _, q := range queries {
 		query := q
-		queryJson, err := simplejson.NewJson(query.JSON)
+		var queryJson *QueryJson
+		err := json.Unmarshal(query.JSON, &queryJson)
 		if err != nil {
 			return nil, err
 		}
@@ -67,12 +97,10 @@ func migrateLegacyQuery(queries []backend.DataQuery, dynamicLabelsEnabled bool)
 			return nil, err
 		}

-		_, labelExists := queryJson.CheckGet("label")
-		if !labelExists && dynamicLabelsEnabled {
+		if queryJson.Label == nil && dynamicLabelsEnabled {
 			migrateAliasToDynamicLabel(queryJson)
 		}

-		query.JSON, err = queryJson.MarshalJSON()
+		query.JSON, err = json.Marshal(queryJson)
 		if err != nil {
 			return nil, err
 		}
@@ -86,16 +114,15 @@ func migrateLegacyQuery(queries []backend.DataQuery, dynamicLabelsEnabled bool)
 // migrateStatisticsToStatistic migrates queries that has a `statistics` field to use the `statistic` field instead.
 // In case the query used more than one stat, the first stat in the slice will be used in the statistic field
 // Read more here https://github.com/grafana/grafana/issues/30629
-func migrateStatisticsToStatistic(queryJson *simplejson.Json) error {
-	_, err := queryJson.Get("statistic").String()
+func migrateStatisticsToStatistic(queryJson *QueryJson) error {
 	// If there's not a statistic property in the json, we know it's the legacy format and then it has to be migrated
-	if err != nil {
-		stats, err := queryJson.Get("statistics").StringArray()
-		if err != nil {
+	if queryJson.Statistic == nil {
+		if queryJson.Statistics == nil {
 			return fmt.Errorf("query must have either statistic or statistics field")
 		}
-		queryJson.Del("statistics")
-		queryJson.Set("statistic", stats[0])
+
+		queryJson.Statistic = queryJson.Statistics[0]
+		queryJson.Statistics = nil
 	}

 	return nil
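The migration above is easiest to see on a concrete value. A standalone sketch with a trimmed struct — the names queryModel and migrate are hypothetical, but the promote-first-entry rule matches migrateStatisticsToStatistic:

package main

import (
	"fmt"
)

// queryModel trims QueryJson down to the two fields the migration touches.
type queryModel struct {
	Statistic  *string
	Statistics []*string
}

// migrate applies the same rule as migrateStatisticsToStatistic above:
// if the legacy Statistics slice is set and Statistic is not, promote
// the first entry and drop the slice.
func migrate(q *queryModel) error {
	if q.Statistic == nil {
		if q.Statistics == nil {
			return fmt.Errorf("query must have either statistic or statistics field")
		}
		q.Statistic = q.Statistics[0]
		q.Statistics = nil
	}
	return nil
}

func main() {
	avg, max := "Average", "Maximum"
	q := queryModel{Statistics: []*string{&avg, &max}}
	if err := migrate(&q); err != nil {
		panic(err)
	}
	fmt.Println(*q.Statistic, q.Statistics) // Average []
}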
@@ -112,10 +139,13 @@ var aliasPatterns = map[string]string{

 var legacyAliasRegexp = regexp.MustCompile(`{{\s*(.+?)\s*}}`)

-func migrateAliasToDynamicLabel(queryJson *simplejson.Json) {
-	fullAliasField := queryJson.Get("alias").MustString()
-	if fullAliasField != "" {
-		matches := legacyAliasRegexp.FindAllStringSubmatch(fullAliasField, -1)
+func migrateAliasToDynamicLabel(queryJson *QueryJson) {
+	fullAliasField := ""
+
+	if queryJson.Alias != nil && *queryJson.Alias != "" {
+		matches := legacyAliasRegexp.FindAllStringSubmatch(*queryJson.Alias, -1)
+		fullAliasField = *queryJson.Alias

 		for _, groups := range matches {
 			fullMatch := groups[0]
 			subgroup := groups[1]
@@ -126,36 +156,36 @@ func migrateAliasToDynamicLabel(queryJson *simplejson.Json) {
 			}
 		}
 	}

-	queryJson.Set("label", fullAliasField)
+	queryJson.Label = &fullAliasField
 }

-func parseRequestQuery(model *simplejson.Json, refId string, startTime time.Time, endTime time.Time) (*cloudWatchQuery, error) {
+func parseRequestQuery(model QueryJson, refId string, startTime time.Time, endTime time.Time) (*cloudWatchQuery, error) {
 	plog.Debug("Parsing request query", "query", model)
+	cloudWatchQuery := cloudWatchQuery{
+		Alias:             "",
+		Label:             "",
+		MatchExact:        true,
+		Statistic:         "",
+		ReturnData:        false,
+		UsedExpression:    "",
+		RefId:             refId,
+		Id:                model.Id,
+		Region:            model.Region,
+		Namespace:         model.Namespace,
+		MetricName:        model.MetricName,
+		MetricQueryType:   model.MetricQueryType,
+		SqlExpression:     model.SqlExpression,
+		TimezoneUTCOffset: model.TimezoneUTCOffset,
+		Expression:        model.Expression,
+	}
 	reNumber := regexp.MustCompile(`^\d+$`)
-	region, err := model.Get("region").String()
-	if err != nil {
-		return nil, err
-	}
-	namespace, err := model.Get("namespace").String()
-	if err != nil {
-		return nil, fmt.Errorf("failed to get namespace: %v", err)
-	}
-	metricName, err := model.Get("metricName").String()
-	if err != nil {
-		return nil, fmt.Errorf("failed to get metricName: %v", err)
-	}
-	dimensions, err := parseDimensions(model)
+	dimensions, err := parseDimensions(model.Dimensions)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse dimensions: %v", err)
 	}
+	cloudWatchQuery.Dimensions = dimensions

-	statistic, err := model.Get("statistic").String()
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse statistic: %v", err)
-	}
-
-	p := model.Get("period").MustString("")
+	p := model.Period
 	var period int
 	if strings.ToLower(p) == "auto" || p == "" {
 		deltaInSeconds := endTime.Sub(startTime).Seconds()
@@ -182,9 +212,9 @@ func parseRequestQuery(model *simplejson.Json, refId string, startTime time.Time
 			period = int(d.Seconds())
 		}
 	}
+	cloudWatchQuery.Period = period

-	id := model.Get("id").MustString("")
-	if id == "" {
+	if model.Id == "" {
 		// Why not just use refId if id is not specified in the frontend? When specifying an id in the editor,
 		// and alphabetical must be used. The id must be unique, so if an id like for example a, b or c would be used,
 		// it would likely collide with some ref id. That's why the `query` prefix is used.
@@ -193,55 +223,48 @@ func parseRequestQuery(model *simplejson.Json, refId string, startTime time.Time
 			uuid := uuid.NewString()
 			suffix = strings.Replace(uuid, "-", "", -1)
 		}
-		id = fmt.Sprintf("query%s", suffix)
+		cloudWatchQuery.Id = fmt.Sprintf("query%s", suffix)
 	}
-	expression := model.Get("expression").MustString("")
-	sqlExpression := model.Get("sqlExpression").MustString("")
-	alias := model.Get("alias").MustString()
-	label := model.Get("label").MustString()
-	returnData := !model.Get("hide").MustBool(false)
-	queryType := model.Get("type").MustString()
-	timezoneUTCOffset := model.Get("timezoneUTCOffset").MustString("")

-	if queryType == "" {
+	if model.Hide != nil {
+		cloudWatchQuery.ReturnData = !*model.Hide
+	}
+
+	if model.QueryType == "" {
 		// If no type is provided we assume we are called by alerting service, which requires to return data!
 		// Note, this is sort of a hack, but the official Grafana interfaces do not carry the information
 		// who (which service) called the TsdbQueryEndpoint.Query(...) function.
-		returnData = true
+		cloudWatchQuery.ReturnData = true
 	}

-	matchExact := model.Get("matchExact").MustBool(true)
-	metricQueryType := metricQueryType(model.Get("metricQueryType").MustInt(0))
-
-	var metricEditorModeValue metricEditorMode
-	memv, err := model.Get("metricEditorMode").Int()
-	if err != nil && len(expression) > 0 {
+	if model.MetricEditorMode == nil && len(model.Expression) > 0 {
 		// this should only ever happen if this is an alerting query that has not yet been migrated in the frontend
-		metricEditorModeValue = MetricEditorModeRaw
+		cloudWatchQuery.MetricEditorMode = MetricEditorModeRaw
 	} else {
-		metricEditorModeValue = metricEditorMode(memv)
+		if model.MetricEditorMode != nil {
+			cloudWatchQuery.MetricEditorMode = metricEditorMode(*model.MetricEditorMode)
+		} else {
+			cloudWatchQuery.MetricEditorMode = metricEditorMode(0)
+		}
 	}

-	return &cloudWatchQuery{
-		RefId:             refId,
-		Region:            region,
-		Id:                id,
-		Namespace:         namespace,
-		MetricName:        metricName,
-		Statistic:         statistic,
-		Expression:        expression,
-		ReturnData:        returnData,
-		Dimensions:        dimensions,
-		Period:            period,
-		Alias:             alias,
-		Label:             label,
-		MatchExact:        matchExact,
-		UsedExpression:    "",
-		MetricQueryType:   metricQueryType,
-		MetricEditorMode:  metricEditorModeValue,
-		SqlExpression:     sqlExpression,
-		TimezoneUTCOffset: timezoneUTCOffset,
-	}, nil
+	if model.Statistic != nil {
+		cloudWatchQuery.Statistic = *model.Statistic
+	}
+
+	if model.MatchExact != nil {
+		cloudWatchQuery.MatchExact = *model.MatchExact
+	}
+
+	if model.Alias != nil {
+		cloudWatchQuery.Alias = *model.Alias
+	}
+
+	if model.Label != nil {
+		cloudWatchQuery.Label = *model.Label
+	}
+
+	return &cloudWatchQuery, nil
 }

 func getRetainedPeriods(timeSince time.Duration) []int {
@@ -257,9 +280,9 @@ func getRetainedPeriods(timeSince time.Duration) []int {
 	}
 }

-func parseDimensions(model *simplejson.Json) (map[string][]string, error) {
+func parseDimensions(dimensions map[string]interface{}) (map[string][]string, error) {
 	parsedDimensions := make(map[string][]string)
-	for k, v := range model.Get("dimensions").MustMap() {
+	for k, v := range dimensions {
 		// This is for backwards compatibility. Before 6.5 dimensions values were stored as strings and not arrays
 		if value, ok := v.(string); ok {
 			parsedDimensions[k] = []string{value}
@@ -1,12 +1,12 @@
 package cloudwatch

 import (
+	"encoding/json"
 	"fmt"
 	"testing"
 	"time"

 	"github.com/grafana/grafana-plugin-sdk-go/backend"
-	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -37,32 +37,33 @@ func TestRequestParser(t *testing.T) {

 			migratedQuery := migratedQueries[0]
 			assert.Equal(t, "A", migratedQuery.RefID)
-			model, err := simplejson.NewJson(migratedQuery.JSON)
+			var model QueryJson
+			err = json.Unmarshal(migratedQuery.JSON, &model)
 			require.NoError(t, err)
-			assert.Equal(t, "Average", model.Get("statistic").MustString())
-			res, err := model.Get("statistic").Array()
-			assert.Error(t, err)
-			assert.Nil(t, res)
+			assert.Equal(t, "Average", *model.Statistic)
 		})
 	})

 	t.Run("New dimensions structure", func(t *testing.T) {
-		query := simplejson.NewFromAny(map[string]interface{}{
+		fixtureJSON := []byte(`{
 			"refId": "ref1",
 			"region": "us-east-1",
 			"namespace": "ec2",
 			"metricName": "CPUUtilization",
 			"id": "",
 			"expression": "",
-			"dimensions": map[string]interface{}{
-				"InstanceId": []interface{}{"test"},
-				"InstanceType": []interface{}{"test2", "test3"},
+			"dimensions": {
+				"InstanceId": ["test"],
+				"InstanceType": ["test2", "test3"]
 			},
 			"statistic": "Average",
 			"period": "600",
-			"hide": false,
-		})
+			"hide": false
+		}`)
+
+		var query QueryJson
+		err := json.Unmarshal(fixtureJSON, &query)
+		require.NoError(t, err)
 		res, err := parseRequestQuery(query, "ref1", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))
 		require.NoError(t, err)
 		assert.Equal(t, "us-east-1", res.Region)
@@ -81,21 +82,25 @@ func TestRequestParser(t *testing.T) {
 	})

 	t.Run("Old dimensions structure (backwards compatibility)", func(t *testing.T) {
-		query := simplejson.NewFromAny(map[string]interface{}{
+		fixtureJSON := []byte(`{
 			"refId": "ref1",
 			"region": "us-east-1",
 			"namespace": "ec2",
 			"metricName": "CPUUtilization",
 			"id": "",
 			"expression": "",
-			"dimensions": map[string]interface{}{
-				"InstanceId": "test",
-				"InstanceType": "test2",
+			"dimensions": {
+				"InstanceId": ["test"],
+				"InstanceType": ["test2"]
 			},
 			"statistic": "Average",
 			"period": "600",
-			"hide": false,
-		})
+			"hide": false
+		}`)
+
+		var query QueryJson
+		err := json.Unmarshal(fixtureJSON, &query)
+		require.NoError(t, err)

 		res, err := parseRequestQuery(query, "ref1", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))
 		require.NoError(t, err)
@@ -115,21 +120,25 @@ func TestRequestParser(t *testing.T) {
 	})

 	t.Run("Period defined in the editor by the user is being used when time range is short", func(t *testing.T) {
-		query := simplejson.NewFromAny(map[string]interface{}{
+		fixtureJSON := []byte(`{
 			"refId": "ref1",
 			"region": "us-east-1",
 			"namespace": "ec2",
 			"metricName": "CPUUtilization",
 			"id": "",
 			"expression": "",
-			"dimensions": map[string]interface{}{
-				"InstanceId": "test",
-				"InstanceType": "test2",
+			"dimensions": {
+				"InstanceId": ["test"],
+				"InstanceType": ["test2"]
 			},
 			"statistic": "Average",
-			"hide": false,
-		})
-		query.Set("period", "900")
+			"hide": false
+		}`)
+
+		var query QueryJson
+		err := json.Unmarshal(fixtureJSON, &query)
+		require.NoError(t, err)
+		query.Period = "900"

 		res, err := parseRequestQuery(query, "ref1", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))
 		require.NoError(t, err)
@@ -137,24 +146,28 @@ func TestRequestParser(t *testing.T) {
 	})

 	t.Run("Period is parsed correctly if not defined by user", func(t *testing.T) {
-		query := simplejson.NewFromAny(map[string]interface{}{
+		fixtureJSON := []byte(`{
 			"refId": "ref1",
 			"region": "us-east-1",
 			"namespace": "ec2",
 			"metricName": "CPUUtilization",
 			"id": "",
 			"expression": "",
-			"dimensions": map[string]interface{}{
-				"InstanceId": "test",
-				"InstanceType": "test2",
+			"dimensions": {
+				"InstanceId": ["test"],
+				"InstanceType": ["test2"]
 			},
 			"statistic": "Average",
 			"hide": false,
-			"period": "auto",
-		})
+			"period": "auto"
+		}`)
+
+		var query QueryJson
+		err := json.Unmarshal(fixtureJSON, &query)
+		require.NoError(t, err)

 		t.Run("Time range is 5 minutes", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.Local().Add(time.Minute * time.Duration(5))

@@ -164,7 +177,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 1 day", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(0, 0, -1)

@@ -174,7 +187,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 2 days", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(0, 0, -2)
 			res, err := parseRequestQuery(query, "ref1", from, to)
@@ -183,7 +196,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 7 days", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(0, 0, -7)

@@ -193,7 +206,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 30 days", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(0, 0, -30)

@@ -203,7 +216,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 90 days", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(0, 0, -90)

@@ -213,7 +226,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 1 year", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(-1, 0, 0)

@@ -223,7 +236,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 2 years", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now()
 			from := to.AddDate(-2, 0, 0)

@@ -233,7 +246,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 2 days, but 16 days ago", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now().AddDate(0, 0, -14)
 			from := to.AddDate(0, 0, -2)
 			res, err := parseRequestQuery(query, "ref1", from, to)
@@ -242,7 +255,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 2 days, but 90 days ago", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now().AddDate(0, 0, -88)
 			from := to.AddDate(0, 0, -2)
 			res, err := parseRequestQuery(query, "ref1", from, to)
@@ -251,7 +264,7 @@ func TestRequestParser(t *testing.T) {
 		})

 		t.Run("Time range is 2 days, but 456 days ago", func(t *testing.T) {
-			query.Set("period", "auto")
+			query.Period = "auto"
 			to := time.Now().AddDate(0, 0, -454)
 			from := to.AddDate(0, 0, -2)
 			res, err := parseRequestQuery(query, "ref1", from, to)
@@ -273,7 +286,7 @@ func TestRequestParser(t *testing.T) {

 		t.Run("and an expression is specified it should be metric search builder", func(t *testing.T) {
 			query := getBaseJsonQuery()
-			query.Set("expression", "SUM(a)")
+			query.Expression = "SUM(a)"
 			res, err := parseRequestQuery(query, "ref1", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))
 			require.NoError(t, err)
 			assert.Equal(t, MetricQueryTypeSearch, res.MetricQueryType)
@@ -284,7 +297,7 @@ func TestRequestParser(t *testing.T) {

 		t.Run("and an expression is specified it should be metric search builder", func(t *testing.T) {
 			query := getBaseJsonQuery()
-			query.Set("expression", "SUM(a)")
+			query.Expression = "SUM(a)"
 			res, err := parseRequestQuery(query, "ref1", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))
 			require.NoError(t, err)
 			assert.Equal(t, MetricQueryTypeSearch, res.MetricQueryType)
@@ -303,7 +316,7 @@ func TestRequestParser(t *testing.T) {

 	t.Run("Valid id is generated if ID is not provided and refId is not a valid MetricData ID", func(t *testing.T) {
 		query := getBaseJsonQuery()
-		query.Set("refId", "$$")
+		query.RefId = "$$"
 		res, err := parseRequestQuery(query, "$$", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))
 		require.NoError(t, err)
 		assert.Equal(t, "$$", res.RefId)
@@ -312,8 +325,11 @@ func TestRequestParser(t *testing.T) {

 	t.Run("parseRequestQuery sets label when label is present in json query", func(t *testing.T) {
 		query := getBaseJsonQuery()
-		query.Set("alias", "some alias")
-		query.Set("label", "some label")
+		alias := "some alias"
+		query.Alias = &alias
+
+		label := "some label"
+		query.Label = &label

 		res, err := parseRequestQuery(query, "ref1", time.Now().Add(-2*time.Hour), time.Now().Add(-time.Hour))

@@ -323,15 +339,22 @@ func TestRequestParser(t *testing.T) {
 	})
 }

-func getBaseJsonQuery() *simplejson.Json {
-	return simplejson.NewFromAny(map[string]interface{}{
+func getBaseJsonQuery() QueryJson {
+	fixtureJSON := []byte(`{
 		"refId": "ref1",
 		"region": "us-east-1",
 		"namespace": "ec2",
 		"metricName": "CPUUtilization",
 		"statistic": "Average",
-		"period": "900",
-	})
+		"period": "900"
+	}`)
+
+	var query QueryJson
+	err := json.Unmarshal(fixtureJSON, &query)
+	if err != nil {
+		panic(err)
+	}
+	return query
 }

 func Test_migrateAliasToDynamicLabel_single_query_preserves_old_alias_and_creates_new_label(t *testing.T) {
@@ -352,7 +375,7 @@ func Test_migrateAliasToDynamicLabel_single_query_preserves_old_alias_and_create
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			queryJson, err := simplejson.NewJson([]byte(fmt.Sprintf(`{
+			queryJson := []byte(fmt.Sprintf(`{
 				"region": "us-east-1",
 				"namespace": "ec2",
 				"metricName": "CPUUtilization",
@@ -363,26 +386,35 @@ func Test_migrateAliasToDynamicLabel_single_query_preserves_old_alias_and_create
 				"statistic": "Average",
 				"period": "600",
 				"hide": false
-			}`, tc.inputAlias)))
+			}`, tc.inputAlias))
+
+			var query QueryJson
+			err := json.Unmarshal(queryJson, &query)
 			require.NoError(t, err)

-			migrateAliasToDynamicLabel(queryJson)
+			migrateAliasToDynamicLabel(&query)

-			assert.Equal(t, simplejson.NewFromAny(
-				map[string]interface{}{
-					"alias": tc.inputAlias,
-					"dimensions": map[string]interface{}{"InstanceId": []interface{}{"test"}},
-					"hide": false,
-					"label": tc.expectedLabel,
-					"metricName": "CPUUtilization",
-					"namespace": "ec2",
-					"period": "600",
-					"region": "us-east-1",
-					"statistic": "Average"}), queryJson)
+			matchedJson := []byte(fmt.Sprintf(`{
+				"alias": "%s",
+				"dimensions": {
+					"InstanceId": ["test"]
+				},
+				"hide": false,
+				"label": "%s",
+				"metricName": "CPUUtilization",
+				"namespace": "ec2",
+				"period": "600",
+				"region": "us-east-1",
+				"statistic": "Average"
+			}`, tc.inputAlias, tc.expectedLabel))
+
+			result, err := json.Marshal(query)
+			require.NoError(t, err)
+
+			assert.JSONEq(t, string(matchedJson), string(result))
 		})
 	}
 }

 func Test_Test_migrateLegacyQuery(t *testing.T) {
 	t.Run("migrates alias to label when label does not already exist and feature toggle enabled", func(t *testing.T) {
 		migratedQueries, err := migrateLegacyQuery(
@@ -10,7 +10,6 @@ import (
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
 	"github.com/grafana/grafana-plugin-sdk-go/backend"
 	"github.com/grafana/grafana-plugin-sdk-go/data"
-	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/services/featuremgmt"
 )

@@ -297,9 +296,9 @@ func createDataLinks(link string) []data.DataLink {
 func createMeta(query *cloudWatchQuery) *data.FrameMeta {
 	return &data.FrameMeta{
 		ExecutedQueryString: query.UsedExpression,
-		Custom: simplejson.NewFromAny(map[string]interface{}{
-			"period": query.Period,
-			"id":     query.Id,
-		}),
+		Custom: fmt.Sprintf(`{
+			"period": %d,
+			"id": %s,
+		}`, query.Period, query.Id),
 	}
 }
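One caveat on the last hunk: the hand-formatted string assigned to Custom is not well-formed JSON as written (the %s value is unquoted and the object carries a trailing comma). Marshalling a small struct keeps the output well-formed; a hypothetical sketch, not part of this commit:

package main

import (
	"encoding/json"
	"fmt"
)

// frameMetaCustom is a hypothetical typed alternative for the metadata
// that createMeta builds by hand above.
type frameMetaCustom struct {
	Period int    `json:"period"`
	Id     string `json:"id"`
}

func main() {
	// json.Marshal guarantees correct quoting and no trailing commas.
	b, err := json.Marshal(frameMetaCustom{Period: 300, Id: "queryabc"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"period":300,"id":"queryabc"}
}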