2022-05-13 13:28:54 -05:00
|
|
|
package querydata
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2022-10-28 09:11:06 -05:00
|
|
|
"fmt"
|
2022-07-04 04:18:45 -05:00
|
|
|
"net/http"
|
2022-05-13 13:28:54 -05:00
|
|
|
"regexp"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
|
|
|
"github.com/grafana/grafana-plugin-sdk-go/data"
|
2022-10-27 11:05:06 -05:00
|
|
|
"go.opentelemetry.io/otel/attribute"
|
|
|
|
|
2022-05-13 13:28:54 -05:00
|
|
|
"github.com/grafana/grafana/pkg/infra/log"
|
|
|
|
"github.com/grafana/grafana/pkg/infra/tracing"
|
|
|
|
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
|
|
|
"github.com/grafana/grafana/pkg/tsdb/intervalv2"
|
|
|
|
"github.com/grafana/grafana/pkg/tsdb/prometheus/client"
|
|
|
|
"github.com/grafana/grafana/pkg/tsdb/prometheus/models"
|
2022-12-30 12:04:35 -06:00
|
|
|
"github.com/grafana/grafana/pkg/tsdb/prometheus/querydata/exemplar"
|
2022-07-04 04:18:45 -05:00
|
|
|
"github.com/grafana/grafana/pkg/tsdb/prometheus/utils"
|
2022-05-13 13:28:54 -05:00
|
|
|
"github.com/grafana/grafana/pkg/util/maputil"
|
|
|
|
)
|
|
|
|
|
|
|
|
// legendFormatAuto is the special legend format value ("__auto") that
// requests automatic legend generation — presumably interpreted by the
// frontend / series naming code; confirm against the legend formatting logic.
const legendFormatAuto = "__auto"

// legendFormatRegexp matches `{{ label }}` placeholders in a legend format
// string, capturing the label name with surrounding whitespace trimmed.
var legendFormatRegexp = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
|
|
|
|
|
|
|
|
// ExemplarEvent represents a single exemplar sample: its timestamp, value,
// and associated label set.
type ExemplarEvent struct {
	// Time is the exemplar's timestamp.
	Time time.Time
	// Value is the exemplar's sampled value.
	Value float64
	// Labels holds the exemplar's labels as name/value pairs.
	Labels map[string]string
}
|
|
|
|
|
2022-07-04 04:18:45 -05:00
|
|
|
// QueryData handles querying but different from buffered package uses a custom client instead of default Go Prom
// client.
type QueryData struct {
	// intervalCalculator computes the effective query interval/step.
	intervalCalculator intervalv2.Calculator
	// tracer is used to create spans around outgoing queries.
	tracer tracing.Tracer
	// client is the custom Prometheus HTTP client used for all requests.
	client *client.Client
	// log is the base logger; per-request loggers are derived via FromContext.
	log log.Logger
	// ID is the data source instance ID (from settings.ID).
	ID int64
	// URL is the Prometheus server URL (from settings.URL).
	URL string
	// TimeInterval comes from the data source's "timeInterval" JSON setting.
	TimeInterval string
	// enableWideSeries reflects the FlagPrometheusWideSeries feature toggle.
	enableWideSeries bool
	// enableDataplane reflects the FlagPrometheusDataplane feature toggle.
	enableDataplane bool
	// exemplarSampler constructs the sampler applied to exemplar results.
	exemplarSampler func() exemplar.Sampler
}
|
|
|
|
|
|
|
|
func New(
|
2022-07-04 04:18:45 -05:00
|
|
|
httpClient *http.Client,
|
2022-05-13 13:28:54 -05:00
|
|
|
features featuremgmt.FeatureToggles,
|
|
|
|
tracer tracing.Tracer,
|
|
|
|
settings backend.DataSourceInstanceSettings,
|
|
|
|
plog log.Logger,
|
|
|
|
) (*QueryData, error) {
|
2022-07-04 04:18:45 -05:00
|
|
|
jsonData, err := utils.GetJsonData(settings)
|
2022-05-13 13:28:54 -05:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2022-07-04 04:18:45 -05:00
|
|
|
httpMethod, _ := maputil.GetStringOptional(jsonData, "httpMethod")
|
2022-05-13 13:28:54 -05:00
|
|
|
|
2022-07-04 04:18:45 -05:00
|
|
|
timeInterval, err := maputil.GetStringOptional(jsonData, "timeInterval")
|
2022-05-13 13:28:54 -05:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-07-04 04:18:45 -05:00
|
|
|
promClient := client.NewClient(httpClient, httpMethod, settings.URL)
|
|
|
|
|
2022-12-30 12:04:35 -06:00
|
|
|
// standard deviation sampler is the default for backwards compatibility
|
2023-01-11 07:27:47 -06:00
|
|
|
exemplarSampler := exemplar.NewStandardDeviationSampler
|
2022-12-30 12:04:35 -06:00
|
|
|
|
|
|
|
if features.IsEnabled(featuremgmt.FlagDisablePrometheusExemplarSampling) {
|
2023-01-11 07:27:47 -06:00
|
|
|
exemplarSampler = exemplar.NewNoOpSampler
|
2022-12-30 12:04:35 -06:00
|
|
|
}
|
|
|
|
|
2022-05-13 13:28:54 -05:00
|
|
|
return &QueryData{
|
2022-12-30 12:04:35 -06:00
|
|
|
intervalCalculator: intervalv2.NewCalculator(),
|
|
|
|
tracer: tracer,
|
|
|
|
log: plog,
|
|
|
|
client: promClient,
|
|
|
|
TimeInterval: timeInterval,
|
|
|
|
ID: settings.ID,
|
|
|
|
URL: settings.URL,
|
|
|
|
enableWideSeries: features.IsEnabled(featuremgmt.FlagPrometheusWideSeries),
|
2023-03-29 10:26:32 -05:00
|
|
|
enableDataplane: features.IsEnabled(featuremgmt.FlagPrometheusDataplane),
|
2022-12-30 12:04:35 -06:00
|
|
|
exemplarSampler: exemplarSampler,
|
2022-05-13 13:28:54 -05:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
|
2022-12-30 12:04:35 -06:00
|
|
|
fromAlert := req.Headers["FromAlert"] == "true"
|
2022-05-13 13:28:54 -05:00
|
|
|
result := backend.QueryDataResponse{
|
|
|
|
Responses: backend.Responses{},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, q := range req.Queries {
|
|
|
|
query, err := models.Parse(q, s.TimeInterval, s.intervalCalculator, fromAlert)
|
|
|
|
if err != nil {
|
|
|
|
return &result, err
|
|
|
|
}
|
2023-01-31 12:26:45 -06:00
|
|
|
r := s.fetch(ctx, s.client, query, req.Headers)
|
2022-05-13 13:28:54 -05:00
|
|
|
if r == nil {
|
2023-03-22 04:59:39 -05:00
|
|
|
s.log.FromContext(ctx).Debug("Received nil response from runQuery", "query", query.Expr)
|
2022-05-13 13:28:54 -05:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
result.Responses[q.RefID] = *r
|
|
|
|
}
|
|
|
|
|
|
|
|
return &result, nil
|
|
|
|
}
|
|
|
|
|
2023-01-31 12:26:45 -06:00
|
|
|
func (s *QueryData) fetch(ctx context.Context, client *client.Client, q *models.Query, headers map[string]string) *backend.DataResponse {
|
2022-07-04 04:18:45 -05:00
|
|
|
traceCtx, end := s.trace(ctx, q)
|
|
|
|
defer end()
|
2022-05-13 13:28:54 -05:00
|
|
|
|
2022-10-27 11:05:06 -05:00
|
|
|
logger := s.log.FromContext(traceCtx)
|
|
|
|
logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr)
|
|
|
|
|
2023-01-31 12:26:45 -06:00
|
|
|
dr := &backend.DataResponse{
|
2022-05-13 13:28:54 -05:00
|
|
|
Frames: data.Frames{},
|
|
|
|
Error: nil,
|
|
|
|
}
|
|
|
|
|
2022-10-28 09:11:06 -05:00
|
|
|
if q.InstantQuery {
|
2023-01-31 12:26:45 -06:00
|
|
|
res := s.instantQuery(traceCtx, client, q, headers)
|
|
|
|
dr.Error = res.Error
|
|
|
|
dr.Frames = res.Frames
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
|
|
|
|
2022-10-28 09:11:06 -05:00
|
|
|
if q.RangeQuery {
|
2023-01-31 12:26:45 -06:00
|
|
|
res := s.rangeQuery(traceCtx, client, q, headers)
|
|
|
|
if res.Error != nil {
|
|
|
|
if dr.Error == nil {
|
|
|
|
dr.Error = res.Error
|
2022-10-28 09:11:06 -05:00
|
|
|
} else {
|
2023-01-31 12:26:45 -06:00
|
|
|
dr.Error = fmt.Errorf("%v %w", dr.Error, res.Error)
|
2022-10-28 09:11:06 -05:00
|
|
|
}
|
|
|
|
}
|
2023-01-31 12:26:45 -06:00
|
|
|
dr.Frames = append(dr.Frames, res.Frames...)
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
if q.ExemplarQuery {
|
2023-01-31 12:26:45 -06:00
|
|
|
res := s.exemplarQuery(traceCtx, client, q, headers)
|
|
|
|
if res.Error != nil {
|
2022-05-13 13:28:54 -05:00
|
|
|
// If exemplar query returns error, we want to only log it and
|
|
|
|
// continue with other results processing
|
2023-01-31 12:26:45 -06:00
|
|
|
logger.Error("Exemplar query failed", "query", q.Expr, "err", res.Error)
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
2023-01-31 12:26:45 -06:00
|
|
|
dr.Frames = append(dr.Frames, res.Frames...)
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
|
|
|
|
2023-01-31 12:26:45 -06:00
|
|
|
return dr
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
|
|
|
|
2023-01-31 12:26:45 -06:00
|
|
|
func (s *QueryData) rangeQuery(ctx context.Context, c *client.Client, q *models.Query, headers map[string]string) backend.DataResponse {
|
2022-12-21 06:25:58 -06:00
|
|
|
res, err := c.QueryRange(ctx, q)
|
2023-02-28 12:08:01 -06:00
|
|
|
if err != nil {
|
|
|
|
return backend.DataResponse{
|
|
|
|
Error: err,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-23 08:10:03 -06:00
|
|
|
defer func() {
|
|
|
|
err := res.Body.Close()
|
|
|
|
if err != nil {
|
|
|
|
s.log.Warn("failed to close query range response body", "error", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2023-02-28 12:08:01 -06:00
|
|
|
return s.parseResponse(ctx, q, res)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *QueryData) instantQuery(ctx context.Context, c *client.Client, q *models.Query, headers map[string]string) backend.DataResponse {
|
|
|
|
res, err := c.QueryInstant(ctx, q)
|
2022-05-13 13:28:54 -05:00
|
|
|
if err != nil {
|
2023-01-31 12:26:45 -06:00
|
|
|
return backend.DataResponse{
|
|
|
|
Error: err,
|
|
|
|
}
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
|
|
|
|
2023-02-23 08:10:03 -06:00
|
|
|
defer func() {
|
|
|
|
err := res.Body.Close()
|
|
|
|
if err != nil {
|
|
|
|
s.log.Warn("failed to close response body", "error", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2023-02-28 12:08:01 -06:00
|
|
|
return s.parseResponse(ctx, q, res)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *QueryData) exemplarQuery(ctx context.Context, c *client.Client, q *models.Query, headers map[string]string) backend.DataResponse {
|
|
|
|
res, err := c.QueryExemplars(ctx, q)
|
2022-05-13 13:28:54 -05:00
|
|
|
if err != nil {
|
2023-01-31 12:26:45 -06:00
|
|
|
return backend.DataResponse{
|
|
|
|
Error: err,
|
|
|
|
}
|
2022-05-13 13:28:54 -05:00
|
|
|
}
|
|
|
|
|
2023-02-23 08:10:03 -06:00
|
|
|
defer func() {
|
|
|
|
err := res.Body.Close()
|
|
|
|
if err != nil {
|
|
|
|
s.log.Warn("failed to close response body", "error", err)
|
|
|
|
}
|
|
|
|
}()
|
2022-05-13 13:28:54 -05:00
|
|
|
return s.parseResponse(ctx, q, res)
|
|
|
|
}
|
|
|
|
|
2022-07-04 04:18:45 -05:00
|
|
|
func (s *QueryData) trace(ctx context.Context, q *models.Query) (context.Context, func()) {
|
|
|
|
return utils.StartTrace(ctx, s.tracer, "datasource.prometheus", []utils.Attribute{
|
|
|
|
{Key: "expr", Value: q.Expr, Kv: attribute.Key("expr").String(q.Expr)},
|
|
|
|
{Key: "start_unixnano", Value: q.Start, Kv: attribute.Key("start_unixnano").Int64(q.Start.UnixNano())},
|
|
|
|
{Key: "stop_unixnano", Value: q.End, Kv: attribute.Key("stop_unixnano").Int64(q.End.UnixNano())},
|
|
|
|
})
|
|
|
|
}
|