package querydata

import (
	"context"
	"fmt"
	"net/http"
	"regexp"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/data"
	"go.opentelemetry.io/otel/attribute"

	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/infra/tracing"
	"github.com/grafana/grafana/pkg/services/featuremgmt"
	"github.com/grafana/grafana/pkg/tsdb/intervalv2"
	"github.com/grafana/grafana/pkg/tsdb/prometheus/client"
	"github.com/grafana/grafana/pkg/tsdb/prometheus/models"
	"github.com/grafana/grafana/pkg/tsdb/prometheus/querydata/exemplar"
	"github.com/grafana/grafana/pkg/tsdb/prometheus/utils"
	"github.com/grafana/grafana/pkg/util/maputil"
)

const legendFormatAuto = "__auto"

var legendFormatRegexp = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)

type ExemplarEvent struct {
	Time   time.Time
	Value  float64
	Labels map[string]string
}

// QueryData handles querying. Unlike the buffered package, it uses a custom
// client instead of the default Go Prometheus client.
type QueryData struct {
	intervalCalculator intervalv2.Calculator
	tracer             tracing.Tracer
	client             *client.Client
	log                log.Logger
	ID                 int64
	URL                string
	TimeInterval       string
	enableWideSeries   bool
	enableDataplane    bool
	exemplarSampler    func() exemplar.Sampler
}

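// New reads the data source settings, builds the Prometheus client for the
// configured URL and HTTP method, and returns a QueryData instance ready to
// execute queries.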
func New(
	httpClient *http.Client,
	features featuremgmt.FeatureToggles,
	tracer tracing.Tracer,
	settings backend.DataSourceInstanceSettings,
	plog log.Logger,
) (*QueryData, error) {
	jsonData, err := utils.GetJsonData(settings)
	if err != nil {
		return nil, err
	}
	httpMethod, _ := maputil.GetStringOptional(jsonData, "httpMethod")

	timeInterval, err := maputil.GetStringOptional(jsonData, "timeInterval")
	if err != nil {
		return nil, err
	}

	promClient := client.NewClient(httpClient, httpMethod, settings.URL)

	// standard deviation sampler is the default for backwards compatibility
	exemplarSampler := exemplar.NewStandardDeviationSampler

	if features.IsEnabled(featuremgmt.FlagDisablePrometheusExemplarSampling) {
		exemplarSampler = exemplar.NewNoOpSampler
	}

	return &QueryData{
		intervalCalculator: intervalv2.NewCalculator(),
		tracer:             tracer,
		log:                plog,
		client:             promClient,
		TimeInterval:       timeInterval,
		ID:                 settings.ID,
		URL:                settings.URL,
		enableWideSeries:   features.IsEnabled(featuremgmt.FlagPrometheusWideSeries),
		enableDataplane:    features.IsEnabled(featuremgmt.FlagPrometheusDataplane),
		exemplarSampler:    exemplarSampler,
	}, nil
}

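// Execute parses every query in the request, fetches its results, and collects
// the responses keyed by RefID.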
func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
	fromAlert := req.Headers["FromAlert"] == "true"
	result := backend.QueryDataResponse{
		Responses: backend.Responses{},
	}

	for _, q := range req.Queries {
		query, err := models.Parse(q, s.TimeInterval, s.intervalCalculator, fromAlert)
		if err != nil {
			return &result, err
		}
		r := s.fetch(ctx, s.client, query, req.Headers)
		if r == nil {
			s.log.FromContext(ctx).Debug("Received nil response from runQuery", "query", query.Expr)
			continue
		}
		result.Responses[q.RefID] = *r
	}

	return &result, nil
}

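// fetch runs the instant, range, and exemplar variants of a single query and
// merges their frames (and errors) into one DataResponse.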
func (s *QueryData) fetch(ctx context.Context, client *client.Client, q *models.Query, headers map[string]string) *backend.DataResponse {
	traceCtx, end := s.trace(ctx, q)
	defer end()

	logger := s.log.FromContext(traceCtx)
	logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr)

	dr := &backend.DataResponse{
		Frames: data.Frames{},
		Error:  nil,
	}

	if q.InstantQuery {
		res := s.instantQuery(traceCtx, client, q, headers)
		dr.Error = res.Error
		dr.Frames = res.Frames
	}

	if q.RangeQuery {
		res := s.rangeQuery(traceCtx, client, q, headers)
		if res.Error != nil {
			if dr.Error == nil {
				dr.Error = res.Error
			} else {
				dr.Error = fmt.Errorf("%v %w", dr.Error, res.Error)
			}
		}
		dr.Frames = append(dr.Frames, res.Frames...)
	}

	if q.ExemplarQuery {
		res := s.exemplarQuery(traceCtx, client, q, headers)
		if res.Error != nil {
			// If the exemplar query returns an error, we only log it and
			// continue processing the other results.
			logger.Error("Exemplar query failed", "query", q.Expr, "err", res.Error)
		}
		dr.Frames = append(dr.Frames, res.Frames...)
	}

	return dr
}

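// rangeQuery executes the query against the Prometheus range-query endpoint
// and parses the response into data frames.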
func (s *QueryData) rangeQuery(ctx context.Context, c *client.Client, q *models.Query, headers map[string]string) backend.DataResponse {
	res, err := c.QueryRange(ctx, q)
	if err != nil {
		return backend.DataResponse{
			Error: err,
		}
	}

	defer func() {
		err := res.Body.Close()
		if err != nil {
			s.log.Warn("failed to close query range response body", "error", err)
		}
	}()

	return s.parseResponse(ctx, q, res)
}

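// instantQuery executes the query against the Prometheus instant-query
// endpoint and parses the response into data frames.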
func (s *QueryData) instantQuery(ctx context.Context, c *client.Client, q *models.Query, headers map[string]string) backend.DataResponse {
	res, err := c.QueryInstant(ctx, q)
	if err != nil {
		return backend.DataResponse{
			Error: err,
		}
	}

	// Close the body before any early return so the connection is not leaked.
	defer func() {
		err := res.Body.Close()
		if err != nil {
			s.log.Warn("failed to close response body", "error", err)
		}
	}()

	// This is only for the health check fallback scenario.
	if res.StatusCode != 200 && q.RefId == "__healthcheck__" {
		return backend.DataResponse{
			Error: fmt.Errorf("%s", res.Status),
		}
	}

	return s.parseResponse(ctx, q, res)
}

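// exemplarQuery executes the query against the Prometheus exemplars endpoint
// and parses the response into data frames.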
func (s *QueryData) exemplarQuery(ctx context.Context, c *client.Client, q *models.Query, headers map[string]string) backend.DataResponse {
	res, err := c.QueryExemplars(ctx, q)
	if err != nil {
		return backend.DataResponse{
			Error: err,
		}
	}

	defer func() {
		err := res.Body.Close()
		if err != nil {
			s.log.Warn("failed to close response body", "error", err)
		}
	}()
	return s.parseResponse(ctx, q, res)
}

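// trace starts a span for the query and returns the span context together with
// a function that ends the span.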
func (s *QueryData) trace(ctx context.Context, q *models.Query) (context.Context, func()) {
	return utils.StartTrace(ctx, s.tracer, "datasource.prometheus", []utils.Attribute{
		{Key: "expr", Value: q.Expr, Kv: attribute.Key("expr").String(q.Expr)},
		{Key: "start_unixnano", Value: q.Start, Kv: attribute.Key("start_unixnano").Int64(q.Start.UnixNano())},
		{Key: "stop_unixnano", Value: q.End, Kv: attribute.Key("stop_unixnano").Int64(q.End.UnixNano())},
	})
}