From 84e5c3af223b71aea86d70865aeb5e2b41bdfd32 Mon Sep 17 00:00:00 2001 From: Kyle Brandt Date: Mon, 26 Aug 2024 08:53:38 -0400 Subject: [PATCH] Prometheus/Promlib: Remove the PrometheusDataplane feature so always enabled (#92218) This has been default behavior since v10 --------- Co-authored-by: Brendan O'Handley --- .../feature-toggles/index.md | 95 +++++++++---------- .../src/types/featureToggles.gen.ts | 1 - .../src/result_transformer.test.ts | 5 - .../src/result_transformer.ts | 30 +++--- pkg/promlib/querydata/request.go | 6 +- pkg/promlib/querydata/request_test.go | 8 +- pkg/promlib/querydata/response.go | 20 ++-- pkg/promlib/querydata/response_test.go | 10 +- .../testdata/range_auto.result.golden.jsonc | 7 +- .../range_infinity.result.golden.jsonc | 7 +- .../range_missing.result.golden.jsonc | 11 +-- .../testdata/range_nan.result.golden.jsonc | 7 +- .../testdata/range_simple.result.golden.jsonc | 22 ++--- pkg/services/featuremgmt/registry.go | 8 -- pkg/services/featuremgmt/toggles_gen.csv | 1 - pkg/services/featuremgmt/toggles_gen.go | 4 - pkg/services/featuremgmt/toggles_gen.json | 2 + 17 files changed, 106 insertions(+), 138 deletions(-) diff --git a/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md b/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md index d5476d7e9bf..220007f2a34 100644 --- a/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md +++ b/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md @@ -21,54 +21,53 @@ For more information about feature release stages, refer to [Release life cycle Most [generally available](https://grafana.com/docs/release-life-cycle/#general-availability) features are enabled by default. You can disable these feature by setting the feature flag to "false" in the configuration. -| Feature toggle name | Description | Enabled by default | -| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `disableEnvelopeEncryption` | Disable envelope encryption (emergency only) | | -| `publicDashboards` | [Deprecated] Public dashboards are now enabled by default; to disable them, use the configuration setting. This feature toggle will be removed in the next major version. | Yes | -| `featureHighlights` | Highlight Grafana Enterprise features | | -| `correlations` | Correlations page | Yes | -| `autoMigrateXYChartPanel` | Migrate old XYChart panel to new XYChart2 model | Yes | -| `cloudWatchCrossAccountQuerying` | Enables cross-account querying in CloudWatch datasources | Yes | -| `nestedFolders` | Enable folder nesting | Yes | -| `logsContextDatasourceUi` | Allow datasource to provide custom UI for context view | Yes | -| `lokiQuerySplitting` | Split large interval queries into subqueries with smaller time intervals | Yes | -| `prometheusMetricEncyclopedia` | Adds the metrics explorer component to the Prometheus query builder as an option in metric select | Yes | -| `influxdbBackendMigration` | Query InfluxDB InfluxQL without the proxy | Yes | -| `prometheusDataplane` | Changes responses to from Prometheus to be compliant with the dataplane specification. In particular, when this feature toggle is active, the numeric `Field.Name` is set from 'Value' to the value of the `__name__` label. 
| Yes | -| `lokiMetricDataplane` | Changes metric responses from Loki to be compliant with the dataplane specification. | Yes | -| `dataplaneFrontendFallback` | Support dataplane contract field name change for transformations and field name matchers where the name is different | Yes | -| `recordedQueriesMulti` | Enables writing multiple items from a single query within Recorded Queries | Yes | -| `logsExploreTableVisualisation` | A table visualisation for logs in Explore | Yes | -| `transformationsRedesign` | Enables the transformations redesign | Yes | -| `traceQLStreaming` | Enables response streaming of TraceQL queries of the Tempo data source | | -| `awsAsyncQueryCaching` | Enable caching for async queries for Redshift and Athena. Requires that the datasource has caching and async query support enabled | Yes | -| `prometheusConfigOverhaulAuth` | Update the Prometheus configuration page with the new auth component | Yes | -| `alertingNoDataErrorExecution` | Changes how Alerting state manager handles execution of NoData/Error | Yes | -| `angularDeprecationUI` | Display Angular warnings in dashboards and panels | Yes | -| `dashgpt` | Enable AI powered features in dashboards | Yes | -| `alertingInsights` | Show the new alerting insights landing page | Yes | -| `panelMonitoring` | Enables panel monitoring through logs and measurements | Yes | -| `formatString` | Enable format string transformer | Yes | -| `transformationsVariableSupport` | Allows using variables in transformations | Yes | -| `kubernetesPlaylists` | Use the kubernetes API in the frontend for playlists, and route /api/playlist requests to k8s | Yes | -| `recoveryThreshold` | Enables feature recovery threshold (aka hysteresis) for threshold server-side expression | Yes | -| `lokiStructuredMetadata` | Enables the loki data source to request structured metadata from the Loki server | Yes | -| `managedPluginsInstall` | Install managed plugins directly from plugins catalog | Yes | -| `addFieldFromCalculationStatFunctions` | Add cumulative and window functions to the add field from calculation transformation | Yes | -| `annotationPermissionUpdate` | Change the way annotation permissions work by scoping them to folders and dashboards. | Yes | -| `ssoSettingsApi` | Enables the SSO settings API and the OAuth configuration UIs in Grafana | Yes | -| `logsInfiniteScrolling` | Enables infinite scrolling for the Logs panel in Explore and Dashboards | Yes | -| `exploreMetrics` | Enables the new Explore Metrics core app | Yes | -| `alertingSimplifiedRouting` | Enables users to easily configure alert notifications by specifying a contact point directly when editing or creating an alert rule | Yes | -| `logRowsPopoverMenu` | Enable filtering menu displayed when text of a log line is selected | Yes | -| `lokiQueryHints` | Enables query hints for Loki | Yes | -| `alertingQueryOptimization` | Optimizes eligible queries in order to reduce load on datasources | | -| `groupToNestedTableTransformation` | Enables the group to nested table transformation | Yes | -| `tlsMemcached` | Use TLS-enabled memcached in the enterprise caching feature | Yes | -| `cloudWatchNewLabelParsing` | Updates CloudWatch label parsing to be more accurate | Yes | -| `pluginProxyPreserveTrailingSlash` | Preserve plugin proxy trailing slash. 
| | -| `openSearchBackendFlowEnabled` | Enables the backend query flow for Open Search datasource plugin | Yes | -| `cloudWatchRoundUpEndTime` | Round up end time for metric queries to the next minute to avoid missing data | Yes | +| Feature toggle name | Description | Enabled by default | +| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `disableEnvelopeEncryption` | Disable envelope encryption (emergency only) | | +| `publicDashboards` | [Deprecated] Public dashboards are now enabled by default; to disable them, use the configuration setting. This feature toggle will be removed in the next major version. | Yes | +| `featureHighlights` | Highlight Grafana Enterprise features | | +| `correlations` | Correlations page | Yes | +| `autoMigrateXYChartPanel` | Migrate old XYChart panel to new XYChart2 model | Yes | +| `cloudWatchCrossAccountQuerying` | Enables cross-account querying in CloudWatch datasources | Yes | +| `nestedFolders` | Enable folder nesting | Yes | +| `logsContextDatasourceUi` | Allow datasource to provide custom UI for context view | Yes | +| `lokiQuerySplitting` | Split large interval queries into subqueries with smaller time intervals | Yes | +| `prometheusMetricEncyclopedia` | Adds the metrics explorer component to the Prometheus query builder as an option in metric select | Yes | +| `influxdbBackendMigration` | Query InfluxDB InfluxQL without the proxy | Yes | +| `lokiMetricDataplane` | Changes metric responses from Loki to be compliant with the dataplane specification. | Yes | +| `dataplaneFrontendFallback` | Support dataplane contract field name change for transformations and field name matchers where the name is different | Yes | +| `recordedQueriesMulti` | Enables writing multiple items from a single query within Recorded Queries | Yes | +| `logsExploreTableVisualisation` | A table visualisation for logs in Explore | Yes | +| `transformationsRedesign` | Enables the transformations redesign | Yes | +| `traceQLStreaming` | Enables response streaming of TraceQL queries of the Tempo data source | | +| `awsAsyncQueryCaching` | Enable caching for async queries for Redshift and Athena. 
Requires that the datasource has caching and async query support enabled | Yes | +| `prometheusConfigOverhaulAuth` | Update the Prometheus configuration page with the new auth component | Yes | +| `alertingNoDataErrorExecution` | Changes how Alerting state manager handles execution of NoData/Error | Yes | +| `angularDeprecationUI` | Display Angular warnings in dashboards and panels | Yes | +| `dashgpt` | Enable AI powered features in dashboards | Yes | +| `alertingInsights` | Show the new alerting insights landing page | Yes | +| `panelMonitoring` | Enables panel monitoring through logs and measurements | Yes | +| `formatString` | Enable format string transformer | Yes | +| `transformationsVariableSupport` | Allows using variables in transformations | Yes | +| `kubernetesPlaylists` | Use the kubernetes API in the frontend for playlists, and route /api/playlist requests to k8s | Yes | +| `recoveryThreshold` | Enables feature recovery threshold (aka hysteresis) for threshold server-side expression | Yes | +| `lokiStructuredMetadata` | Enables the loki data source to request structured metadata from the Loki server | Yes | +| `managedPluginsInstall` | Install managed plugins directly from plugins catalog | Yes | +| `addFieldFromCalculationStatFunctions` | Add cumulative and window functions to the add field from calculation transformation | Yes | +| `annotationPermissionUpdate` | Change the way annotation permissions work by scoping them to folders and dashboards. | Yes | +| `ssoSettingsApi` | Enables the SSO settings API and the OAuth configuration UIs in Grafana | Yes | +| `logsInfiniteScrolling` | Enables infinite scrolling for the Logs panel in Explore and Dashboards | Yes | +| `exploreMetrics` | Enables the new Explore Metrics core app | Yes | +| `alertingSimplifiedRouting` | Enables users to easily configure alert notifications by specifying a contact point directly when editing or creating an alert rule | Yes | +| `logRowsPopoverMenu` | Enable filtering menu displayed when text of a log line is selected | Yes | +| `lokiQueryHints` | Enables query hints for Loki | Yes | +| `alertingQueryOptimization` | Optimizes eligible queries in order to reduce load on datasources | | +| `groupToNestedTableTransformation` | Enables the group to nested table transformation | Yes | +| `tlsMemcached` | Use TLS-enabled memcached in the enterprise caching feature | Yes | +| `cloudWatchNewLabelParsing` | Updates CloudWatch label parsing to be more accurate | Yes | +| `pluginProxyPreserveTrailingSlash` | Preserve plugin proxy trailing slash. 
| | +| `openSearchBackendFlowEnabled` | Enables the backend query flow for Open Search datasource plugin | Yes | +| `cloudWatchRoundUpEndTime` | Round up end time for metric queries to the next minute to avoid missing data | Yes | ## Public preview feature toggles diff --git a/packages/grafana-data/src/types/featureToggles.gen.ts b/packages/grafana-data/src/types/featureToggles.gen.ts index b3f888b540d..2b3667bed7a 100644 --- a/packages/grafana-data/src/types/featureToggles.gen.ts +++ b/packages/grafana-data/src/types/featureToggles.gen.ts @@ -59,7 +59,6 @@ export interface FeatureToggles { influxqlStreamingParser?: boolean; influxdbRunQueriesInParallel?: boolean; prometheusRunQueriesInParallel?: boolean; - prometheusDataplane?: boolean; lokiMetricDataplane?: boolean; lokiLogsDataplane?: boolean; dataplaneFrontendFallback?: boolean; diff --git a/packages/grafana-prometheus/src/result_transformer.test.ts b/packages/grafana-prometheus/src/result_transformer.test.ts index 761216b4230..5dfc9d53ee9 100644 --- a/packages/grafana-prometheus/src/result_transformer.test.ts +++ b/packages/grafana-prometheus/src/result_transformer.test.ts @@ -29,11 +29,6 @@ jest.mock('@grafana/runtime', () => ({ }, }; }, - config: { - featureToggles: { - prometheusDataplane: true, - }, - }, })); describe('Prometheus Result Transformer', () => { diff --git a/packages/grafana-prometheus/src/result_transformer.ts b/packages/grafana-prometheus/src/result_transformer.ts index 7a901a0f995..d3bac7fff9c 100644 --- a/packages/grafana-prometheus/src/result_transformer.ts +++ b/packages/grafana-prometheus/src/result_transformer.ts @@ -17,7 +17,7 @@ import { TIME_SERIES_TIME_FIELD_NAME, TIME_SERIES_VALUE_FIELD_NAME, } from '@grafana/data'; -import { config, getDataSourceSrv } from '@grafana/runtime'; +import { getDataSourceSrv } from '@grafana/runtime'; import { ExemplarTraceIdDestination, PromMetric, PromQuery, PromValue } from './types'; @@ -54,21 +54,19 @@ export function transformV2( options: { exemplarTraceIdDestinations?: ExemplarTraceIdDestination[] } ) { // migration for dataplane field name issue - if (config.featureToggles.prometheusDataplane) { - // update displayNameFromDS in the field config - response.data.forEach((f: DataFrame) => { - const target = request.targets.find((t) => t.refId === f.refId); - // check that the legend is selected as auto - if (target && target.legendFormat === '__auto') { - f.fields.forEach((field) => { - if (field.labels?.__name__ && field.labels?.__name__ === field.name) { - const fieldCopy = { ...field, name: TIME_SERIES_VALUE_FIELD_NAME }; - field.config.displayNameFromDS = getFieldDisplayName(fieldCopy, f, response.data); - } - }); - } - }); - } + // update displayNameFromDS in the field config + response.data.forEach((f: DataFrame) => { + const target = request.targets.find((t) => t.refId === f.refId); + // check that the legend is selected as auto + if (target && target.legendFormat === '__auto') { + f.fields.forEach((field) => { + if (field.labels?.__name__ && field.labels?.__name__ === field.name) { + const fieldCopy = { ...field, name: TIME_SERIES_VALUE_FIELD_NAME }; + field.config.displayNameFromDS = getFieldDisplayName(fieldCopy, f, response.data); + } + }); + } + }); const [tableFrames, framesWithoutTable] = partition(response.data, (df) => isTableResult(df, request)); const processedTableFrames = transformDFToTable(tableFrames); diff --git a/pkg/promlib/querydata/request.go b/pkg/promlib/querydata/request.go index c7e605315d8..3ee3a0a14f1 100644 --- 
a/pkg/promlib/querydata/request.go +++ b/pkg/promlib/querydata/request.go @@ -242,7 +242,7 @@ func (s *QueryData) rangeQuery(ctx context.Context, c *client.Client, q *models. } }() - return s.parseResponse(ctx, q, res, enablePrometheusDataplaneFlag) + return s.parseResponse(ctx, q, res) } func (s *QueryData) instantQuery(ctx context.Context, c *client.Client, q *models.Query, enablePrometheusDataplaneFlag bool) backend.DataResponse { @@ -268,7 +268,7 @@ func (s *QueryData) instantQuery(ctx context.Context, c *client.Client, q *model } }() - return s.parseResponse(ctx, q, res, enablePrometheusDataplaneFlag) + return s.parseResponse(ctx, q, res) } func (s *QueryData) exemplarQuery(ctx context.Context, c *client.Client, q *models.Query, enablePrometheusDataplaneFlag bool) backend.DataResponse { @@ -285,7 +285,7 @@ func (s *QueryData) exemplarQuery(ctx context.Context, c *client.Client, q *mode s.log.Warn("Failed to close response body", "error", err) } }() - return s.parseResponse(ctx, q, res, enablePrometheusDataplaneFlag) + return s.parseResponse(ctx, q, res) } func addDataResponse(res *backend.DataResponse, dr *backend.DataResponse) { diff --git a/pkg/promlib/querydata/request_test.go b/pkg/promlib/querydata/request_test.go index 3a1ac8986f9..0977b9036d3 100644 --- a/pkg/promlib/querydata/request_test.go +++ b/pkg/promlib/querydata/request_test.go @@ -137,7 +137,7 @@ func TestPrometheus_parseTimeSeriesResponse(t *testing.T) { require.Equal(t, "Time", res[0].Fields[0].Name) require.Len(t, res[0].Fields[1].Labels, 2) require.Equal(t, "app=Application, tag2=tag2", res[0].Fields[1].Labels.String()) - require.Equal(t, "legend Application", res[0].Name) + require.Equal(t, "legend Application", res[0].Fields[1].Config.DisplayNameFromDS) // Ensure the timestamps are UTC zoned testValue := res[0].Fields[0].At(0) @@ -231,7 +231,7 @@ func TestPrometheus_parseTimeSeriesResponse(t *testing.T) { require.Equal(t, res[0].Fields[0].Name, "Time") require.Len(t, res[0].Fields[1].Labels, 2) require.Equal(t, res[0].Fields[1].Labels.String(), "app=Application, tag2=tag2") - require.Equal(t, "{app=\"Application\", tag2=\"tag2\"}", res[0].Name) + require.Equal(t, `{app="Application", tag2="tag2"}`, res[0].Fields[1].Config.DisplayNameFromDS) }) t.Run("matrix response with NaN value should be changed to null", func(t *testing.T) { @@ -269,7 +269,7 @@ func TestPrometheus_parseTimeSeriesResponse(t *testing.T) { res, err := execute(tctx, query, result) require.NoError(t, err) - require.Equal(t, "{app=\"Application\"}", res[0].Name) + require.Equal(t, `{app="Application"}`, res[0].Fields[1].Config.DisplayNameFromDS) require.True(t, math.IsNaN(res[0].Fields[1].At(0).(float64))) }) @@ -308,7 +308,7 @@ func TestPrometheus_parseTimeSeriesResponse(t *testing.T) { require.Equal(t, res[0].Fields[0].Name, "Time") require.Len(t, res[0].Fields[1].Labels, 2) require.Equal(t, res[0].Fields[1].Labels.String(), "app=Application, tag2=tag2") - require.Equal(t, "legend Application", res[0].Name) + require.Equal(t, "legend Application", res[0].Fields[1].Config.DisplayNameFromDS) // Ensure the timestamps are UTC zoned testValue := res[0].Fields[0].At(0) diff --git a/pkg/promlib/querydata/response.go b/pkg/promlib/querydata/response.go index 7181400df16..b1cfec8e7fa 100644 --- a/pkg/promlib/querydata/response.go +++ b/pkg/promlib/querydata/response.go @@ -18,7 +18,7 @@ import ( "github.com/grafana/grafana/pkg/promlib/utils" ) -func (s *QueryData) parseResponse(ctx context.Context, q *models.Query, res *http.Response, 
enablePrometheusDataplaneFlag bool) backend.DataResponse { +func (s *QueryData) parseResponse(ctx context.Context, q *models.Query, res *http.Response) backend.DataResponse { defer func() { if err := res.Body.Close(); err != nil { s.log.FromContext(ctx).Error("Failed to close response body", "err", err) @@ -29,9 +29,7 @@ func (s *QueryData) parseResponse(ctx context.Context, q *models.Query, res *htt defer endSpan() iter := jsoniter.Parse(jsoniter.ConfigDefault, res.Body, 1024) - r := converter.ReadPrometheusStyleResult(iter, converter.Options{ - Dataplane: enablePrometheusDataplaneFlag, - }) + r := converter.ReadPrometheusStyleResult(iter, converter.Options{Dataplane: true}) r.Status = backend.Status(res.StatusCode) // Add frame to attach metadata @@ -41,7 +39,7 @@ func (s *QueryData) parseResponse(ctx context.Context, q *models.Query, res *htt // The ExecutedQueryString can be viewed in QueryInspector in UI for i, frame := range r.Frames { - addMetadataToMultiFrame(q, frame, enablePrometheusDataplaneFlag) + addMetadataToMultiFrame(q, frame) if i == 0 { frame.Meta.ExecutedQueryString = executedQueryString(q) } @@ -106,7 +104,7 @@ func (s *QueryData) processExemplars(ctx context.Context, q *models.Query, dr ba } } -func addMetadataToMultiFrame(q *models.Query, frame *data.Frame, enableDataplane bool) { +func addMetadataToMultiFrame(q *models.Query, frame *data.Frame) { if frame.Meta == nil { frame.Meta = &data.FrameMeta{} } @@ -120,13 +118,9 @@ func addMetadataToMultiFrame(q *models.Query, frame *data.Frame, enableDataplane frame.Fields[1].Config = &data.FieldConfig{DisplayNameFromDS: customName} } - if enableDataplane { - valueField := frame.Fields[1] - if n, ok := valueField.Labels["__name__"]; ok { - valueField.Name = n - } - } else { - frame.Name = customName + valueField := frame.Fields[1] + if n, ok := valueField.Labels["__name__"]; ok { + valueField.Name = n } } diff --git a/pkg/promlib/querydata/response_test.go b/pkg/promlib/querydata/response_test.go index 8cec7ba460a..5c7a2e6c555 100644 --- a/pkg/promlib/querydata/response_test.go +++ b/pkg/promlib/querydata/response_test.go @@ -19,7 +19,7 @@ func TestQueryData_parseResponse(t *testing.T) { t.Run("resultType is before result the field must parsed normally", func(t *testing.T) { resBody := `{"data":{"resultType":"vector", "result":[{"metric":{"__name__":"some_name","environment":"some_env","id":"some_id","instance":"some_instance:1234","job":"some_job","name":"another_name","region":"some_region"},"value":[1.1,"2"]}]},"status":"success"}` res := &http.Response{Body: io.NopCloser(bytes.NewBufferString(resBody))} - result := qd.parseResponse(context.Background(), &models.Query{}, res, false) + result := qd.parseResponse(context.Background(), &models.Query{}, res) assert.Nil(t, result.Error) assert.Len(t, result.Frames, 1) }) @@ -27,7 +27,7 @@ func TestQueryData_parseResponse(t *testing.T) { t.Run("resultType is after the result field must parsed normally", func(t *testing.T) { resBody := `{"data":{"result":[{"metric":{"__name__":"some_name","environment":"some_env","id":"some_id","instance":"some_instance:1234","job":"some_job","name":"another_name","region":"some_region"},"value":[1.1,"2"]}],"resultType":"vector"},"status":"success"}` res := &http.Response{Body: io.NopCloser(bytes.NewBufferString(resBody))} - result := qd.parseResponse(context.Background(), &models.Query{}, res, false) + result := qd.parseResponse(context.Background(), &models.Query{}, res) assert.Nil(t, result.Error) assert.Len(t, result.Frames, 1) }) @@ -35,7 
+35,7 @@ func TestQueryData_parseResponse(t *testing.T) { t.Run("no resultType is existed in the data", func(t *testing.T) { resBody := `{"data":{"result":[{"metric":{"__name__":"some_name","environment":"some_env","id":"some_id","instance":"some_instance:1234","job":"some_job","name":"another_name","region":"some_region"},"value":[1.1,"2"]}]},"status":"success"}` res := &http.Response{Body: io.NopCloser(bytes.NewBufferString(resBody))} - result := qd.parseResponse(context.Background(), &models.Query{}, res, false) + result := qd.parseResponse(context.Background(), &models.Query{}, res) assert.Error(t, result.Error) assert.Equal(t, result.Error.Error(), "no resultType found") }) @@ -43,7 +43,7 @@ func TestQueryData_parseResponse(t *testing.T) { t.Run("resultType is set as empty string before result", func(t *testing.T) { resBody := `{"data":{"resultType":"", "result":[{"metric":{"__name__":"some_name","environment":"some_env","id":"some_id","instance":"some_instance:1234","job":"some_job","name":"another_name","region":"some_region"},"value":[1.1,"2"]}]},"status":"success"}` res := &http.Response{Body: io.NopCloser(bytes.NewBufferString(resBody))} - result := qd.parseResponse(context.Background(), &models.Query{}, res, false) + result := qd.parseResponse(context.Background(), &models.Query{}, res) assert.Error(t, result.Error) assert.Equal(t, result.Error.Error(), "unknown result type: ") }) @@ -51,7 +51,7 @@ func TestQueryData_parseResponse(t *testing.T) { t.Run("resultType is set as empty string after result", func(t *testing.T) { resBody := `{"data":{"result":[{"metric":{"__name__":"some_name","environment":"some_env","id":"some_id","instance":"some_instance:1234","job":"some_job","name":"another_name","region":"some_region"},"value":[1.1,"2"]}],"resultType":""},"status":"success"}` res := &http.Response{Body: io.NopCloser(bytes.NewBufferString(resBody))} - result := qd.parseResponse(context.Background(), &models.Query{}, res, false) + result := qd.parseResponse(context.Background(), &models.Query{}, res) assert.Error(t, result.Error) assert.Equal(t, result.Error.Error(), "unknown result type: ") }) diff --git a/pkg/promlib/testdata/range_auto.result.golden.jsonc b/pkg/promlib/testdata/range_auto.result.golden.jsonc index 13bc995348a..9bb659c7679 100644 --- a/pkg/promlib/testdata/range_auto.result.golden.jsonc +++ b/pkg/promlib/testdata/range_auto.result.golden.jsonc @@ -4,14 +4,14 @@ // "type": "timeseries-multi", // "typeVersion": [ // 0, -// 0 +// 1 // ], // "custom": { // "resultType": "matrix" // }, // "executedQueryString": "Expr: histogram_quantile(0.95, sum(rate(tns_request_duration_seconds_bucket[4s])) by (le))\nStep: 1s" // } -// Name: histogram_quantile(0.95, sum(rate(tns_request_duration_seconds_bucket[4s])) by (le)) +// Name: // Dimensions: 2 Fields by 301 Rows // +-----------------------------------+----------------------+ // | Name: Time | Name: Value | @@ -37,12 +37,11 @@ "frames": [ { "schema": { - "name": "histogram_quantile(0.95, sum(rate(tns_request_duration_seconds_bucket[4s])) by (le))", "meta": { "type": "timeseries-multi", "typeVersion": [ 0, - 0 + 1 ], "custom": { "resultType": "matrix" diff --git a/pkg/promlib/testdata/range_infinity.result.golden.jsonc b/pkg/promlib/testdata/range_infinity.result.golden.jsonc index c3e837d79bd..db0f3edfbce 100644 --- a/pkg/promlib/testdata/range_infinity.result.golden.jsonc +++ b/pkg/promlib/testdata/range_infinity.result.golden.jsonc @@ -4,14 +4,14 @@ // "type": "timeseries-multi", // "typeVersion": [ // 0, -// 0 +// 1 // ], 
// "custom": { // "resultType": "matrix" // }, // "executedQueryString": "Expr: 1 / 0\nStep: 1s" // } -// Name: 1 / 0 +// Name: // Dimensions: 2 Fields by 3 Rows // +-------------------------------+-----------------+ // | Name: Time | Name: Value | @@ -30,12 +30,11 @@ "frames": [ { "schema": { - "name": "1 / 0", "meta": { "type": "timeseries-multi", "typeVersion": [ 0, - 0 + 1 ], "custom": { "resultType": "matrix" diff --git a/pkg/promlib/testdata/range_missing.result.golden.jsonc b/pkg/promlib/testdata/range_missing.result.golden.jsonc index bb0c635b1a1..4272beb9993 100644 --- a/pkg/promlib/testdata/range_missing.result.golden.jsonc +++ b/pkg/promlib/testdata/range_missing.result.golden.jsonc @@ -4,17 +4,17 @@ // "type": "timeseries-multi", // "typeVersion": [ // 0, -// 0 +// 1 // ], // "custom": { // "resultType": "matrix" // }, // "executedQueryString": "Expr: test1\nStep: 1s" // } -// Name: go_goroutines{job="prometheus"} +// Name: // Dimensions: 2 Fields by 3 Rows // +-------------------------------+------------------------------------------------+ -// | Name: Time | Name: Value | +// | Name: Time | Name: go_goroutines | // | Labels: | Labels: __name__=go_goroutines, job=prometheus | // | Type: []time.Time | Type: []float64 | // +-------------------------------+------------------------------------------------+ @@ -30,12 +30,11 @@ "frames": [ { "schema": { - "name": "go_goroutines{job=\"prometheus\"}", "meta": { "type": "timeseries-multi", "typeVersion": [ 0, - 0 + 1 ], "custom": { "resultType": "matrix" @@ -54,7 +53,7 @@ } }, { - "name": "Value", + "name": "go_goroutines", "type": "number", "typeInfo": { "frame": "float64" diff --git a/pkg/promlib/testdata/range_nan.result.golden.jsonc b/pkg/promlib/testdata/range_nan.result.golden.jsonc index f9f7bfedc5e..9da1ecfeca7 100644 --- a/pkg/promlib/testdata/range_nan.result.golden.jsonc +++ b/pkg/promlib/testdata/range_nan.result.golden.jsonc @@ -4,14 +4,14 @@ // "type": "timeseries-multi", // "typeVersion": [ // 0, -// 0 +// 1 // ], // "custom": { // "resultType": "matrix" // }, // "executedQueryString": "Expr: \nStep: 1s" // } -// Name: {handler="/api/v1/query_range", job="prometheus"} +// Name: // Dimensions: 2 Fields by 3 Rows // +-------------------------------+-----------------------------------------------------+ // | Name: Time | Name: Value | @@ -30,12 +30,11 @@ "frames": [ { "schema": { - "name": "{handler=\"/api/v1/query_range\", job=\"prometheus\"}", "meta": { "type": "timeseries-multi", "typeVersion": [ 0, - 0 + 1 ], "custom": { "resultType": "matrix" diff --git a/pkg/promlib/testdata/range_simple.result.golden.jsonc b/pkg/promlib/testdata/range_simple.result.golden.jsonc index cc64185db37..a0fe631e2e3 100644 --- a/pkg/promlib/testdata/range_simple.result.golden.jsonc +++ b/pkg/promlib/testdata/range_simple.result.golden.jsonc @@ -4,17 +4,17 @@ // "type": "timeseries-multi", // "typeVersion": [ // 0, -// 0 +// 1 // ], // "custom": { // "resultType": "matrix" // }, // "executedQueryString": "Expr: \nStep: 1s" // } -// Name: prometheus_http_requests_total{code="200", handler="/api/v1/query_range", job="prometheus"} +// Name: // Dimensions: 2 Fields by 3 Rows // +-----------------------------------+--------------------------------------------------------------------------------------------------------+ -// | Name: Time | Name: Value | +// | Name: Time | Name: prometheus_http_requests_total | // | Labels: | Labels: __name__=prometheus_http_requests_total, code=200, handler=/api/v1/query_range, job=prometheus | // | Type: 
[]time.Time | Type: []float64 | // +-----------------------------------+--------------------------------------------------------------------------------------------------------+ @@ -29,16 +29,16 @@ // "type": "timeseries-multi", // "typeVersion": [ // 0, -// 0 +// 1 // ], // "custom": { // "resultType": "matrix" // } // } -// Name: prometheus_http_requests_total{code="400", handler="/api/v1/query_range", job="prometheus"} +// Name: // Dimensions: 2 Fields by 2 Rows // +-----------------------------------+--------------------------------------------------------------------------------------------------------+ -// | Name: Time | Name: Value | +// | Name: Time | Name: prometheus_http_requests_total | // | Labels: | Labels: __name__=prometheus_http_requests_total, code=400, handler=/api/v1/query_range, job=prometheus | // | Type: []time.Time | Type: []float64 | // +-----------------------------------+--------------------------------------------------------------------------------------------------------+ @@ -53,12 +53,11 @@ "frames": [ { "schema": { - "name": "prometheus_http_requests_total{code=\"200\", handler=\"/api/v1/query_range\", job=\"prometheus\"}", "meta": { "type": "timeseries-multi", "typeVersion": [ 0, - 0 + 1 ], "custom": { "resultType": "matrix" @@ -77,7 +76,7 @@ } }, { - "name": "Value", + "name": "prometheus_http_requests_total", "type": "number", "typeInfo": { "frame": "float64" @@ -111,12 +110,11 @@ }, { "schema": { - "name": "prometheus_http_requests_total{code=\"400\", handler=\"/api/v1/query_range\", job=\"prometheus\"}", "meta": { "type": "timeseries-multi", "typeVersion": [ 0, - 0 + 1 ], "custom": { "resultType": "matrix" @@ -134,7 +132,7 @@ } }, { - "name": "Value", + "name": "prometheus_http_requests_total", "type": "number", "typeInfo": { "frame": "float64" diff --git a/pkg/services/featuremgmt/registry.go b/pkg/services/featuremgmt/registry.go index de676bcc6b0..f760aa302da 100644 --- a/pkg/services/featuremgmt/registry.go +++ b/pkg/services/featuremgmt/registry.go @@ -318,14 +318,6 @@ var ( FrontendOnly: false, Owner: grafanaObservabilityMetricsSquad, }, - { - Name: "prometheusDataplane", - Description: "Changes responses to from Prometheus to be compliant with the dataplane specification. 
In particular, when this feature toggle is active, the numeric `Field.Name` is set from 'Value' to the value of the `__name__` label.", - Expression: "true", - Stage: FeatureStageGeneralAvailability, - Owner: grafanaObservabilityMetricsSquad, - AllowSelfServe: true, - }, { Name: "lokiMetricDataplane", Description: "Changes metric responses from Loki to be compliant with the dataplane specification.", diff --git a/pkg/services/featuremgmt/toggles_gen.csv b/pkg/services/featuremgmt/toggles_gen.csv index a01dd7b99ee..37eeb2bb3bc 100644 --- a/pkg/services/featuremgmt/toggles_gen.csv +++ b/pkg/services/featuremgmt/toggles_gen.csv @@ -40,7 +40,6 @@ influxdbBackendMigration,GA,@grafana/observability-metrics,false,false,true influxqlStreamingParser,experimental,@grafana/observability-metrics,false,false,false influxdbRunQueriesInParallel,privatePreview,@grafana/observability-metrics,false,false,false prometheusRunQueriesInParallel,privatePreview,@grafana/observability-metrics,false,false,false -prometheusDataplane,GA,@grafana/observability-metrics,false,false,false lokiMetricDataplane,GA,@grafana/observability-logs,false,false,false lokiLogsDataplane,experimental,@grafana/observability-logs,false,false,false dataplaneFrontendFallback,GA,@grafana/observability-metrics,false,false,true diff --git a/pkg/services/featuremgmt/toggles_gen.go b/pkg/services/featuremgmt/toggles_gen.go index a12046011d0..29b3def626e 100644 --- a/pkg/services/featuremgmt/toggles_gen.go +++ b/pkg/services/featuremgmt/toggles_gen.go @@ -171,10 +171,6 @@ const ( // Enables running Prometheus queries in parallel FlagPrometheusRunQueriesInParallel = "prometheusRunQueriesInParallel" - // FlagPrometheusDataplane - // Changes responses to from Prometheus to be compliant with the dataplane specification. In particular, when this feature toggle is active, the numeric `Field.Name` is set from 'Value' to the value of the `__name__` label. - FlagPrometheusDataplane = "prometheusDataplane" - // FlagLokiMetricDataplane // Changes metric responses from Loki to be compliant with the dataplane specification. FlagLokiMetricDataplane = "lokiMetricDataplane" diff --git a/pkg/services/featuremgmt/toggles_gen.json b/pkg/services/featuremgmt/toggles_gen.json index 1071e9af30a..533293c469b 100644 --- a/pkg/services/featuremgmt/toggles_gen.json +++ b/pkg/services/featuremgmt/toggles_gen.json @@ -1619,6 +1619,7 @@ "name": "lokiMetricDataplane", "resourceVersion": "1720021873452", "creationTimestamp": "2023-04-13T13:07:08Z", + "deletionTimestamp": "2024-08-21T13:49:48Z", "annotations": { "grafana.app/updatedTimestamp": "2024-07-03 15:51:13.452477 +0000 UTC" } @@ -2184,6 +2185,7 @@ "name": "prometheusDataplane", "resourceVersion": "1720021873452", "creationTimestamp": "2023-03-29T15:26:32Z", + "deletionTimestamp": "2024-08-21T13:35:19Z", "annotations": { "grafana.app/updatedTimestamp": "2024-07-03 15:51:13.452477 +0000 UTC" }
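A minimal standalone sketch of the naming convention this change makes unconditional: the value field's `Name` is taken from the `__name__` label and the legend is carried only in `Config.DisplayNameFromDS`, with `frame.Name` left empty (matching the updated golden files above). This assumes the `grafana-plugin-sdk-go` `data` package; `renderLegend` and `nameFrameDataplane` are hypothetical stand-ins for promlib's legend formatting and `addMetadataToMultiFrame`, not code from this patch.

```go
// Sketch only, not part of this patch. Shows the dataplane-style naming that
// is now always applied to Prometheus multi-frame responses: the numeric
// field is named after the __name__ label, the rendered legend goes into
// Config.DisplayNameFromDS, and the frame itself is left unnamed.
package main

import (
	"fmt"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/data"
)

// renderLegend is a hypothetical stand-in for promlib's legend formatting,
// which normally honors the query's legend format option.
func renderLegend(labels data.Labels) string {
	return labels.String()
}

// nameFrameDataplane mirrors the behavior that used to sit behind the
// prometheusDataplane toggle and is now unconditional.
func nameFrameDataplane(frame *data.Frame) {
	if len(frame.Fields) < 2 {
		return
	}
	valueField := frame.Fields[1]
	if valueField.Config == nil {
		valueField.Config = &data.FieldConfig{}
	}
	// The legend only affects the display name, never Field.Name.
	valueField.Config.DisplayNameFromDS = renderLegend(valueField.Labels)
	// Field.Name comes from the __name__ label instead of the literal "Value".
	if n, ok := valueField.Labels["__name__"]; ok {
		valueField.Name = n
	}
}

func main() {
	frame := data.NewFrame("",
		data.NewField("Time", nil, []time.Time{time.Unix(0, 0)}),
		data.NewField("Value",
			data.Labels{"__name__": "go_goroutines", "job": "prometheus"},
			[]float64{42}),
	)
	nameFrameDataplane(frame)
	fmt.Println(frame.Fields[1].Name)                     // go_goroutines
	fmt.Println(frame.Fields[1].Config.DisplayNameFromDS) // __name__=go_goroutines, job=prometheus
}
```

Frontend code that still expects the old `Value` field name can rely on the separate `dataplaneFrontendFallback` toggle listed in the table above; that fallback is unchanged by this patch.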