diff --git a/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md b/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md
index 5111856e609..c4d741c6e92 100644
--- a/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md
+++ b/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md
@@ -77,6 +77,7 @@ Most [generally available](https://grafana.com/docs/release-life-cycle/#general-
 | `pinNavItems`                  | Enables pinning of nav items                                                                      | Yes |
 | `openSearchBackendFlowEnabled` | Enables the backend query flow for Open Search datasource plugin                                  | Yes |
 | `cloudWatchRoundUpEndTime`     | Round up end time for metric queries to the next minute to avoid missing data                     | Yes |
+| `azureMonitorDisableLogLimit`  | Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default. |     |
 
 ## Public preview feature toggles
 
diff --git a/packages/grafana-data/src/types/featureToggles.gen.ts b/packages/grafana-data/src/types/featureToggles.gen.ts
index 644e5531261..a0673322b10 100644
--- a/packages/grafana-data/src/types/featureToggles.gen.ts
+++ b/packages/grafana-data/src/types/featureToggles.gen.ts
@@ -225,4 +225,5 @@ export interface FeatureToggles {
   unifiedStorageBigObjectsSupport?: boolean;
   timeRangeProvider?: boolean;
   prometheusUsesCombobox?: boolean;
+  azureMonitorDisableLogLimit?: boolean;
 }
diff --git a/pkg/services/featuremgmt/registry.go b/pkg/services/featuremgmt/registry.go
index 2141ce0cd6d..5de1807b66a 100644
--- a/pkg/services/featuremgmt/registry.go
+++ b/pkg/services/featuremgmt/registry.go
@@ -1548,6 +1548,13 @@ var (
 			Stage:       FeatureStageExperimental,
 			Owner:       grafanaObservabilityMetricsSquad,
 		},
+		{
+			Name:        "azureMonitorDisableLogLimit",
+			Description: "Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default.",
+			Stage:       FeatureStageGeneralAvailability,
+			Owner:       grafanaPartnerPluginsSquad,
+			Expression:  "false",
+		},
 	}
 )
diff --git a/pkg/services/featuremgmt/toggles_gen.csv b/pkg/services/featuremgmt/toggles_gen.csv
index a452651a933..99efc0e76cd 100644
--- a/pkg/services/featuremgmt/toggles_gen.csv
+++ b/pkg/services/featuremgmt/toggles_gen.csv
@@ -206,3 +206,4 @@ pluginsSriChecks,experimental,@grafana/plugins-platform-backend,false,false,false
 unifiedStorageBigObjectsSupport,experimental,@grafana/search-and-storage,false,false,false
 timeRangeProvider,experimental,@grafana/grafana-frontend-platform,false,false,false
 prometheusUsesCombobox,experimental,@grafana/observability-metrics,false,false,false
+azureMonitorDisableLogLimit,GA,@grafana/partner-datasources,false,false,false
diff --git a/pkg/services/featuremgmt/toggles_gen.go b/pkg/services/featuremgmt/toggles_gen.go
index 5e91ba9dd42..9b24818435b 100644
--- a/pkg/services/featuremgmt/toggles_gen.go
+++ b/pkg/services/featuremgmt/toggles_gen.go
@@ -834,4 +834,8 @@ const (
 	// FlagPrometheusUsesCombobox
 	// Use new combobox component for Prometheus query editor
 	FlagPrometheusUsesCombobox = "prometheusUsesCombobox"
+
+	// FlagAzureMonitorDisableLogLimit
+	// Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default.
+	FlagAzureMonitorDisableLogLimit = "azureMonitorDisableLogLimit"
 )
diff --git a/pkg/services/featuremgmt/toggles_gen.json b/pkg/services/featuremgmt/toggles_gen.json
index bb89eb8bcfc..65906e21cb7 100644
--- a/pkg/services/featuremgmt/toggles_gen.json
+++ b/pkg/services/featuremgmt/toggles_gen.json
@@ -605,6 +605,39 @@
       "codeowner": "@grafana/aws-datasources"
     }
   },
+  {
+    "metadata": {
+      "name": "azureMonitorDisableLogLimit",
+      "resourceVersion": "1727698096407",
+      "creationTimestamp": "2024-09-30T11:51:51Z",
+      "deletionTimestamp": "2024-10-22T09:44:12Z",
+      "annotations": {
+        "grafana.app/updatedTimestamp": "2024-09-30 12:08:16.407109 +0000 UTC"
+      }
+    },
+    "spec": {
+      "description": "Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default.",
+      "stage": "GA",
+      "codeowner": "@grafana/partner-datasources",
+      "expression": "false"
+    }
+  },
+  {
+    "metadata": {
+      "name": "azureMonitorLogLimit",
+      "resourceVersion": "1727696791818",
+      "creationTimestamp": "2024-09-30T11:45:45Z",
+      "deletionTimestamp": "2024-09-30T11:51:51Z",
+      "annotations": {
+        "grafana.app/updatedTimestamp": "2024-09-30 11:46:31.818302 +0000 UTC"
+      }
+    },
+    "spec": {
+      "description": "Control the log limit restriction for Azure Monitor",
+      "stage": "GA",
+      "codeowner": "@grafana/partner-datasources"
+    }
+  },
   {
     "metadata": {
       "name": "azureMonitorPrometheusExemplars",
diff --git a/pkg/tsdb/azuremonitor/loganalytics/azure-log-analytics-datasource.go b/pkg/tsdb/azuremonitor/loganalytics/azure-log-analytics-datasource.go
index 62b2dd1267c..361ed9279be 100644
--- a/pkg/tsdb/azuremonitor/loganalytics/azure-log-analytics-datasource.go
+++ b/pkg/tsdb/azuremonitor/loganalytics/azure-log-analytics-datasource.go
@@ -320,7 +320,8 @@ func (e *AzureLogAnalyticsDatasource) executeQuery(ctx context.Context, query *A
 		return nil, err
 	}
 
-	frame, err := ResponseTableToFrame(t, query.RefID, query.Query, query.QueryType, query.ResultFormat)
+	logLimitDisabled := backend.GrafanaConfigFromContext(ctx).FeatureToggles().IsEnabled("azureMonitorDisableLogLimit")
+	frame, err := ResponseTableToFrame(t, query.RefID, query.Query, query.QueryType, query.ResultFormat, logLimitDisabled)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame.go b/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame.go
index 3b72999914e..efe24ca10a9 100644
--- a/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame.go
+++ b/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame.go
@@ -45,12 +45,12 @@ func apiErrorToNotice(err *AzureLogAnalyticsAPIError) data.Notice {
 }
 
 // ResponseTableToFrame converts an AzureResponseTable to a data.Frame.
-func ResponseTableToFrame(table *types.AzureResponseTable, refID string, executedQuery string, queryType dataquery.AzureQueryType, resultFormat dataquery.ResultFormat) (*data.Frame, error) {
+func ResponseTableToFrame(table *types.AzureResponseTable, refID string, executedQuery string, queryType dataquery.AzureQueryType, resultFormat dataquery.ResultFormat, logLimitDisabled bool) (*data.Frame, error) {
 	if len(table.Rows) == 0 {
 		return nil, nil
 	}
 
-	converterFrame, err := converterFrameForTable(table, queryType, resultFormat)
+	converterFrame, err := converterFrameForTable(table, queryType, resultFormat, logLimitDisabled)
 	if err != nil {
 		return nil, err
 	}
@@ -66,7 +66,7 @@ func ResponseTableToFrame(table *types.AzureResponseTable, refID string, execute
 	return converterFrame.Frame, nil
 }
 
-func converterFrameForTable(t *types.AzureResponseTable, queryType dataquery.AzureQueryType, resultFormat dataquery.ResultFormat) (*data.FrameInputConverter, error) {
+func converterFrameForTable(t *types.AzureResponseTable, queryType dataquery.AzureQueryType, resultFormat dataquery.ResultFormat, logLimitDisabled bool) (*data.FrameInputConverter, error) {
 	converters := []data.FieldConverter{}
 	colNames := make([]string, len(t.Columns))
 	colTypes := make([]string, len(t.Columns)) // for metadata
@@ -84,6 +84,14 @@ func converterFrameForTable(t *types.AzureResponseTable, queryType dataquery.Azu
 		converters = append(converters, converter)
 	}
 
+	rowLimit := 30000
+	limitExceeded := false
+	if len(t.Rows) > rowLimit && resultFormat == dataquery.ResultFormatLogs && !logLimitDisabled {
+		// We limit the number of rows to 30k to prevent crashing the browser tab as the logs viz is not virtualised.
+		t.Rows = t.Rows[:rowLimit]
+		limitExceeded = true
+	}
+
 	fic, err := data.NewFrameInputConverter(converters, len(t.Rows))
 	if err != nil {
 		return nil, err
@@ -98,6 +106,13 @@ func converterFrameForTable(t *types.AzureResponseTable, queryType dataquery.Azu
 		Custom: &LogAnalyticsMeta{ColumnTypes: colTypes},
 	}
 
+	if limitExceeded {
+		fic.Frame.AppendNotices(data.Notice{
+			Severity: data.NoticeSeverityWarning,
+			Text:     "The number of results in the result set has been limited to 30,000.",
+		})
+	}
+
 	return fic, nil
 }
diff --git a/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame_test.go b/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame_test.go
index 139ba7987d9..c73ca055cac 100644
--- a/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame_test.go
+++ b/pkg/tsdb/azuremonitor/loganalytics/azure-response-table-frame_test.go
@@ -5,12 +5,14 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"testing"
 
 	"github.com/grafana/grafana-plugin-sdk-go/data"
 	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/kinds/dataquery"
 	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/testdata"
+	"github.com/grafana/grafana/pkg/tsdb/azuremonitor/types"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -54,7 +56,7 @@ func TestLogTableToFrame(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			res := loadTestFileWithNumber(t, tt.testFile)
-			frame, err := ResponseTableToFrame(&res.Tables[0], "A", "query", dataquery.AzureQueryTypeAzureLogAnalytics, dataquery.ResultFormatTable)
+			frame, err := ResponseTableToFrame(&res.Tables[0], "A", "query", dataquery.AzureQueryTypeAzureLogAnalytics, dataquery.ResultFormatTable, false)
 			appendErrorNotice(frame, res.Error)
 			require.NoError(t, err)
@@ -112,7 +114,7 @@ func TestTraceTableToFrame(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			res := loadTestFileWithNumber(t, tt.testFile)
-			frame, err := ResponseTableToFrame(&res.Tables[0], "A", "query", tt.queryType, tt.resultFormat)
+			frame, err := ResponseTableToFrame(&res.Tables[0], "A", "query", tt.queryType, tt.resultFormat, false)
 			appendErrorNotice(frame, res.Error)
 			require.NoError(t, err)
@@ -121,6 +123,62 @@ func TestTraceTableToFrame(t *testing.T) {
 	}
 }
 
+func TestLargeLogsResponse(t *testing.T) {
+	t.Run("large logs response with limit enabled", func(t *testing.T) {
+		res := AzureLogAnalyticsResponse{
+			Tables: []types.AzureResponseTable{
+				{Name: "PrimaryResult",
+					Columns: []struct {
+						Name string `json:"name"`
+						Type string `json:"type"`
+					}{
+						{Name: "value", Type: "int"},
+					}},
+			},
+		}
+		rows := [][]any{}
+		for i := 0; i <= 30000; i++ {
+			rows = append(rows, []any{json.Number(strconv.Itoa(i))})
+		}
+		res.Tables[0].Rows = rows
+		resultFormat := dataquery.ResultFormatLogs
+		frame, err := ResponseTableToFrame(&res.Tables[0], "A", "query", dataquery.AzureQueryTypeAzureLogAnalytics, resultFormat, false)
+		appendErrorNotice(frame, res.Error)
+		require.NoError(t, err)
+		require.Equal(t, frame.Rows(), 30000)
+		require.Len(t, frame.Meta.Notices, 1)
+		require.Equal(t, frame.Meta.Notices[0], data.Notice{
+			Severity: data.NoticeSeverityWarning,
+			Text:     "The number of results in the result set has been limited to 30,000.",
+		})
+	})
+
+	t.Run("large logs response with limit disabled", func(t *testing.T) {
+		res := AzureLogAnalyticsResponse{
+			Tables: []types.AzureResponseTable{
+				{Name: "PrimaryResult",
+					Columns: []struct {
+						Name string `json:"name"`
+						Type string `json:"type"`
+					}{
+						{Name: "value", Type: "int"},
+					}},
+			},
+		}
+		rows := [][]any{}
+		for i := 0; i < 40000; i++ {
+			rows = append(rows, []any{json.Number(strconv.Itoa(i))})
+		}
+		res.Tables[0].Rows = rows
+		resultFormat := dataquery.ResultFormatLogs
+		frame, err := ResponseTableToFrame(&res.Tables[0], "A", "query", dataquery.AzureQueryTypeAzureLogAnalytics, resultFormat, true)
+		appendErrorNotice(frame, res.Error)
+		require.NoError(t, err)
+		require.Equal(t, frame.Rows(), 40000)
+		require.Nil(t, frame.Meta.Notices)
+	})
+}
+
 func loadTestFileWithNumber(t *testing.T, name string) AzureLogAnalyticsResponse {
 	t.Helper()
 	path := filepath.Join("../testdata", name)
diff --git a/pkg/tsdb/azuremonitor/resourcegraph/azure-resource-graph-datasource.go b/pkg/tsdb/azuremonitor/resourcegraph/azure-resource-graph-datasource.go
index dfa1f70537d..699ee7055a5 100644
--- a/pkg/tsdb/azuremonitor/resourcegraph/azure-resource-graph-datasource.go
+++ b/pkg/tsdb/azuremonitor/resourcegraph/azure-resource-graph-datasource.go
@@ -172,7 +172,7 @@ func (e *AzureResourceGraphDatasource) executeQuery(ctx context.Context, query *
 		return nil, err
 	}
 
-	frame, err := loganalytics.ResponseTableToFrame(&argResponse.Data, query.RefID, query.InterpolatedQuery, dataquery.AzureQueryType(query.QueryType), dataquery.ResultFormat(query.ResultFormat))
+	frame, err := loganalytics.ResponseTableToFrame(&argResponse.Data, query.RefID, query.InterpolatedQuery, dataquery.AzureQueryType(query.QueryType), dataquery.ResultFormat(query.ResultFormat), false)
 	if err != nil {
 		return nil, err
 	}
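
For reference, a minimal sketch of how an operator would switch the new toggle on, assuming Grafana's standard feature-toggle configuration; the config fragment below is illustrative and not part of the patch above:

    # grafana.ini / custom.ini: enable the toggle so logs-format responses are not capped at 30,000 rows
    [feature_toggles]
    enable = azureMonitorDisableLogLimit

    # equivalent environment-variable form:
    # GF_FEATURE_TOGGLES_ENABLE=azureMonitorDisableLogLimit

With the toggle enabled, the Log Analytics executeQuery passes logLimitDisabled=true into ResponseTableToFrame, so the 30,000-row truncation and its warning notice are skipped for the logs result format; Azure Resource Graph queries keep the default behaviour because that caller always passes false.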