Merge branch 'main' into drclau/unistor/replace-authenticators-3

This commit is contained in:
Claudiu Dragalina-Paraipan 2024-08-30 10:45:51 +03:00
commit 0fd1988bed
167 changed files with 3723 additions and 961 deletions

6
.github/CODEOWNERS vendored
View File

@ -36,7 +36,7 @@
/docs/ @grafana/docs-tooling
/docs/.codespellignore @grafana/docs-tooling
/docs/sources/ @Eve832
/docs/sources/ @irenerl24
/docs/sources/administration/ @jdbaldry
/docs/sources/alerting/ @brendamuir
@ -115,7 +115,7 @@
/pkg/services/annotations/ @grafana/grafana-search-and-storage
/pkg/services/apikey/ @grafana/identity-squad
/pkg/services/cleanup/ @grafana/grafana-backend-group
/pkg/services/contexthandler/ @grafana/grafana-backend-group
/pkg/services/contexthandler/ @grafana/grafana-backend-group @grafana/grafana-app-platform-squad
/pkg/services/correlations/ @grafana/explore-squad
/pkg/services/dashboardimport/ @grafana/grafana-backend-group
/pkg/services/dashboards/ @grafana/grafana-app-platform-squad
@ -320,7 +320,7 @@
/e2e/ @grafana/grafana-frontend-platform
/e2e/cloud-plugins-suite/ @grafana/partner-datasources
/e2e/plugin-e2e/plugin-e2e-api-tests/ @grafana/plugins-platform-frontend
/e2e/test-plugins/grafana-extensionstest-app/ @grafana/plugins-platform-frontend
/e2e/test-plugins/grafana-extensionstest-app/ @grafana/plugins-platform-frontend
# Packages
/packages/ @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend

View File

@ -37,6 +37,8 @@ In [Grafana Enterprise]({{< relref "../../introduction/grafana-enterprise/" >}})
{{% admonition type="note" %}}
Service accounts can only act in the organization they are created for. If you have the same task that is needed for multiple organizations, we recommend creating service accounts in each organization.
Service accounts can't be used for instance-wide operations, such as global user management and organization management. For these tasks, you need to use a user with [Grafana server administrator permissions]({{< relref "../roles-and-permissions/#grafana-server-administrators" >}}).
{{% /admonition %}}
{{< vimeo 742056367 >}}

View File

@ -58,7 +58,6 @@ In the following JSON, id is shown as null which is the default value assigned t
"to": "now"
},
"timepicker": {
"time_options": [],
"refresh_intervals": []
},
"templating": {
@ -137,17 +136,6 @@ The grid has a negative gravity that moves panels up if there is empty space abo
"now": true,
"hidden": false,
"nowDelay": "",
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
],
"refresh_intervals": [
"5s",
"10s",
@ -175,7 +163,6 @@ Usage of the fields is explained below:
| **now** | |
| **hidden** | whether timepicker is hidden or not |
| **nowDelay** | override the now time by entering a time delay. Use this option to accommodate known delays in data aggregation to avoid null values. |
| **time_options** | options available in the time picker dropdown |
| **refresh_intervals** | interval options available in the refresh picker dropdown |
| **status** | |
| **type** | |

View File

@ -18,9 +18,15 @@ title: 'Admin HTTP API '
# Admin API
The Admin HTTP API does not currently work with an API Token. API Tokens are currently only linked to an organization and an organization role. They cannot be given
the permission of server admin, only users can be given that permission. So in order to use these API calls you will have to use Basic Auth and the Grafana user
must have the Grafana Admin permission. (The default admin user is called `admin` and has permission to use this API.)
{{< admonition type="caution" >}}
You can't authenticate to the Admin HTTP API with service account tokens.
Service accounts are limited to an organization and an organization role.
They can't be granted [Grafana server administrator permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/#grafana-server-administrators).
To use these API endpoints you have to use Basic authentication and the Grafana user must have the Grafana server administrator permission.
The `admin` user that Grafana is provisioned with by default has permissions to use these API endpoints.
{{< /admonition >}}
> If you are running Grafana Enterprise, for some endpoints you'll need to have specific permissions. Refer to [Role-based access control permissions]({{< relref "../../administration/roles-and-permissions/access-control/custom-role-actions-scopes/" >}}) for more information.

View File

@ -217,31 +217,7 @@ Content-Length: 1300
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timepicker": {},
"timezone": "browser",
"title": "test",
"version": 1
@ -328,31 +304,7 @@ Content-Length: 1300
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timepicker": {},
"timezone": "browser",
"title": "test",
"version": 1

View File

@ -21,6 +21,16 @@ title: Licensing HTTP API
Licensing is only available in Grafana Enterprise. Read more about [Grafana Enterprise]({{< relref "/docs/grafana/latest/introduction/grafana-enterprise" >}}).
{{< admonition type="caution" >}}
You can't authenticate to the Licensing HTTP API with service account tokens.
Service accounts are limited to an organization and an organization role.
They can't be granted [Grafana server administrator permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/#grafana-server-administrators).
To use these API endpoints you have to use Basic authentication and the Grafana user must have the Grafana server administrator permission.
The `admin` user that Grafana is provisioned with by default has permissions to use these API endpoints.
{{< /admonition >}}
> If you are running Grafana Enterprise, for some endpoints you'll need to have specific permissions. Refer to [Role-based access control permissions]({{< relref "/docs/grafana/latest/administration/roles-and-permissions/access-control/custom-role-actions-scopes" >}}) for more information.
## Check license availability

View File

@ -282,11 +282,15 @@ Content-Type: application/json
## Admin Organizations API
The Admin Organizations HTTP API does not currently work with an API Token. API Tokens are currently
only linked to an organization and an organization role. They cannot be given the permission of server
admin, only users can be given that permission. So in order to use these API calls you will have to
use Basic Auth and the Grafana user must have the Grafana Admin permission (The default admin user
is called `admin` and has permission to use this API).
{{< admonition type="caution" >}}
You can't authenticate to the Admin Organizations HTTP API with service account tokens.
Service accounts are limited to an organization and an organization role.
They can't be granted [Grafana server administrator permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/#grafana-server-administrators).
To use these API endpoints you have to use Basic authentication and the Grafana user must have the Grafana server administrator permission.
The `admin` user that Grafana is provisioned with by default has permissions to use these API endpoints.
{{< /admonition >}}
### Get Organization by Id

View File

@ -24,11 +24,17 @@ refs:
# User API
The Users HTTP API does not currently work with an API Token. API Tokens are linked to an organization and an organization role. They cannot be given
the permission of server users access, only users can be given that permission. To use these API calls you can use Basic Auth and the Grafana
user must have the Grafana Admin role.
{{< admonition type="caution" >}}
You can't authenticate to the User HTTP API with service account tokens.
Service accounts are limited to an organization and an organization role.
They can't be granted [Grafana server administrator permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/#grafana-server-administrators).
API tokens can be used with the Organization HTTP API to get users of a specific organization.
Alternatively, you can use the [Organization HTTP API](/docs/grafana/<GRAFANA_VERSION>/developers/http_api/org/#current-organization-api) with service account tokens to manage users in a specific organization.
To use these API endpoints you have to use Basic authentication and the Grafana user must have the Grafana server administrator permission.
The `admin` user that Grafana is provisioned with by default has permissions to use these API endpoints.
{{< /admonition >}}
> If you are running Grafana Enterprise, for some endpoints you'll need to have specific permissions. Refer to [Role-based access control permissions](ref:role-based-access-control-permissions) for more information.

View File

@ -4,7 +4,7 @@ labels:
- enterprise
- oss
title: Correlations Editor in Explore
weight: 400
weight: 20
---
# Correlations Editor in Explore

View File

@ -8,7 +8,7 @@ labels:
keywords:
- Explore
title: Query inspector in Explore
weight: 40
weight: 15
---
# Query inspector in Explore

View File

@ -9,7 +9,7 @@ labels:
- enterprise
- oss
title: Logs in Explore
weight: 15
weight: 25
---
# Logs in Explore

View File

@ -8,7 +8,7 @@ labels:
- enterprise
- oss
title: Traces in Explore
weight: 20
weight: 40
---
# Traces in Explore

View File

@ -41,31 +41,57 @@ refs:
# Bar chart
Bar charts allow you to graph categorical data.
A bar chart is a visual representation that uses rectangular bars, where the length of each bar represents each value.
You can use the bar chart visualization when you want to compare values over different categories or time periods. The visualization can display the bars horizontally or vertically, and can be customized to group or stack bars for more complex data analysis.
{{< figure src="/static/img/docs/bar-chart-panel/barchart_small_example.png" max-width="1000px" caption="Bar chart" >}}
{{< figure src="/static/img/docs/bar-chart-panel/barchart_small_example.png" max-width="1000px" alt="Bar chart" >}}
You can use the bar chart visualization if you need to show:
- Population distribution by age or location
- CPU usage per application
- Sales per division
- Server cost distribution
## Configure a bar chart
The following video shows you how to create and configure a bar chart visualization:
{{< youtube id="qyKE9-71KkE" >}}
{{< docs/play title="Grafana Bar Charts and Pie Charts" url="https://play.grafana.org/d/ktMs4D6Mk/" >}}
## Supported data formats
Only one data frame is supported and it must have at least one string field that will be used as the category for an X or Y axis and one or more numerical fields.
To create a bar chart visualization, you need a dataset containing one string or time field (or column) and at least one numeric field, though preferably more than one to make best use of the visualization.
Example:
The text or time field is used to label the bars or values in each row of data and the numeric fields are represented by proportionally sized bars.
| Browser | Market share |
| ------- | ------------ |
| Chrome | 50 |
| IE | 17.5 |
### Example 1
If you have more than one numerical field the visualization will show grouped bars.
| Group | Value1 | Value2 | Value3 |
| ----- | ------ | ------ | ------ |
| uno | 5 | 3 | 2 |
### Visualizing time series or multiple result sets
![Bar chart single row example](/media/docs/grafana/panels-visualizations/screenshot-grafana-11.1-barchart-example1.png 'Bar chart single row example')
If you have multiple time series or tables, you first need to join them using a join or reduce transform. For example, if you
have multiple time series and you want to compare their last and max values, add the **Reduce** transform and specify **Max** and **Last** as options under **Calculations**.
If you have more than one text or time field, by default, the visualization uses the first one, but you can change this in the x-axis option as described in the [Bar chart options](#bar-chart-options) section.
{{< figure src="/static/img/docs/bar-chart-panel/bar-chart-time-series-v8-0.png" max-width="1025px" caption="Bar chart time series example" >}}
### Example 2
If your dataset contains multiple rows, the visualization displays multiple bar chart groups where each group contains multiple bars representing all the numeric values for a row.
| Group | Value1 | Value2 | Value3 |
| ----- | ------ | ------ | ------ |
| uno | 5 | 3 | 2 |
| dos | 10 | 6 | 4 |
| tres | 20 | 8 | 2 |
![Bar chart multiple row example](/media/docs/grafana/panels-visualizations/screenshot-grafana-11.1-barchart-example2.png 'Bar chart multiple row example')
While the first field can be time-based and you can use a bar chart to plot time-series data, for large amounts of time-series data, we recommend that you use the [time series visualization](https://grafana.com/docs/grafana/latest/panels-visualizations/visualizations/time-series/) and configure it to be displayed as bars.
We recommend that you only use one dataset in a bar chart because using multiple datasets can result in unexpected behavior.
## Panel options
@ -75,6 +101,10 @@ have multiple time series and you want to compare their last and max value add t
Use these options to refine your visualization.
### X Axis
Specify which field is used for the x-axis.
### Orientation
- **Auto** - Grafana decides the bar orientation based on the panel dimensions.

View File

@ -25,14 +25,70 @@ refs:
# Bar gauge
Bar gauges simplify your data by reducing every field to a single value. You choose how Grafana calculates the reduction.
This panel can show one or more bar gauges depending on how many series, rows, or columns your query returns.
Bar gauges simplify your data by reducing every field to a single value. You choose how Grafana calculates the reduction. This visualization can show one or more bar gauges depending on how many series, rows, or columns your query returns.
{{< figure src="/static/img/docs/v66/bar_gauge_cover.png" max-width="1025px" alt="Bar gauge panel" >}}
The bar gauge visualization displays values as bars with various lengths or fills proportional to the values they represent. They differ from traditional bar charts in that they act as gauges displaying metrics between ranges. One example is a thermometer displaying body temperature in a bar filling up.
You can use a bar gauge visualization when you need to show:
- Key performance indicators (KPIs)
- System health
- Savings goals
- Attendance
- Process completion rates
{{< docs/play title="Bar Gauge" url="https://play.grafana.org/d/vmie2cmWz/" >}}
## Supported data formats
To create a bar gauge visualization, you need a dataset querying at least one numeric field. Every numeric field in the dataset is displayed as a bar gauge. Text or time fields aren't required but if they're present, they're used for labeling.
### Example 1
| Label | Value1 | Value2 | Value3 |
| ----- | ------ | ------ | ------ |
| Row1 | 5 | 3 | 2 |
![Bar gauge with single row of data](/media/docs/grafana/panels-visualizations/screenshot-grafana-12.1-bargauge-example1.png)
The minimum and maximum range for the bar gauges is automatically pulled from the largest and smallest numeric values in the dataset. You can also manually define the minimum and maximum values as indicated in the [Standard options](#standard-options) section.
You can also define the minimum and maximum from the dataset provided.
### Example 2
| Label | Value | Max | Min |
| ----- | ----- | --- | --- |
| Row1 | 3 | 6 | 1 |
![Bar gauge with single row of data including maximum and minimum](/media/docs/grafana/panels-visualizations/screenshot-grafana-12.1-bargauge-example2.png)
If you don't want to show gauges for the min and max values, you can configure only one field to be displayed as described in the [Value options](#value-options) section.
![Bar gauge, single row of data with max and min displaying value](/media/docs/grafana/panels-visualizations/screenshot-grafana-12.1-bargauge-example3.png)
Even if the min and max aren't displayed, the visualization still pulls the range from the dataset.
### Example 3
The bar gauge visualization also supports multiple records (rows) in the dataset.
| Label | Value1 | Value2 | Value3 |
| ----- | ------ | ------ | ------ |
| Row1 | 5 | 3 | 2 |
| Row2 | 10 | 6 | 4 |
| Row3 | 20 | 8 | 2 |
![Bar gauge with multiple rows of data displaying last row](/media/docs/grafana/panels-visualizations/screenshot-grafana-12.1-bargauge-example4.png)
By default, the visualization is configured to [calculate](#value-options) a single value per column or series and to display only the last set of data. However, it derives the minimum and maximum from the full dataset even if those values aren't visible. In this example, that means only the last row of data is displayed in the gauges and the minimum and maximum values are defined as 2 and 20, pulled from the whole dataset.
If you want to show one gauge per cell, you can change the [Show](#show) setting from [Calculate](#calculate) to [All values](#all-values), and each bar is labeled by concatenating the text column with each value's column name.
![Bar gauge with multiple rows of data displaying all the values](/media/docs/grafana/panels-visualizations/screenshot-grafana-12.1-bargauge-example5.png)
## Panel options
{{< docs/shared lookup="visualizations/panel-options.md" source="grafana" version="<GRAFANA_VERSION>" >}}
@ -141,6 +197,10 @@ Automatically show y-axis scrollbar when there's a large amount of data.
This option only applies when bar size is set to manual.
{{% /admonition %}}
## Legend options
{{< docs/shared lookup="visualizations/legend-options-1.md" source="grafana" version="<GRAFANA_VERSION>" >}}
## Standard options
{{< docs/shared lookup="visualizations/standard-options.md" source="grafana" version="<GRAFANA_VERSION>" >}}
@ -157,6 +217,10 @@ This option only applies when bar size is set to manual.
{{< docs/shared lookup="visualizations/thresholds-options-2.md" source="grafana" version="<GRAFANA_VERSION>" >}}
Last, colors of the bar gauge thresholds can be configured as described above.
![Bar gauge with colored thresholds configured](/media/docs/grafana/panels-visualizations/screenshot-grafana-12.1-bargauge-example6.png)
## Field overrides
{{< docs/shared lookup="visualizations/overrides-options.md" source="grafana" version="<GRAFANA_VERSION>" >}}

View File

@ -194,6 +194,10 @@ Experimental features might be changed or removed without prior notice.
| `dataplaneAggregator` | Enable grafana dataplane aggregator |
| `adhocFilterOneOf` | Exposes a new 'one of' operator for ad-hoc filters. This operator allows users to filter by multiple values in a single filter. |
| `lokiSendDashboardPanelNames` | Send dashboard and panel names to Loki when querying |
| `singleTopNav` | Unifies the top search bar and breadcrumb bar into one |
| `exploreLogsShardSplitting` | Used in Explore Logs to split queries into multiple queries based on the number of shards |
| `exploreLogsAggregatedMetrics` | Used in Explore Logs to query by aggregated metrics |
| `exploreLogsLimitedTimeRange` | Used in Explore Logs to limit the time range |
## Development feature toggles

View File

@ -232,10 +232,7 @@ For more information on how to configure dashboard providers, refer to [Dashboar
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"],
"time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
},
"timepicker": {},
"timezone": "browser",
"title": "Cluster",
"version": 0

View File

@ -19,4 +19,51 @@ weight: 1000
{{< docs/shared lookup="upgrade/upgrade-common-tasks.md" source="grafana" version="<GRAFANA_VERSION>" >}}
## Ensure that your data source UIDs are following the correct standard
We have had a standard way to define UIDs for Grafana objects for years (at least [since Grafana 5](https://github.com/grafana/grafana/issues/7883)). While all of our internal code complies with this format, we have not yet had strict enforcement of this format in the REST APIs and provisioning paths that allow creation and update of data sources.
In Grafana `11.1` we [introduced](https://github.com/grafana/grafana/pull/86598) a warning that is sent to Grafana server logs every time a data source instance is being created or updated using an invalid UID format.
In Grafana `11.2` we [added](https://github.com/grafana/grafana/pull/89363/files) a new feature flag called `failWrongDSUID` that is turned off by default. When enabled, the REST APIs and provisioning will start rejecting any requests to create or update data source instances that have an invalid UID.
In Grafana `11.5`, we are going to turn feature flag `failWrongDSUID` on by default, but there will still be an option to turn it off.
In Grafana `12`, this will be the default behavior and will not be configurable.
### Correct UID format
You can find the exact regex definition [here](https://github.com/grafana/grafana/blob/c92f5169d1c83508beb777f71a93336179fe426e/pkg/util/shortid_generator.go#L32-L45).
A data source UID can only contain:
- Latin characters (`a-z`, `A-Z`)
- numbers (`0-9`)
- dash and underscore symbols (`-`, `_`)
### How can I know if I am affected?
- You can fetch all your data sources via `/api/datasources` API ([docs](https://grafana.com/docs/grafana/latest/developers/http_api/data_source/#get-all-data-sources)). Look into `uid` fields comparing it to the correct format. Below you'll find a script that could help, but please note it is missing authentication that you would [have to add yourself](https://grafana.com/docs/grafana/latest/developers/http_api/#authenticating-api-requests).
```
curl http://localhost:3000/api/datasources | jq '.[] | select((.uid | test("^[a-zA-Z0-9\\-_]+$") | not) or (.uid | length > 40)) | {id, uid, name, type}'
```
- Alternatively, you can check the server logs for the `Invalid datasource uid` error message ([reference](https://github.com/grafana/grafana/blob/68751ed3107c4d15d33f34b15183ee276611785c/pkg/services/datasources/service/store.go#L429))
### What can I do if I am affected?
You will need to create a new data source with the correct UID and update your dashboards and alert rules to use it.
### How can I update my dashboards to use the new or updated data source?
1. Go to the dashboard using this data source and update it by selecting the new or updated data source from the picker below your panel.
2. Update the dashboard's JSON model directly via search and replace. Navigate to [dashboard json model](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/view-dashboard-json-model/) and carefully replace all the instances of old `uid` with the newly created `uid`.
{{< figure src="/media/docs/grafana/screenshot-grafana-11-datasource-uid-enforcement.png" alt="Updating JSON Model of a Dashboard">}}
### How can I update my alert rules to use the new or updated data source?
Open the alert rule you want to adjust and search for the data source that is being used for the query/alert condition. From there, select the new data source from the dropdown menu and save the alert rule.
## Technical notes

View File

@ -51,7 +51,9 @@ export const importDashboard = (dashboardToImport: Dashboard, queryTimeout?: num
e2e.components.Panels.Panel.menu(panel.title).click({ force: true }); // force click because menu is hidden and show on hover
e2e.components.Panels.Panel.menuItems('Inspect').should('be.visible').click();
e2e.components.Tab.title('JSON').should('be.visible').click();
e2e.components.PanelInspector.Json.content().should('be.visible').contains('Panel JSON').click({ force: true });
e2e.components.PanelInspector.Json.content().should('be.visible');
e2e.components.ReactMonacoEditor.editorLazy().should('be.visible');
cy.contains('Panel JSON').click({ force: true });
e2e.components.Select.option().should('be.visible').contains('Panel data').click();
// ensures that panel has loaded without knowingly hitting an error

2
go.mod
View File

@ -89,7 +89,7 @@ require (
github.com/grafana/grafana-cloud-migration-snapshot v1.2.0 // @grafana/grafana-operator-experience-squad
github.com/grafana/grafana-google-sdk-go v0.1.0 // @grafana/partner-datasources
github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79 // @grafana/grafana-backend-group
github.com/grafana/grafana-plugin-sdk-go v0.244.0 // @grafana/plugins-platform-backend
github.com/grafana/grafana-plugin-sdk-go v0.245.0 // @grafana/plugins-platform-backend
github.com/grafana/grafana/pkg/aggregator v0.0.0-20240813192817-1b0e6b5c09b2 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240821155123-6891eb1d35da // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/apiserver v0.0.0-20240821155123-6891eb1d35da // @grafana/grafana-app-platform-squad

4
go.sum
View File

@ -2281,8 +2281,8 @@ github.com/grafana/grafana-google-sdk-go v0.1.0/go.mod h1:Vo2TKWfDVmNTELBUM+3lkr
github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79 h1:r+mU5bGMzcXCRVAuOrTn54S80qbfVkvTdUJZfSfTNbs=
github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79/go.mod h1:wc6Hbh3K2TgCUSfBC/BOzabItujtHMESZeFk5ZhdxhQ=
github.com/grafana/grafana-plugin-sdk-go v0.114.0/go.mod h1:D7x3ah+1d4phNXpbnOaxa/osSaZlwh9/ZUnGGzegRbk=
github.com/grafana/grafana-plugin-sdk-go v0.244.0 h1:ZZxHbiiF6QcsnlbPFyZGmzNDoTC1pLeHXUQYoskWt5c=
github.com/grafana/grafana-plugin-sdk-go v0.244.0/go.mod h1:H3FXrJMUlwocQ6UYj8Ds5I9EzRAVOcdRcgaRE3mXQqk=
github.com/grafana/grafana-plugin-sdk-go v0.245.0 h1:2KCKA86//O20ffL6WKzHGx5scBbdV7GyEFGnH8Hdv7M=
github.com/grafana/grafana-plugin-sdk-go v0.245.0/go.mod h1:1X8Kgo/SK91Qo1WBCKjPSKrfgjpQys1OkQsHhA78TLg=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20240813192817-1b0e6b5c09b2 h1:2H9x4q53pkfUGtSNYD1qSBpNnxrFgylof/TYADb5xMI=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20240813192817-1b0e6b5c09b2/go.mod h1:gBLBniiSUQvyt4LRrpIeysj8Many0DV+hdUKifRE0Ec=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240821155123-6891eb1d35da h1:2E3c/I3ayAy4Z1GwIPqXNZcpUccRapE1aBXA1ho4g7o=

View File

@ -545,6 +545,7 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/chromedp/cdproto v0.0.0-20220208224320-6efb837e6bc2/go.mod h1:At5TxYYdxkbQL0TSefRjhLE3Q0lgvqKKMSFUglJ7i1U=
github.com/chromedp/chromedp v0.9.2 h1:dKtNz4kApb06KuSXoTQIyUC2TrA0fhGDwNZf3bcgfKw=
github.com/chromedp/sysutil v1.0.0 h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
@ -622,11 +623,14 @@ github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn
github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/elazarl/goproxy v0.0.0-20230731152917-f99041a5c027/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/expr-lang/expr v1.16.2 h1:JvMnzUs3LeVHBvGFcXYmXo+Q6DPDmzrlcSBO6Wy3w4s=
github.com/expr-lang/expr v1.16.2/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw=
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=
@ -638,6 +642,7 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/getkin/kin-openapi v0.124.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
@ -659,6 +664,7 @@ github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4F
github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 h1:NxXI5pTAtpEaU49bpLpQoDsu1zrteW/vxzTz8Cd2UAs=
github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9/go.mod h1:gWuR/CrFDDeVRFQwHPvsv9soJVB/iqymhuZQuJ3a9OM=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI=
github.com/go-pdf/fpdf v0.6.0 h1:MlgtGIfsdMEEQJr2le6b/HNr1ZlQwxyWr77r2aj2U/8=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
@ -755,6 +761,7 @@ github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSAS
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE=
github.com/influxdata/influxdb v1.7.6 h1:8mQ7A/V+3noMGCt/P9pD09ISaiz9XvgCk303UYA3gcs=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig=
github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
@ -967,6 +974,8 @@ github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwy
github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
@ -1239,6 +1248,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/api v0.0.0-20240722135656-d784300faade/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
@ -1273,6 +1283,7 @@ gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/telebot.v3 v3.2.1 h1:3I4LohaAyJBiivGmkfB+CiVu7QFOWkuZ4+KHgO/G3rs=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=

View File

@ -226,7 +226,7 @@
"sass-loader": "14.2.1",
"smtp-tester": "^2.1.0",
"style-loader": "4.0.0",
"stylelint": "16.8.2",
"stylelint": "16.9.0",
"stylelint-config-sass-guidelines": "11.1.0",
"terser-webpack-plugin": "5.3.10",
"testing-library-selector": "0.3.1",

View File

@ -203,4 +203,8 @@ export interface FeatureToggles {
dataplaneAggregator?: boolean;
adhocFilterOneOf?: boolean;
lokiSendDashboardPanelNames?: boolean;
singleTopNav?: boolean;
exploreLogsShardSplitting?: boolean;
exploreLogsAggregatedMetrics?: boolean;
exploreLogsLimitedTimeRange?: boolean;
}

View File

@ -1,7 +1,10 @@
export interface ScopeDashboardBindingSpec {
dashboard: string;
dashboardTitle: string;
scope: string;
}
// Derived (server-populated) information about a ScopeDashboardBinding.
// Mirrors the Go ScopeDashboardBindingStatus type.
export interface ScopeDashboardBindingStatus {
  // Title copied from the bound dashboard; kept in sync by the backend.
  dashboardTitle: string;
  // Optional grouping labels for suggested dashboards — presumably derived
  // server-side; source of truth not visible here (see Go type comment).
  groups?: string[];
}
@ -11,6 +14,7 @@ export interface ScopeDashboardBinding {
name: string;
};
spec: ScopeDashboardBindingSpec;
status: ScopeDashboardBindingStatus;
}
export type ScopeFilterOperator = 'equals' | 'not-equals' | 'regex-match' | 'regex-not-match';

View File

@ -12,7 +12,7 @@ import * as common from '@grafana/schema';
export const pluginVersion = "11.3.0-pre";
export interface Options extends common.SingleStatBaseOptions {
export interface Options extends common.OptionsWithLegend, common.SingleStatBaseOptions {
displayMode: common.BarGaugeDisplayMode;
maxVizHeight: number;
minVizHeight: number;

View File

@ -0,0 +1,94 @@
import { render, screen, fireEvent } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { Combobox, Option } from './Combobox';
// Mock data for the Combobox options; Option 3 also exercises the optional
// `description` field of an Option.
const options: Option[] = [
  { label: 'Option 1', value: '1' },
  { label: 'Option 2', value: '2' },
  { label: 'Option 3', value: '3', description: 'This is option 3' },
  { label: 'Option 4', value: '4' },
];
describe('Combobox', () => {
  const onChangeHandler = jest.fn();

  beforeAll(() => {
    // jsdom reports zero-sized rects; stub a fixed size so the dropdown
    // positioning/virtualization sees a non-empty element.
    const mockGetBoundingClientRect = jest.fn(() => ({
      width: 120,
      height: 120,
      top: 0,
      left: 0,
      bottom: 0,
      right: 0,
    }));

    Object.defineProperty(Element.prototype, 'getBoundingClientRect', {
      value: mockGetBoundingClientRect,
    });
  });

  beforeEach(() => {
    // Reset call history so each test asserts only its own interactions.
    onChangeHandler.mockClear();
  });

  it('renders without error', () => {
    render(<Combobox options={options} value={null} onChange={onChangeHandler} />);
    expect(screen.getByRole('combobox')).toBeInTheDocument();
  });

  it('should allow selecting a value by clicking directly', async () => {
    render(<Combobox options={options} onChange={onChangeHandler} value={null} />);

    const input = screen.getByRole('combobox');
    // Fix: userEvent.click returns a Promise (user-event v14) — without
    // `await` the click may not be processed before the option lookup below,
    // making the test flaky.
    await userEvent.click(input);

    const item = await screen.findByRole('option', { name: 'Option 1' });
    await userEvent.click(item);
    expect(screen.getByDisplayValue('Option 1')).toBeInTheDocument();
    expect(onChangeHandler).toHaveBeenCalledWith(options[0]);
  });

  it('selects value by clicking that needs scrolling', async () => {
    render(<Combobox options={options} value={null} onChange={onChangeHandler} />);

    await userEvent.click(screen.getByRole('combobox'));
    fireEvent.scroll(screen.getByRole('listbox'), { target: { scrollY: 200 } });
    await userEvent.click(screen.getByText('Option 4'));

    expect(screen.getByDisplayValue('Option 4')).toBeInTheDocument();
    expect(onChangeHandler).toHaveBeenCalledWith(options[3]);
  });

  it('selects value by searching and pressing enter', async () => {
    render(<Combobox options={options} value={null} onChange={onChangeHandler} />);

    const input = screen.getByRole('combobox');
    await userEvent.type(input, 'Option 3');
    await userEvent.keyboard('{ArrowDown}{Enter}');

    expect(onChangeHandler).toHaveBeenCalledWith(options[2]);
    expect(screen.getByDisplayValue('Option 3')).toBeInTheDocument();
  });

  it('selects value by using keyboard only', async () => {
    render(<Combobox options={options} value={null} onChange={onChangeHandler} />);

    const input = screen.getByRole('combobox');
    await userEvent.click(input);

    await userEvent.keyboard('{ArrowDown}{ArrowDown}{Enter}');

    expect(onChangeHandler).toHaveBeenCalledWith(options[1]);
    expect(screen.queryByDisplayValue('Option 2')).toBeInTheDocument();
  });

  it('clears selected value', async () => {
    render(<Combobox options={options} value={options[1].value} onChange={onChangeHandler} />);

    expect(screen.queryByDisplayValue('Option 2')).toBeInTheDocument();
    const input = screen.getByRole('combobox');
    await userEvent.click(input);

    const clearButton = screen.getByTitle('Clear value');
    await userEvent.click(clearButton);

    expect(onChangeHandler).toHaveBeenCalledWith(null);
    expect(screen.queryByDisplayValue('Option 2')).not.toBeInTheDocument();
  });
});

View File

@ -296,7 +296,8 @@ const getStyles = (theme: GrafanaTheme2) => {
mask: css({
// The !important here is to override the default .rc-drawer-mask styles
backgroundColor: 'transparent !important',
position: 'fixed',
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
position: 'fixed !important' as 'fixed',
'&:before': {
backgroundColor: `${theme.components.overlay.background} !important`,

View File

@ -113,6 +113,7 @@ RadioButtonGroup.displayName = 'RadioButtonGroup';
const getStyles = (theme: GrafanaTheme2) => {
return {
radioGroup: css({
backgroundColor: theme.colors.background.primary,
display: 'inline-flex',
flexDirection: 'row',
flexWrap: 'nowrap',

View File

@ -39,7 +39,7 @@ const VIRTUAL_LIST_ITEM_HEIGHT = 37;
const VIRTUAL_LIST_WIDTH_ESTIMATE_MULTIPLIER = 8;
const VIRTUAL_LIST_PADDING = 8;
// Some list items have icons or checkboxes so we need some extra width
const VIRTUAL_LIST_WIDTH_EXTRA = 36;
const VIRTUAL_LIST_WIDTH_EXTRA = 58;
// A virtualized version of the SelectMenu, descriptions for SelectableValue options not supported since those are of a variable height.
//

View File

@ -6,7 +6,8 @@ import { LegendDisplayMode, OptionsWithLegend } from '@grafana/schema';
*/
export function addLegendOptions<T extends OptionsWithLegend>(
builder: PanelOptionsEditorBuilder<T>,
includeLegendCalcs = true
includeLegendCalcs = true,
showLegend = true
) {
builder
.addBooleanSwitch({
@ -14,7 +15,7 @@ export function addLegendOptions<T extends OptionsWithLegend>(
name: 'Visibility',
category: ['Legend'],
description: '',
defaultValue: true,
defaultValue: showLegend,
})
.addRadio({
path: 'legend.displayMode',

View File

@ -4,7 +4,7 @@ go 1.23.0
require (
github.com/emicklei/go-restful/v3 v3.11.0
github.com/grafana/grafana-plugin-sdk-go v0.244.0
github.com/grafana/grafana-plugin-sdk-go v0.245.0
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240808213237-f4d2e064f435
github.com/grafana/grafana/pkg/semconv v0.0.0-20240808213237-f4d2e064f435
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38

View File

@ -130,8 +130,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/grafana-plugin-sdk-go v0.244.0 h1:ZZxHbiiF6QcsnlbPFyZGmzNDoTC1pLeHXUQYoskWt5c=
github.com/grafana/grafana-plugin-sdk-go v0.244.0/go.mod h1:H3FXrJMUlwocQ6UYj8Ds5I9EzRAVOcdRcgaRE3mXQqk=
github.com/grafana/grafana-plugin-sdk-go v0.245.0 h1:2KCKA86//O20ffL6WKzHGx5scBbdV7GyEFGnH8Hdv7M=
github.com/grafana/grafana-plugin-sdk-go v0.245.0/go.mod h1:1X8Kgo/SK91Qo1WBCKjPSKrfgjpQys1OkQsHhA78TLg=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240808213237-f4d2e064f435 h1:lmw60EW7JWlAEvgggktOyVkH4hF1m/+LSF/Ap0NCyi8=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240808213237-f4d2e064f435/go.mod h1:ORVFiW/KNRY52lNjkGwnFWCxNVfE97bJG2jr2fetq0I=
github.com/grafana/grafana/pkg/semconv v0.0.0-20240808213237-f4d2e064f435 h1:SNEeqY22DrGr5E9kGF1mKSqlOom14W9+b1u4XEGJowA=

View File

@ -46,8 +46,11 @@ import (
publicdashboardsapi "github.com/grafana/grafana/pkg/services/publicdashboards/api"
"github.com/grafana/grafana/pkg/services/serviceaccounts"
"github.com/grafana/grafana/pkg/services/user"
"go.opentelemetry.io/otel"
)
var tracer = otel.Tracer("github.com/grafana/grafana/pkg/api")
// registerRoutes registers all API HTTP routes.
func (hs *HTTPServer) registerRoutes() {
reqNoAuth := middleware.NoAuth()

View File

@ -40,6 +40,10 @@ const (
)
func (hs *HTTPServer) isDashboardStarredByUser(c *contextmodel.ReqContext, dashID int64) (bool, error) {
ctx, span := tracer.Start(c.Req.Context(), "api.isDashboardStarredByUser")
defer span.End()
c.Req = c.Req.WithContext(ctx)
if !c.IsSignedIn {
return false, nil
}
@ -81,8 +85,9 @@ func dashboardGuardianResponse(err error) response.Response {
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) GetDashboard(c *contextmodel.ReqContext) response.Response {
ctx, span := hs.tracer.Start(c.Req.Context(), "api.GetDashboard")
ctx, span := tracer.Start(c.Req.Context(), "api.GetDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
uid := web.Params(c.Req)[":uid"]
dash, rsp := hs.getDashboardHelper(ctx, c.SignedInUser.GetOrgID(), 0, uid)
@ -230,6 +235,10 @@ func (hs *HTTPServer) GetDashboard(c *contextmodel.ReqContext) response.Response
}
func (hs *HTTPServer) getAnnotationPermissionsByScope(c *contextmodel.ReqContext, actions *dashboardsV0.AnnotationActions, scope string) {
ctx, span := tracer.Start(c.Req.Context(), "api.getAnnotationPermissionsByScope")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var err error
evaluate := accesscontrol.EvalPermission(accesscontrol.ActionAnnotationsCreate, scope)
@ -252,6 +261,9 @@ func (hs *HTTPServer) getAnnotationPermissionsByScope(c *contextmodel.ReqContext
}
func (hs *HTTPServer) getUserLogin(ctx context.Context, userID int64) string {
ctx, span := tracer.Start(ctx, "api.getUserLogin")
defer span.End()
query := user.GetUserByIDQuery{ID: userID}
user, err := hs.userService.GetByID(ctx, &query)
if err != nil {
@ -292,6 +304,10 @@ func (hs *HTTPServer) getDashboardHelper(ctx context.Context, orgID int64, id in
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) RestoreDeletedDashboard(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.RestoreDeletedDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
uid := web.Params(c.Req)[":uid"]
cmd := dashboards.RestoreDeletedDashboardCommand{}
@ -342,6 +358,10 @@ func (hs *HTTPServer) RestoreDeletedDashboard(c *contextmodel.ReqContext) respon
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) SoftDeleteDashboard(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.SoftDeleteDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
uid := web.Params(c.Req)[":uid"]
dash, rsp := hs.getDashboardHelper(c.Req.Context(), c.SignedInUser.GetOrgID(), 0, uid)
if rsp != nil {
@ -408,6 +428,10 @@ func (hs *HTTPServer) HardDeleteDashboardByUID(c *contextmodel.ReqContext) respo
}
func (hs *HTTPServer) deleteDashboard(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.deleteDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
uid := web.Params(c.Req)[":uid"]
var dash *dashboards.Dashboard
@ -496,6 +520,10 @@ func (hs *HTTPServer) deleteDashboard(c *contextmodel.ReqContext) response.Respo
// 422: unprocessableEntityError
// 500: internalServerError
func (hs *HTTPServer) PostDashboard(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.PostDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
cmd := dashboards.SaveDashboardCommand{}
if err := web.Bind(c.Req, &cmd); err != nil {
return response.Error(http.StatusBadRequest, "bad request data", err)
@ -504,11 +532,15 @@ func (hs *HTTPServer) PostDashboard(c *contextmodel.ReqContext) response.Respons
}
func (hs *HTTPServer) postDashboard(c *contextmodel.ReqContext, cmd dashboards.SaveDashboardCommand) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.postDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
if cmd.IsFolder {
return response.Error(http.StatusBadRequest, "Use folders endpoint for saving folders.", nil)
}
ctx := c.Req.Context()
ctx = c.Req.Context()
var err error
var userID int64
@ -622,6 +654,10 @@ func (hs *HTTPServer) postDashboard(c *contextmodel.ReqContext, cmd dashboards.S
// 401: unauthorisedError
// 500: internalServerError
func (hs *HTTPServer) GetHomeDashboard(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.GetHomeDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var userID int64
if id, err := identity.UserIdentifier(c.SignedInUser.GetID()); err == nil {
userID = id
@ -685,6 +721,10 @@ func (hs *HTTPServer) GetHomeDashboard(c *contextmodel.ReqContext) response.Resp
}
func (hs *HTTPServer) addGettingStartedPanelToHomeDashboard(c *contextmodel.ReqContext, dash *simplejson.Json) {
ctx, span := tracer.Start(c.Req.Context(), "api.addGettingStartedPanelToHomeDashboard")
defer span.End()
c.Req = c.Req.WithContext(ctx)
// We only add this getting started panel for Admins who have not dismissed it,
// and if a custom default home dashboard hasn't been configured
if !c.HasUserRole(org.RoleAdmin) ||
@ -736,6 +776,10 @@ func (hs *HTTPServer) addGettingStartedPanelToHomeDashboard(c *contextmodel.ReqC
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) GetDashboardVersions(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.GetDashboardVersions")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var dashID int64
var err error
@ -846,6 +890,10 @@ func (hs *HTTPServer) GetDashboardVersions(c *contextmodel.ReqContext) response.
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) GetDashboardVersion(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.GetDashboardVersion")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var dashID int64
var err error
@ -921,6 +969,10 @@ func (hs *HTTPServer) GetDashboardVersion(c *contextmodel.ReqContext) response.R
// 403: forbiddenError
// 500: internalServerError
func (hs *HTTPServer) CalculateDashboardDiff(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.CalculateDashboardDiff")
defer span.End()
c.Req = c.Req.WithContext(ctx)
apiOptions := dtos.CalculateDiffOptions{}
if err := web.Bind(c.Req, &apiOptions); err != nil {
return response.Error(http.StatusBadRequest, "bad request data", err)
@ -1032,6 +1084,10 @@ func (hs *HTTPServer) CalculateDashboardDiff(c *contextmodel.ReqContext) respons
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) RestoreDashboardVersion(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.RestoreDashboardVersion")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var dashID int64
var err error
@ -1098,6 +1154,10 @@ func (hs *HTTPServer) RestoreDashboardVersion(c *contextmodel.ReqContext) respon
// 401: unauthorisedError
// 500: internalServerError
func (hs *HTTPServer) GetDashboardTags(c *contextmodel.ReqContext) {
ctx, span := tracer.Start(c.Req.Context(), "api.GetDashboardTags")
defer span.End()
c.Req = c.Req.WithContext(ctx)
query := dashboards.GetDashboardTagsQuery{OrgID: c.SignedInUser.GetOrgID()}
queryResult, err := hs.DashboardService.GetDashboardTags(c.Req.Context(), &query)
if err != nil {
@ -1110,6 +1170,10 @@ func (hs *HTTPServer) GetDashboardTags(c *contextmodel.ReqContext) {
// GetDashboardUIDs converts internal ids to UIDs
func (hs *HTTPServer) GetDashboardUIDs(c *contextmodel.ReqContext) {
ctx, span := tracer.Start(c.Req.Context(), "api.GetDashboardUIDs")
defer span.End()
c.Req = c.Req.WithContext(ctx)
ids := strings.Split(web.Params(c.Req)[":ids"], ",")
uids := make([]string, 0, len(ids))

View File

@ -44,6 +44,10 @@ import (
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) GetDashboardPermissionList(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.GetDashboardPermissionList")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var dashID int64
var err error
dashUID := web.Params(c.Req)[":uid"]
@ -117,6 +121,10 @@ func (hs *HTTPServer) GetDashboardPermissionList(c *contextmodel.ReqContext) res
// 404: notFoundError
// 500: internalServerError
func (hs *HTTPServer) UpdateDashboardPermissions(c *contextmodel.ReqContext) response.Response {
ctx, span := tracer.Start(c.Req.Context(), "api.UpdateDashboardPermissions")
defer span.End()
c.Req = c.Req.WithContext(ctx)
var dashID int64
var err error
apiCmd := dtos.UpdateDashboardACLCommand{}
@ -175,6 +183,9 @@ var dashboardPermissionMap = map[string]dashboardaccess.PermissionType{
}
func (hs *HTTPServer) getDashboardACL(ctx context.Context, user identity.Requester, dashboard *dashboards.Dashboard) ([]*dashboards.DashboardACLInfoDTO, error) {
ctx, span := tracer.Start(ctx, "api.getDashboardACL")
defer span.End()
permissions, err := hs.dashboardPermissionsService.GetPermissions(ctx, user, dashboard.UID)
if err != nil {
return nil, err
@ -253,6 +264,9 @@ func (hs *HTTPServer) filterHiddenACL(user identity.Requester, acl []*dashboards
// updateDashboardAccessControl is used for api backward compatibility
func (hs *HTTPServer) updateDashboardAccessControl(ctx context.Context, orgID int64, uid string, isFolder bool, items []*dashboards.DashboardACL, old []*dashboards.DashboardACLInfoDTO) error {
ctx, span := tracer.Start(ctx, "api.updateDashboardAccessControl")
defer span.End()
commands := []accesscontrol.SetResourcePermissionCommand{}
for _, item := range items {
permissions := item.Permission.String()

View File

@ -465,7 +465,8 @@ func (hs *HTTPServer) InstallPlugin(c *contextmodel.ReqContext) response.Respons
}
compatOpts := plugins.NewCompatOpts(hs.Cfg.BuildVersion, runtime.GOOS, runtime.GOARCH)
err := hs.pluginInstaller.Add(c.Req.Context(), pluginID, dto.Version, compatOpts)
ctx := repo.WithRequestOrigin(c.Req.Context(), "api")
err := hs.pluginInstaller.Add(ctx, pluginID, dto.Version, compatOpts)
if err != nil {
var dupeErr plugins.DuplicateError
if errors.As(err, &dupeErr) {

View File

@ -53,7 +53,6 @@ var ScopeDashboardBindingResourceInfo = common.NewResourceInfo(GROUP, VERSION,
{Name: "Created At", Type: "date"},
{Name: "Dashboard", Type: "string"},
{Name: "Scope", Type: "string"},
{Name: "Groups", Type: "array"},
},
Reader: func(obj any) ([]interface{}, error) {
m, ok := obj.(*ScopeDashboardBinding)
@ -65,7 +64,6 @@ var ScopeDashboardBindingResourceInfo = common.NewResourceInfo(GROUP, VERSION,
m.CreationTimestamp.UTC().Format(time.RFC3339),
m.Spec.Dashboard,
m.Spec.Scope,
m.Spec.Groups,
}, nil
},
},

View File

@ -56,21 +56,8 @@ type ScopeDashboardBinding struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScopeDashboardBindingSpec `json:"spec,omitempty"`
}
type ScopeDashboardBindingSpec struct {
Dashboard string `json:"dashboard"`
// DashboardTitle should be populated and update from the dashboard
DashboardTitle string `json:"dashboardTitle"`
// Groups is used for the grouping of dashboards that are suggested based
// on a scope. The source of truth for this information has not been
// determined yet.
Groups []string `json:"groups,omitempty"`
Scope string `json:"scope"`
Spec ScopeDashboardBindingSpec `json:"spec,omitempty"`
Status ScopeDashboardBindingStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -97,8 +84,36 @@ type ScopeNode struct {
Spec ScopeNodeSpec `json:"spec,omitempty"`
}
// ScopeDashboardBindingSpec is the desired state of a ScopeDashboardBinding:
// which dashboard is bound to which scope. Derived fields (title, groups)
// live in ScopeDashboardBindingStatus instead.
type ScopeDashboardBindingSpec struct {
	// Dashboard identifies the bound dashboard.
	Dashboard string `json:"dashboard"`

	// Scope identifies the scope the dashboard is bound to.
	Scope string `json:"scope"`
}
// Type of the item.
// +enum
// ScopeDashboardBindingStatus contains derived information about a ScopeDashboardBinding.
type ScopeDashboardBindingStatus struct {
	// DashboardTitle should be populated and update from the dashboard
	DashboardTitle string `json:"dashboardTitle"`

	// Groups is used for the grouping of dashboards that are suggested based
	// on a scope. The source of truth for this information has not been
	// determined yet.
	Groups []string `json:"groups,omitempty"`

	// DashboardTitleConditions is a list of conditions that are used to determine if the dashboard title is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	DashboardTitleConditions []metav1.Condition `json:"dashboardTitleConditions,omitempty"`

	// GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	GroupsConditions []metav1.Condition `json:"groupsConditions,omitempty"`
}
type NodeType string
// Defines values for ItemType.

View File

@ -8,6 +8,7 @@
package v0alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -108,7 +109,8 @@ func (in *ScopeDashboardBinding) DeepCopyInto(out *ScopeDashboardBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
@ -166,11 +168,6 @@ func (in *ScopeDashboardBindingList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDashboardBindingSpec) DeepCopyInto(out *ScopeDashboardBindingSpec) {
*out = *in
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -184,6 +181,41 @@ func (in *ScopeDashboardBindingSpec) DeepCopy() *ScopeDashboardBindingSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDashboardBindingStatus) DeepCopyInto(out *ScopeDashboardBindingStatus) {
	*out = *in
	// Groups is a []string: allocate a fresh backing array and copy so out
	// does not alias in's slice.
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	// Condition slices are copied element-by-element via each element's own
	// generated DeepCopyInto.
	if in.DashboardTitleConditions != nil {
		in, out := &in.DashboardTitleConditions, &out.DashboardTitleConditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.GroupsConditions != nil {
		in, out := &in.GroupsConditions, &out.GroupsConditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingStatus.
func (in *ScopeDashboardBindingStatus) DeepCopy() *ScopeDashboardBindingStatus {
	// A nil receiver yields nil, preserving optional-field semantics.
	if in == nil {
		return nil
	}
	out := new(ScopeDashboardBindingStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeFilter) DeepCopyInto(out *ScopeFilter) {
*out = *in

View File

@ -20,6 +20,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBinding": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding(ref),
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingList": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingList(ref),
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref),
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus(ref),
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeFilter": schema_pkg_apis_scope_v0alpha1_ScopeFilter(ref),
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeList": schema_pkg_apis_scope_v0alpha1_ScopeList(ref),
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeNode": schema_pkg_apis_scope_v0alpha1_ScopeNode(ref),
@ -195,11 +196,17 @@ func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding(ref common.ReferenceCa
Ref: ref("github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
"github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec", "github.com/grafana/grafana/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
@ -263,6 +270,27 @@ func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref common.Referen
Format: "",
},
},
"scope": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"dashboard", "scope"},
},
},
}
}
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Type of the item. ScopeDashboardBindingStatus contains derived information about a ScopeDashboardBinding.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"dashboardTitle": {
SchemaProps: spec.SchemaProps{
Description: "DashboardTitle should be populated and update from the dashboard",
@ -286,17 +314,56 @@ func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref common.Referen
},
},
},
"scope": {
"dashboardTitleConditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"type",
},
"x-kubernetes-list-type": "map",
},
},
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
Description: "DashboardTitleConditions is a list of conditions that are used to determine if the dashboard title is valid.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
},
},
},
},
},
"groupsConditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"type",
},
"x-kubernetes-list-type": "map",
},
},
SchemaProps: spec.SchemaProps{
Description: "DashboardTitleConditions is a list of conditions that are used to determine if the list of groups is valid.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
},
},
},
},
},
},
Required: []string{"dashboard", "dashboardTitle", "scope"},
Required: []string{"dashboardTitle"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Condition"},
}
}
@ -484,11 +551,9 @@ func schema_pkg_apis_scope_v0alpha1_ScopeNodeSpec(ref common.ReferenceCallback)
},
"nodeType": {
SchemaProps: spec.SchemaProps{
Description: "Possible enum values:\n - `\"container\"`\n - `\"leaf\"`",
Default: "",
Type: []string{"string"},
Format: "",
Enum: []interface{}{"container", "leaf"},
Default: "",
Type: []string{"string"},
Format: "",
},
},
"title": {

View File

@ -1,3 +1,3 @@
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/scope/v0alpha1,FindScopeDashboardBindingsResults,Items
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/scope/v0alpha1,ScopeDashboardBindingSpec,Groups
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/scope/v0alpha1,ScopeDashboardBindingStatus,Groups
API rule violation: names_match,github.com/grafana/grafana/pkg/apis/scope/v0alpha1,ScopeNodeSpec,LinkID

View File

@ -90,7 +90,7 @@ type DualWriterMode int
const (
// Mode0 represents writing to and reading from solely LegacyStorage. This mode is enabled when the
// `unifiedStorage` feature flag is not set. All reads and writes are made to LegacyStorage. None are made to Storage.
// Unified Storage is disabled. All reads and writes are made to LegacyStorage. None are made to Storage.
Mode0 DualWriterMode = iota
// Mode1 represents writing to and reading from LegacyStorage for all primary functionality while additionally
// reading and writing to Storage on a best effort basis for the sake of collecting metrics.
@ -110,27 +110,27 @@ func NewDualWriter(
legacy LegacyStorage,
storage Storage,
reg prometheus.Registerer,
kind string,
resource string,
) DualWriter {
metrics := &dualWriterMetrics{}
metrics.init(reg)
switch mode {
// It is not possible to initialize a mode 0 dual writer. Mode 0 represents
// writing to legacy storage without `unifiedStorage` enabled.
// writing to legacy storage without Unified Storage enabled.
case Mode1:
// read and write only from legacy storage
return newDualWriterMode1(legacy, storage, metrics, kind)
return newDualWriterMode1(legacy, storage, metrics, resource)
case Mode2:
// write to both, read from storage but use legacy as backup
return newDualWriterMode2(legacy, storage, metrics, kind)
return newDualWriterMode2(legacy, storage, metrics, resource)
case Mode3:
// write to both, read from storage only
return newDualWriterMode3(legacy, storage, metrics, kind)
return newDualWriterMode3(legacy, storage, metrics, resource)
case Mode4:
// read and write only from storage
return newDualWriterMode4(legacy, storage, metrics, kind)
return newDualWriterMode4(legacy, storage, metrics, resource)
default:
return newDualWriterMode1(legacy, storage, metrics, kind)
return newDualWriterMode1(legacy, storage, metrics, resource)
}
}
@ -179,7 +179,7 @@ func SetDualWritingMode(
toMode := map[string]DualWriterMode{
// It is not possible to initialize a mode 0 dual writer. Mode 0 represents
// writing to legacy storage without `unifiedStorage` enabled.
// writing to legacy storage without Unified Storage enabled.
"1": Mode1,
"2": Mode2,
"3": Mode3,

View File

@ -18,16 +18,22 @@ type DualWriterMode1 struct {
Legacy LegacyStorage
Storage Storage
*dualWriterMetrics
kind string
Log klog.Logger
resource string
Log klog.Logger
}
const mode1Str = "1"
// NewDualWriterMode1 returns a new DualWriter in mode 1.
// Mode 1 represents writing to and reading from LegacyStorage.
func newDualWriterMode1(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, kind string) *DualWriterMode1 {
return &DualWriterMode1{Legacy: legacy, Storage: storage, Log: klog.NewKlogr().WithName("DualWriterMode1").WithValues("mode", mode1Str, "kind", kind), dualWriterMetrics: dwm}
// newDualWriterMode1 constructs a mode-1 dual writer (reads/writes go to
// LegacyStorage; Storage is best-effort). The logger is pre-tagged with the
// mode and resource so every log line carries both.
func newDualWriterMode1(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, resource string) *DualWriterMode1 {
	return &DualWriterMode1{
		Legacy:            legacy,
		Storage:           storage,
		Log:               klog.NewKlogr().WithName("DualWriterMode1").WithValues("mode", mode1Str, "resource", resource),
		dualWriterMetrics: dwm,
		// resource is retained so per-resource metrics can be recorded later.
		resource: resource,
	}
}
// Mode returns the mode of the dual writer.
@ -45,10 +51,10 @@ func (d *DualWriterMode1) Create(ctx context.Context, original runtime.Object, c
created, err := d.Legacy.Create(ctx, original, createValidation, options)
if err != nil {
log.Error(err, "unable to create object in legacy storage")
d.recordLegacyDuration(true, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode1Str, d.resource, method, startLegacy)
return created, err
}
d.recordLegacyDuration(false, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode1Str, d.resource, method, startLegacy)
createdCopy := created.DeepCopyObject()
@ -62,7 +68,7 @@ func (d *DualWriterMode1) Create(ctx context.Context, original runtime.Object, c
startStorage := time.Now()
storageObj, errObjectSt := d.Storage.Create(ctx, createdCopy, createValidation, options)
d.recordStorageDuration(errObjectSt != nil, mode1Str, d.kind, method, startStorage)
d.recordStorageDuration(errObjectSt != nil, mode1Str, d.resource, method, startStorage)
if err != nil {
cancel()
}
@ -87,14 +93,14 @@ func (d *DualWriterMode1) Get(ctx context.Context, name string, options *metav1.
if errLegacy != nil {
log.Error(errLegacy, "unable to get object in legacy storage")
}
d.recordLegacyDuration(errLegacy != nil, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(errLegacy != nil, mode1Str, d.resource, method, startLegacy)
go func(res runtime.Object) {
startStorage := time.Now()
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage get timeout"))
defer cancel()
storageObj, err := d.Storage.Get(ctx, name, options)
d.recordStorageDuration(err != nil, mode1Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode1Str, d.resource, method, startStorage)
if err != nil {
log.Error(err, "unable to get object in storage")
cancel()
@ -121,14 +127,14 @@ func (d *DualWriterMode1) List(ctx context.Context, options *metainternalversion
if errLegacy != nil {
log.Error(errLegacy, "unable to list object in legacy storage")
}
d.recordLegacyDuration(errLegacy != nil, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(errLegacy != nil, mode1Str, d.resource, method, startLegacy)
go func(res runtime.Object) {
startStorage := time.Now()
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage list timeout"))
defer cancel()
storageObj, err := d.Storage.List(ctx, options)
d.recordStorageDuration(err != nil, mode1Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode1Str, d.resource, method, startStorage)
if err != nil {
cancel()
}
@ -151,7 +157,7 @@ func (d *DualWriterMode1) Delete(ctx context.Context, name string, deleteValidat
res, async, err := d.Legacy.Delete(ctx, name, deleteValidation, options)
if err != nil {
log.Error(err, "unable to delete object in legacy storage")
d.recordLegacyDuration(true, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode1Str, d.resource, method, startLegacy)
return res, async, err
}
d.recordLegacyDuration(false, mode1Str, name, method, startLegacy)
@ -161,7 +167,7 @@ func (d *DualWriterMode1) Delete(ctx context.Context, name string, deleteValidat
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage delete timeout"))
defer cancel()
storageObj, _, err := d.Storage.Delete(ctx, name, deleteValidation, options)
d.recordStorageDuration(err != nil, mode1Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode1Str, d.resource, method, startStorage)
if err != nil {
cancel()
}
@ -185,17 +191,17 @@ func (d *DualWriterMode1) DeleteCollection(ctx context.Context, deleteValidation
res, err := d.Legacy.DeleteCollection(ctx, deleteValidation, options, listOptions)
if err != nil {
log.Error(err, "unable to delete collection in legacy storage")
d.recordLegacyDuration(true, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode1Str, d.resource, method, startLegacy)
return res, err
}
d.recordLegacyDuration(false, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode1Str, d.resource, method, startLegacy)
go func(res runtime.Object) {
startStorage := time.Now()
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage deletecollection timeout"))
defer cancel()
storageObj, err := d.Storage.DeleteCollection(ctx, deleteValidation, options, listOptions)
d.recordStorageDuration(err != nil, mode1Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode1Str, d.resource, method, startStorage)
if err != nil {
cancel()
}
@ -218,10 +224,10 @@ func (d *DualWriterMode1) Update(ctx context.Context, name string, objInfo rest.
res, async, err := d.Legacy.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
if err != nil {
log.Error(err, "unable to update in legacy storage")
d.recordLegacyDuration(true, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode1Str, d.resource, method, startLegacy)
return res, async, err
}
d.recordLegacyDuration(false, mode1Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode1Str, d.resource, method, startLegacy)
go func(res runtime.Object) {
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage update timeout"))
@ -257,7 +263,7 @@ func (d *DualWriterMode1) Update(ctx context.Context, name string, objInfo rest.
startStorage := time.Now()
defer cancel()
storageObj, _, errObjectSt := d.Storage.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
d.recordStorageDuration(errObjectSt != nil, mode1Str, d.kind, method, startStorage)
d.recordStorageDuration(errObjectSt != nil, mode1Str, d.resource, method, startStorage)
if err != nil {
cancel()
}

View File

@ -26,17 +26,21 @@ type DualWriterMode2 struct {
Storage Storage
Legacy LegacyStorage
*dualWriterMetrics
kind string
Log klog.Logger
resource string
Log klog.Logger
}
const mode2Str = "2"
// NewDualWriterMode2 returns a new DualWriter in mode 2.
// Mode 2 represents writing to LegacyStorage and Storage and reading from LegacyStorage.
func newDualWriterMode2(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, kind string) *DualWriterMode2 {
func newDualWriterMode2(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, resource string) *DualWriterMode2 {
return &DualWriterMode2{
Legacy: legacy, Storage: storage, Log: klog.NewKlogr().WithName("DualWriterMode2").WithValues("mode", mode2Str, "kind", kind), dualWriterMetrics: dwm,
Legacy: legacy,
Storage: storage,
Log: klog.NewKlogr().WithName("DualWriterMode2").WithValues("mode", mode2Str, "resource", resource),
dualWriterMetrics: dwm,
resource: resource,
}
}
@ -55,10 +59,10 @@ func (d *DualWriterMode2) Create(ctx context.Context, original runtime.Object, c
created, err := d.Legacy.Create(ctx, original, createValidation, options)
if err != nil {
log.Error(err, "unable to create object in legacy storage")
d.recordLegacyDuration(true, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode2Str, d.resource, method, startLegacy)
return created, err
}
d.recordLegacyDuration(false, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode2Str, d.resource, method, startLegacy)
if err := enrichLegacyObject(original, created); err != nil {
return created, err
@ -68,10 +72,10 @@ func (d *DualWriterMode2) Create(ctx context.Context, original runtime.Object, c
rsp, err := d.Storage.Create(ctx, created, createValidation, options)
if err != nil {
log.WithValues("name").Error(err, "unable to create object in storage")
d.recordStorageDuration(true, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(true, mode2Str, d.resource, method, startStorage)
return rsp, err
}
d.recordStorageDuration(false, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode2Str, d.resource, method, startStorage)
areEqual := Compare(rsp, created)
d.recordOutcome(mode2Str, getName(rsp), areEqual, method)
@ -89,7 +93,7 @@ func (d *DualWriterMode2) Get(ctx context.Context, name string, options *metav1.
startStorage := time.Now()
objStorage, err := d.Storage.Get(ctx, name, options)
d.recordStorageDuration(err != nil, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode2Str, d.resource, method, startStorage)
if err != nil {
// if it errors because it's not found, we try to fetch it from the legacy storage
if !apierrors.IsNotFound(err) {
@ -103,10 +107,10 @@ func (d *DualWriterMode2) Get(ctx context.Context, name string, options *metav1.
objLegacy, err := d.Legacy.Get(ctx, name, options)
if err != nil {
log.Error(err, "unable to fetch object from legacy")
d.recordLegacyDuration(true, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode2Str, d.resource, method, startLegacy)
return objLegacy, err
}
d.recordLegacyDuration(false, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode2Str, d.resource, method, startLegacy)
areEqual := Compare(objStorage, objLegacy)
d.recordOutcome(mode2Str, name, areEqual, method)
@ -132,10 +136,10 @@ func (d *DualWriterMode2) List(ctx context.Context, options *metainternalversion
ll, err := d.Legacy.List(ctx, options)
if err != nil {
log.Error(err, "unable to list objects from legacy storage")
d.recordLegacyDuration(true, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode2Str, d.resource, method, startLegacy)
return ll, err
}
d.recordLegacyDuration(false, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode2Str, d.resource, method, startLegacy)
legacyList, err := meta.ExtractList(ll)
if err != nil {
@ -154,10 +158,10 @@ func (d *DualWriterMode2) List(ctx context.Context, options *metainternalversion
sl, err := d.Storage.List(ctx, options)
if err != nil {
log.Error(err, "unable to list objects from storage")
d.recordStorageDuration(true, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(true, mode2Str, d.resource, method, startStorage)
return sl, err
}
d.recordStorageDuration(false, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode2Str, d.resource, method, startStorage)
storageList, err := meta.ExtractList(sl)
if err != nil {
@ -181,11 +185,7 @@ func (d *DualWriterMode2) List(ctx context.Context, options *metainternalversion
return nil, err
}
// if the number of items in the legacy list and the storage list are the same, we can return the storage list
if len(storageList) == len(legacyList) {
return sl, nil
}
log.Info("lists from legacy and storage are not the same size")
// always return the list from legacy storage
return ll, nil
}
@ -199,10 +199,10 @@ func (d *DualWriterMode2) DeleteCollection(ctx context.Context, deleteValidation
deleted, err := d.Legacy.DeleteCollection(ctx, deleteValidation, options, listOptions)
if err != nil {
log.WithValues("deleted", deleted).Error(err, "failed to delete collection successfully from legacy storage")
d.recordLegacyDuration(true, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode2Str, d.resource, method, startLegacy)
return deleted, err
}
d.recordLegacyDuration(false, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode2Str, d.resource, method, startLegacy)
legacyList, err := meta.ExtractList(deleted)
if err != nil {
@ -220,10 +220,10 @@ func (d *DualWriterMode2) DeleteCollection(ctx context.Context, deleteValidation
res, err := d.Storage.DeleteCollection(ctx, deleteValidation, options, listOptions)
if err != nil {
log.WithValues("deleted", res).Error(err, "failed to delete collection successfully from Storage")
d.recordStorageDuration(true, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(true, mode2Str, d.resource, method, startStorage)
return res, err
}
d.recordStorageDuration(false, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode2Str, d.resource, method, startStorage)
areEqual := Compare(res, deleted)
d.recordOutcome(mode2Str, getName(res), areEqual, method)
@ -245,22 +245,22 @@ func (d *DualWriterMode2) Delete(ctx context.Context, name string, deleteValidat
if err != nil {
if !apierrors.IsNotFound(err) {
log.WithValues("objectList", deletedLS).Error(err, "could not delete from legacy store")
d.recordLegacyDuration(true, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(true, mode2Str, d.resource, method, startLegacy)
return deletedLS, async, err
}
}
d.recordLegacyDuration(false, mode2Str, d.kind, method, startLegacy)
d.recordLegacyDuration(false, mode2Str, d.resource, method, startLegacy)
startStorage := time.Now()
deletedS, _, err := d.Storage.Delete(ctx, name, deleteValidation, options)
if err != nil {
if !apierrors.IsNotFound(err) {
log.WithValues("objectList", deletedS).Error(err, "could not delete from duplicate storage")
d.recordStorageDuration(true, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(true, mode2Str, d.resource, method, startStorage)
}
return deletedS, async, err
}
d.recordStorageDuration(false, mode2Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode2Str, d.resource, method, startStorage)
areEqual := Compare(deletedS, deletedLS)
d.recordOutcome(mode2Str, name, areEqual, method)
@ -298,10 +298,10 @@ func (d *DualWriterMode2) Update(ctx context.Context, name string, objInfo rest.
obj, created, err := d.Legacy.Update(ctx, name, &updateWrapper{upstream: objInfo, updated: updated}, createValidation, updateValidation, forceAllowCreate, options)
if err != nil {
log.WithValues("object", obj).Error(err, "could not update in legacy storage")
d.recordLegacyDuration(true, mode2Str, d.kind, "update", startLegacy)
d.recordLegacyDuration(true, mode2Str, d.resource, "update", startLegacy)
return obj, created, err
}
d.recordLegacyDuration(false, mode2Str, d.kind, "update", startLegacy)
d.recordLegacyDuration(false, mode2Str, d.resource, "update", startLegacy)
// if the object is found, create a new updateWrapper with the object found
if foundObj != nil {
@ -320,7 +320,7 @@ func (d *DualWriterMode2) Update(ctx context.Context, name string, objInfo rest.
res, created, err := d.Storage.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
if err != nil {
log.WithValues("object", res).Error(err, "could not update in storage")
d.recordStorageDuration(true, mode2Str, d.kind, "update", startStorage)
d.recordStorageDuration(true, mode2Str, d.resource, "update", startStorage)
return res, created, err
}
@ -434,7 +434,7 @@ func getList(ctx context.Context, obj rest.Lister, listOptions *metainternalvers
return meta.ExtractList(ll)
}
func mode2DataSyncer(ctx context.Context, legacy LegacyStorage, storage Storage, kind string, reg prometheus.Registerer, serverLockService ServerLockService, requestInfo *request.RequestInfo) (bool, error) {
func mode2DataSyncer(ctx context.Context, legacy LegacyStorage, storage Storage, resource string, reg prometheus.Registerer, serverLockService ServerLockService, requestInfo *request.RequestInfo) (bool, error) {
metrics := &dualWriterMetrics{}
metrics.init(reg)
@ -599,8 +599,8 @@ func mode2DataSyncer(ctx context.Context, legacy LegacyStorage, storage Storage,
everythingSynced = outOfSync == syncSuccess
metrics.recordDataSyncerOutcome(mode2Str, kind, everythingSynced)
metrics.recordDataSyncerDuration(err != nil, mode2Str, kind, startSync)
metrics.recordDataSyncerOutcome(mode2Str, resource, everythingSynced)
metrics.recordDataSyncerDuration(err != nil, mode2Str, resource, startSync)
log.Info("finished syncing items", "items", len(itemsByName), "updated", syncSuccess, "failed", syncErr, "outcome", everythingSynced)
})

View File

@ -18,14 +18,20 @@ type DualWriterMode3 struct {
Storage Storage
watchImp rest.Watcher // watch is only available in mode 3 and 4
*dualWriterMetrics
kind string
Log klog.Logger
resource string
Log klog.Logger
}
// newDualWriterMode3 returns a new DualWriter in mode 3.
// Mode 3 represents writing to LegacyStorage and Storage and reading from Storage.
func newDualWriterMode3(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, kind string) *DualWriterMode3 {
return &DualWriterMode3{Legacy: legacy, Storage: storage, Log: klog.NewKlogr().WithName("DualWriterMode3").WithValues("mode", mode3Str, "kind", kind), dualWriterMetrics: dwm}
func newDualWriterMode3(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, resource string) *DualWriterMode3 {
return &DualWriterMode3{
Legacy: legacy,
Storage: storage,
Log: klog.NewKlogr().WithName("DualWriterMode3").WithValues("mode", mode3Str, "resource", resource),
dualWriterMetrics: dwm,
resource: resource,
}
}
// Mode returns the mode of the dual writer.
@ -45,10 +51,10 @@ func (d *DualWriterMode3) Create(ctx context.Context, obj runtime.Object, create
created, err := d.Storage.Create(ctx, obj, createValidation, options)
if err != nil {
log.Error(err, "unable to create object in storage")
d.recordLegacyDuration(true, mode3Str, d.kind, method, startStorage)
d.recordLegacyDuration(true, mode3Str, d.resource, method, startStorage)
return created, err
}
d.recordStorageDuration(false, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode3Str, d.resource, method, startStorage)
go func() {
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("legacy create timeout"))
@ -56,7 +62,7 @@ func (d *DualWriterMode3) Create(ctx context.Context, obj runtime.Object, create
startLegacy := time.Now()
_, errObjectSt := d.Legacy.Create(ctx, obj, createValidation, options)
d.recordLegacyDuration(errObjectSt != nil, mode3Str, d.kind, method, startLegacy)
d.recordLegacyDuration(errObjectSt != nil, mode3Str, d.resource, method, startLegacy)
}()
return created, err
@ -73,7 +79,7 @@ func (d *DualWriterMode3) Get(ctx context.Context, name string, options *metav1.
if err != nil {
log.Error(err, "unable to get object in storage")
}
d.recordStorageDuration(err != nil, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode3Str, d.resource, method, startStorage)
return res, err
}
@ -89,7 +95,7 @@ func (d *DualWriterMode3) List(ctx context.Context, options *metainternalversion
if err != nil {
log.Error(err, "unable to list object in storage")
}
d.recordStorageDuration(err != nil, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode3Str, d.resource, method, startStorage)
return res, err
}
@ -103,7 +109,7 @@ func (d *DualWriterMode3) Delete(ctx context.Context, name string, deleteValidat
res, async, err := d.Storage.Delete(ctx, name, deleteValidation, options)
if err != nil {
log.Error(err, "unable to delete object in storage")
d.recordStorageDuration(true, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(true, mode3Str, d.resource, method, startStorage)
return res, async, err
}
d.recordStorageDuration(false, mode3Str, name, method, startStorage)
@ -113,7 +119,7 @@ func (d *DualWriterMode3) Delete(ctx context.Context, name string, deleteValidat
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("legacy delete timeout"))
defer cancel()
_, _, err := d.Legacy.Delete(ctx, name, deleteValidation, options)
d.recordLegacyDuration(err != nil, mode3Str, d.kind, method, startLegacy)
d.recordLegacyDuration(err != nil, mode3Str, d.resource, method, startLegacy)
}()
return res, async, err
@ -129,10 +135,10 @@ func (d *DualWriterMode3) Update(ctx context.Context, name string, objInfo rest.
res, async, err := d.Storage.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
if err != nil {
log.Error(err, "unable to update in storage")
d.recordLegacyDuration(true, mode3Str, d.kind, method, startStorage)
d.recordLegacyDuration(true, mode3Str, d.resource, method, startStorage)
return res, async, err
}
d.recordStorageDuration(false, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode3Str, d.resource, method, startStorage)
go func() {
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("legacy update timeout"))
@ -140,7 +146,7 @@ func (d *DualWriterMode3) Update(ctx context.Context, name string, objInfo rest.
startLegacy := time.Now()
defer cancel()
_, _, errObjectSt := d.Legacy.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
d.recordLegacyDuration(errObjectSt != nil, mode3Str, d.kind, method, startLegacy)
d.recordLegacyDuration(errObjectSt != nil, mode3Str, d.resource, method, startLegacy)
}()
return res, async, err
@ -156,17 +162,17 @@ func (d *DualWriterMode3) DeleteCollection(ctx context.Context, deleteValidation
res, err := d.Storage.DeleteCollection(ctx, deleteValidation, options, listOptions)
if err != nil {
log.Error(err, "unable to delete collection in storage")
d.recordStorageDuration(true, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(true, mode3Str, d.resource, method, startStorage)
return res, err
}
d.recordStorageDuration(false, mode3Str, d.kind, method, startStorage)
d.recordStorageDuration(false, mode3Str, d.resource, method, startStorage)
go func() {
startLegacy := time.Now()
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("legacy deletecollection timeout"))
defer cancel()
_, err := d.Legacy.DeleteCollection(ctx, deleteValidation, options, listOptions)
d.recordStorageDuration(err != nil, mode3Str, d.kind, method, startLegacy)
d.recordStorageDuration(err != nil, mode3Str, d.resource, method, startLegacy)
}()
return res, err

View File

@ -17,16 +17,22 @@ type DualWriterMode4 struct {
Storage Storage
watchImp rest.Watcher // watch is only available in mode 3 and 4
*dualWriterMetrics
kind string
Log klog.Logger
resource string
Log klog.Logger
}
const mode4Str = "4"
// newDualWriterMode4 returns a new DualWriter in mode 4.
// Mode 4 represents writing and reading from Storage.
func newDualWriterMode4(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, kind string) *DualWriterMode4 {
return &DualWriterMode4{Legacy: legacy, Storage: storage, Log: klog.NewKlogr().WithName("DualWriterMode4").WithValues("mode", mode4Str, "kind", kind), dualWriterMetrics: dwm}
func newDualWriterMode4(legacy LegacyStorage, storage Storage, dwm *dualWriterMetrics, resource string) *DualWriterMode4 {
return &DualWriterMode4{
Legacy: legacy,
Storage: storage,
Log: klog.NewKlogr().WithName("DualWriterMode4").WithValues("mode", mode4Str, "resource", resource),
dualWriterMetrics: dwm,
resource: resource,
}
}
// Mode returns the mode of the dual writer.
@ -47,7 +53,7 @@ func (d *DualWriterMode4) Create(ctx context.Context, obj runtime.Object, create
if err != nil {
log.Error(err, "unable to create object in storage")
}
d.recordStorageDuration(err != nil, mode4Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode4Str, d.resource, method, startStorage)
return res, err
}
@ -62,7 +68,7 @@ func (d *DualWriterMode4) Get(ctx context.Context, name string, options *metav1.
if err != nil {
log.Error(err, "unable to create object in storage")
}
d.recordStorageDuration(err != nil, mode4Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode4Str, d.resource, method, startStorage)
return res, err
}
@ -76,7 +82,7 @@ func (d *DualWriterMode4) Delete(ctx context.Context, name string, deleteValidat
if err != nil {
log.Error(err, "unable to delete object in storage")
}
d.recordStorageDuration(err != nil, mode4Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode4Str, d.resource, method, startStorage)
return res, async, err
}
@ -91,14 +97,14 @@ func (d *DualWriterMode4) DeleteCollection(ctx context.Context, deleteValidation
if err != nil {
log.Error(err, "unable to delete collection in storage")
}
d.recordStorageDuration(err != nil, mode4Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode4Str, d.resource, method, startStorage)
return res, err
}
// Update overrides the generic behavior of the Storage and writes only to US.
func (d *DualWriterMode4) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
var method = "update"
log := d.Log.WithValues("name", name, "kind", d.kind, "method", method)
log := d.Log.WithValues("name", name, "resource", d.resource, "method", method)
ctx = klog.NewContext(ctx, log)
startStorage := time.Now()
@ -106,7 +112,7 @@ func (d *DualWriterMode4) Update(ctx context.Context, name string, objInfo rest.
if err != nil {
log.Error(err, "unable to update object in storage")
}
d.recordStorageDuration(err != nil, mode4Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode4Str, d.resource, method, startStorage)
return res, async, err
}
@ -120,7 +126,7 @@ func (d *DualWriterMode4) List(ctx context.Context, options *metainternalversion
if err != nil {
log.Error(err, "unable to list objects in storage")
}
d.recordStorageDuration(err != nil, mode4Str, d.kind, method, startStorage)
d.recordStorageDuration(err != nil, mode4Str, d.resource, method, startStorage)
return res, err
}

View File

@ -22,7 +22,7 @@ var DualWriterStorageDuration = prometheus.NewHistogramVec(prometheus.HistogramO
Help: "Histogram for the runtime of dual writer storage duration per mode",
Namespace: "grafana",
NativeHistogramBucketFactor: 1.1,
}, []string{"is_error", "mode", "kind", "method"})
}, []string{"is_error", "mode", "resource", "method"})
// DualWriterLegacyDuration is a metric summary for dual writer legacy duration per mode
var DualWriterLegacyDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
@ -30,7 +30,7 @@ var DualWriterLegacyDuration = prometheus.NewHistogramVec(prometheus.HistogramOp
Help: "Histogram for the runtime of dual writer legacy duration per mode",
Namespace: "grafana",
NativeHistogramBucketFactor: 1.1,
}, []string{"is_error", "mode", "kind", "method"})
}, []string{"is_error", "mode", "resource", "method"})
// DualWriterOutcome is a metric summary for dual writer outcome comparison between the 2 stores per mode
var DualWriterOutcome = prometheus.NewHistogramVec(prometheus.HistogramOpts{
@ -44,7 +44,7 @@ var DualWriterReadLegacyCounts = prometheus.NewCounterVec(prometheus.CounterOpts
Name: "dual_writer_read_legacy_count",
Help: "Histogram for the runtime of dual writer reads from legacy",
Namespace: "grafana",
}, []string{"kind", "method"})
}, []string{"resource", "method"})
// DualWriterSyncerDuration is a metric summary for dual writer sync duration per mode
var DualWriterSyncerDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
@ -52,7 +52,7 @@ var DualWriterSyncerDuration = prometheus.NewHistogramVec(prometheus.HistogramOp
Help: "Histogram for the runtime of dual writer data syncer duration per mode",
Namespace: "grafana",
NativeHistogramBucketFactor: 1.1,
}, []string{"is_error", "mode", "kind"})
}, []string{"is_error", "mode", "resource"})
// DualWriterDataSyncerOutcome is a metric summary for dual writer data syncer outcome comparison between the 2 stores per mode
var DualWriterDataSyncerOutcome = prometheus.NewHistogramVec(prometheus.HistogramOpts{
@ -60,7 +60,7 @@ var DualWriterDataSyncerOutcome = prometheus.NewHistogramVec(prometheus.Histogra
Help: "Histogram for the runtime of dual writer data syncer outcome comparison between the 2 stores per mode",
Namespace: "grafana",
NativeHistogramBucketFactor: 1.1,
}, []string{"mode", "kind"})
}, []string{"mode", "resource"})
func (m *dualWriterMetrics) init(reg prometheus.Registerer) {
log := klog.NewKlogr()
@ -79,14 +79,14 @@ func (m *dualWriterMetrics) init(reg prometheus.Registerer) {
}
}
func (m *dualWriterMetrics) recordLegacyDuration(isError bool, mode string, kind string, method string, startFrom time.Time) {
func (m *dualWriterMetrics) recordLegacyDuration(isError bool, mode string, resource string, method string, startFrom time.Time) {
duration := time.Since(startFrom).Seconds()
m.legacy.WithLabelValues(strconv.FormatBool(isError), mode, kind, method).Observe(duration)
m.legacy.WithLabelValues(strconv.FormatBool(isError), mode, resource, method).Observe(duration)
}
func (m *dualWriterMetrics) recordStorageDuration(isError bool, mode string, kind string, method string, startFrom time.Time) {
func (m *dualWriterMetrics) recordStorageDuration(isError bool, mode string, resource string, method string, startFrom time.Time) {
duration := time.Since(startFrom).Seconds()
m.storage.WithLabelValues(strconv.FormatBool(isError), mode, kind, method).Observe(duration)
m.storage.WithLabelValues(strconv.FormatBool(isError), mode, resource, method).Observe(duration)
}
func (m *dualWriterMetrics) recordOutcome(mode string, name string, areEqual bool, method string) {
@ -97,15 +97,15 @@ func (m *dualWriterMetrics) recordOutcome(mode string, name string, areEqual boo
m.outcome.WithLabelValues(mode, name, method).Observe(observeValue)
}
func (m *dualWriterMetrics) recordDataSyncerDuration(isError bool, mode string, kind string, startFrom time.Time) {
func (m *dualWriterMetrics) recordDataSyncerDuration(isError bool, mode string, resource string, startFrom time.Time) {
duration := time.Since(startFrom).Seconds()
m.syncer.WithLabelValues(strconv.FormatBool(isError), mode, kind).Observe(duration)
m.syncer.WithLabelValues(strconv.FormatBool(isError), mode, resource).Observe(duration)
}
func (m *dualWriterMetrics) recordDataSyncerOutcome(mode string, kind string, synced bool) {
func (m *dualWriterMetrics) recordDataSyncerOutcome(mode string, resource string, synced bool) {
var observeValue float64
if !synced {
observeValue = 1
}
m.syncerOutcome.WithLabelValues(mode, kind).Observe(observeValue)
m.syncerOutcome.WithLabelValues(mode, resource).Observe(observeValue)
}

View File

@ -146,6 +146,7 @@ func doInstallPlugin(ctx context.Context, pluginID, version string, o pluginInst
return err
}
} else {
ctx = repo.WithRequestOrigin(ctx, "cli")
archiveInfo, err := repository.GetPluginArchiveInfo(ctx, pluginID, version, compatOpts)
if err != nil {
return err

View File

@ -1,39 +0,0 @@
package httpclient
import (
"io"
)
type CloseCallbackFunc func(bytesRead int64)
// CountBytesReader counts the total amount of bytes read from the underlying reader.
//
// The provided callback func will be called before the underlying reader is closed.
func CountBytesReader(reader io.ReadCloser, callback CloseCallbackFunc) io.ReadCloser {
if reader == nil {
panic("reader cannot be nil")
}
if callback == nil {
panic("callback cannot be nil")
}
return &countBytesReader{reader: reader, callback: callback}
}
type countBytesReader struct {
reader io.ReadCloser
callback CloseCallbackFunc
counter int64
}
func (r *countBytesReader) Read(p []byte) (int, error) {
n, err := r.reader.Read(p)
r.counter += int64(n)
return n, err
}
func (r *countBytesReader) Close() error {
r.callback(r.counter)
return r.reader.Close()
}

View File

@ -1,38 +0,0 @@
package httpclient
import (
"fmt"
"io"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestCountBytesReader verifies that CountBytesReader reports the exact
// number of bytes read through the wrapped reader and passes the body
// through unmodified.
func TestCountBytesReader(t *testing.T) {
	cases := []struct {
		body               string
		expectedBytesCount int64
	}{
		{body: "d", expectedBytesCount: 1},
		{body: "dummy", expectedBytesCount: 5},
	}

	for i, c := range cases {
		t.Run(fmt.Sprintf("Test CountBytesReader %d", i), func(t *testing.T) {
			var gotBytes int64
			reader := CountBytesReader(io.NopCloser(strings.NewReader(c.body)), func(bytesRead int64) {
				gotBytes = bytesRead
			})

			payload, err := io.ReadAll(reader)
			require.NoError(t, err)
			require.NoError(t, reader.Close())

			require.Equal(t, c.expectedBytesCount, gotBytes)
			require.Equal(t, string(payload), c.body)
		})
	}
}

View File

@ -6,7 +6,6 @@ import (
"time"
sdkhttpclient "github.com/grafana/grafana-plugin-sdk-go/backend/httpclient"
"github.com/grafana/grafana/pkg/infra/httpclient"
"github.com/grafana/grafana/pkg/infra/metrics/metricutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@ -113,7 +112,7 @@ func executeMiddleware(next http.RoundTripper, labels prometheus.Labels) http.Ro
}
if res != nil && res.StatusCode != http.StatusSwitchingProtocols {
res.Body = httpclient.CountBytesReader(res.Body, func(bytesRead int64) {
res.Body = sdkhttpclient.CountBytesReader(res.Body, func(bytesRead int64) {
responseSizeHistogram.Observe(float64(bytesRead))
})
}

View File

@ -231,7 +231,7 @@ type FakePluginRepo struct {
GetPluginArchiveFunc func(_ context.Context, pluginID, version string, _ repo.CompatOpts) (*repo.PluginArchive, error)
GetPluginArchiveByURLFunc func(_ context.Context, archiveURL string, _ repo.CompatOpts) (*repo.PluginArchive, error)
GetPluginArchiveInfoFunc func(_ context.Context, pluginID, version string, _ repo.CompatOpts) (*repo.PluginArchiveInfo, error)
PluginVersionFunc func(pluginID, version string, compatOpts repo.CompatOpts) (repo.VersionData, error)
PluginVersionFunc func(_ context.Context, pluginID, version string, compatOpts repo.CompatOpts) (repo.VersionData, error)
}
// GetPluginArchive fetches the requested plugin archive.
@ -260,9 +260,9 @@ func (r *FakePluginRepo) GetPluginArchiveInfo(ctx context.Context, pluginID, ver
return &repo.PluginArchiveInfo{}, nil
}
func (r *FakePluginRepo) PluginVersion(pluginID, version string, compatOpts repo.CompatOpts) (repo.VersionData, error) {
func (r *FakePluginRepo) PluginVersion(ctx context.Context, pluginID, version string, compatOpts repo.CompatOpts) (repo.VersionData, error) {
if r.PluginVersionFunc != nil {
return r.PluginVersionFunc(pluginID, version, compatOpts)
return r.PluginVersionFunc(ctx, pluginID, version, compatOpts)
}
return repo.VersionData{}, nil
}

View File

@ -35,7 +35,15 @@ func NewClient(skipTLSVerify bool, logger log.PrettyLogger) *Client {
}
}
func (c *Client) Download(_ context.Context, pluginZipURL, checksum string, compatOpts CompatOpts) (*PluginArchive, error) {
type requestOrigin struct{}
// WithRequestOrigin adds the request origin to the context which is used
// to set the `grafana-origin` header in the outgoing HTTP request.
func WithRequestOrigin(ctx context.Context, origin string) context.Context {
return context.WithValue(ctx, requestOrigin{}, origin)
}
func (c *Client) Download(ctx context.Context, pluginZipURL, checksum string, compatOpts CompatOpts) (*PluginArchive, error) {
// Create temp file for downloading zip file
tmpFile, err := os.CreateTemp("", "*.zip")
if err != nil {
@ -49,7 +57,7 @@ func (c *Client) Download(_ context.Context, pluginZipURL, checksum string, comp
c.log.Debugf("Installing plugin from %s", pluginZipURL)
err = c.downloadFile(tmpFile, pluginZipURL, checksum, compatOpts)
err = c.downloadFile(ctx, tmpFile, pluginZipURL, checksum, compatOpts)
if err != nil {
if err := tmpFile.Close(); err != nil {
c.log.Warn("Failed to close file", "error", err)
@ -65,8 +73,8 @@ func (c *Client) Download(_ context.Context, pluginZipURL, checksum string, comp
return &PluginArchive{File: rc}, nil
}
func (c *Client) SendReq(url *url.URL, compatOpts CompatOpts) ([]byte, error) {
req, err := c.createReq(url, compatOpts)
func (c *Client) SendReq(ctx context.Context, url *url.URL, compatOpts CompatOpts) ([]byte, error) {
req, err := c.createReq(ctx, url, compatOpts)
if err != nil {
return nil, err
}
@ -87,7 +95,7 @@ func (c *Client) SendReq(url *url.URL, compatOpts CompatOpts) ([]byte, error) {
return io.ReadAll(bodyReader)
}
func (c *Client) downloadFile(tmpFile *os.File, pluginURL, checksum string, compatOpts CompatOpts) (err error) {
func (c *Client) downloadFile(ctx context.Context, tmpFile *os.File, pluginURL, checksum string, compatOpts CompatOpts) (err error) {
// Try handling URL as a local file path first
if _, err := os.Stat(pluginURL); err == nil {
// TODO re-verify
@ -110,13 +118,11 @@ func (c *Client) downloadFile(tmpFile *os.File, pluginURL, checksum string, comp
return nil
}
c.retryCount = 0
defer func() {
if r := recover(); r != nil {
c.retryCount++
if c.retryCount < 3 {
c.log.Debug("Failed downloading. Will retry once.")
c.log.Debug("Failed downloading. Will retry.")
err = tmpFile.Truncate(0)
if err != nil {
return
@ -125,7 +131,7 @@ func (c *Client) downloadFile(tmpFile *os.File, pluginURL, checksum string, comp
if err != nil {
return
}
err = c.downloadFile(tmpFile, pluginURL, checksum, compatOpts)
err = c.downloadFile(ctx, tmpFile, pluginURL, checksum, compatOpts)
} else {
c.retryCount = 0
failure := fmt.Sprintf("%v", r)
@ -145,8 +151,13 @@ func (c *Client) downloadFile(tmpFile *os.File, pluginURL, checksum string, comp
// Using no timeout as some plugin archives make take longer to fetch due to size, network performance, etc.
// Note: This is also used as part of the grafana plugin install CLI operation
bodyReader, err := c.sendReqNoTimeout(u, compatOpts)
bodyReader, err := c.sendReqNoTimeout(ctx, u, compatOpts)
if err != nil {
if c.retryCount < 3 {
c.retryCount++
c.log.Debug("Failed downloading. Will retry.")
err = c.downloadFile(ctx, tmpFile, pluginURL, checksum, compatOpts)
}
return err
}
defer func() {
@ -166,11 +177,14 @@ func (c *Client) downloadFile(tmpFile *os.File, pluginURL, checksum string, comp
if len(checksum) > 0 && checksum != fmt.Sprintf("%x", h.Sum(nil)) {
return ErrChecksumMismatch(pluginURL)
}
c.retryCount = 0
return nil
}
func (c *Client) sendReqNoTimeout(url *url.URL, compatOpts CompatOpts) (io.ReadCloser, error) {
req, err := c.createReq(url, compatOpts)
func (c *Client) sendReqNoTimeout(ctx context.Context, url *url.URL, compatOpts CompatOpts) (io.ReadCloser, error) {
req, err := c.createReq(ctx, url, compatOpts)
if err != nil {
return nil, err
}
@ -182,7 +196,7 @@ func (c *Client) sendReqNoTimeout(url *url.URL, compatOpts CompatOpts) (io.ReadC
return c.handleResp(res, compatOpts)
}
func (c *Client) createReq(url *url.URL, compatOpts CompatOpts) (*http.Request, error) {
func (c *Client) createReq(ctx context.Context, url *url.URL, compatOpts CompatOpts) (*http.Request, error) {
req, err := http.NewRequest(http.MethodGet, url.String(), nil)
if err != nil {
return nil, err
@ -201,6 +215,14 @@ func (c *Client) createReq(url *url.URL, compatOpts CompatOpts) (*http.Request,
req.Header.Set("grafana-arch", sysArch)
}
if c.retryCount > 0 {
req.Header.Set("grafana-retrycount", fmt.Sprintf("%d", c.retryCount))
}
if orig := ctx.Value(requestOrigin{}); orig != nil {
req.Header.Set("grafana-origin", orig.(string))
}
return req, err
}

View File

@ -0,0 +1,72 @@
package repo
import (
"archive/zip"
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/grafana/grafana/pkg/plugins/log"
"github.com/stretchr/testify/require"
)
func writeFakeZip(w http.ResponseWriter) error {
ww := zip.NewWriter(w)
_, err := ww.Create("test.txt")
if err != nil {
return err
}
return ww.Close()
}
// Test_Download exercises Client.Download against a local httptest server:
// a plain successful download, propagation of the grafana-origin header taken
// from the context, and the retry path when the server first answers with an
// error.
func Test_Download(t *testing.T) {
	t.Run("it should download a file", func(t *testing.T) {
		// Server always serves a tiny but valid zip archive.
		fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			err := writeFakeZip(w)
			require.NoError(t, err)
		}))
		defer fakeServer.Close()
		cli := fakeServer.Client()
		repo := Client{httpClient: *cli, httpClientNoTimeout: *cli, log: log.NewPrettyLogger("test")}
		// Empty checksum means checksum verification is skipped.
		_, err := repo.Download(context.Background(), fakeServer.URL, "", CompatOpts{})
		require.NoError(t, err)
	})
	t.Run("it should set the origin header", func(t *testing.T) {
		// Capture the grafana-origin header the client sends.
		var origin string
		fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			origin = r.Header.Get("grafana-origin")
			err := writeFakeZip(w)
			require.NoError(t, err)
		}))
		defer fakeServer.Close()
		cli := fakeServer.Client()
		repo := Client{httpClient: *cli, httpClientNoTimeout: *cli, log: log.NewPrettyLogger("test")}
		// WithRequestOrigin stores the origin in the context; Download should
		// surface it as the grafana-origin request header.
		ctx := WithRequestOrigin(context.Background(), "test")
		_, err := repo.Download(ctx, fakeServer.URL, "", CompatOpts{})
		require.NoError(t, err)
		require.Equal(t, "test", origin, "origin header should be set")
	})
	t.Run("it should retry on error", func(t *testing.T) {
		// First request fails with 500; the second must carry the
		// grafana-retrycount header and succeed.
		var count int
		fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			count++
			if count < 2 {
				http.Error(w, "error", http.StatusInternalServerError)
				return
			}
			retryCount := r.Header.Get("grafana-retrycount")
			require.Equal(t, "2", retryCount, "retry count should be set")
			err := writeFakeZip(w)
			require.NoError(t, err)
		}))
		defer fakeServer.Close()
		cli := fakeServer.Client()
		// retryCount is pre-seeded to 1 so the retried request reports "2".
		repo := Client{httpClient: *cli, httpClientNoTimeout: *cli, log: log.NewPrettyLogger("test"), retryCount: 1}
		_, err := repo.Download(context.Background(), fakeServer.URL, "", CompatOpts{})
		require.NoError(t, err)
		require.Equal(t, 2, count, "should retry on error")
	})
}

View File

@ -15,7 +15,7 @@ type Service interface {
// GetPluginArchiveInfo fetches information needed for downloading the requested plugin.
GetPluginArchiveInfo(ctx context.Context, pluginID, version string, opts CompatOpts) (*PluginArchiveInfo, error)
// PluginVersion will return plugin version based on the requested information.
PluginVersion(pluginID, version string, compatOpts CompatOpts) (VersionData, error)
PluginVersion(ctx context.Context, pluginID, version string, compatOpts CompatOpts) (VersionData, error)
}
type CompatOpts struct {

View File

@ -63,8 +63,8 @@ func (m *Manager) GetPluginArchiveByURL(ctx context.Context, pluginZipURL string
}
// GetPluginArchiveInfo returns the options for downloading the requested plugin (with optional `version`)
func (m *Manager) GetPluginArchiveInfo(_ context.Context, pluginID, version string, compatOpts CompatOpts) (*PluginArchiveInfo, error) {
v, err := m.PluginVersion(pluginID, version, compatOpts)
func (m *Manager) GetPluginArchiveInfo(ctx context.Context, pluginID, version string, compatOpts CompatOpts) (*PluginArchiveInfo, error) {
v, err := m.PluginVersion(ctx, pluginID, version, compatOpts)
if err != nil {
return nil, err
}
@ -77,8 +77,8 @@ func (m *Manager) GetPluginArchiveInfo(_ context.Context, pluginID, version stri
}
// PluginVersion will return plugin version based on the requested information
func (m *Manager) PluginVersion(pluginID, version string, compatOpts CompatOpts) (VersionData, error) {
versions, err := m.grafanaCompatiblePluginVersions(pluginID, compatOpts)
func (m *Manager) PluginVersion(ctx context.Context, pluginID, version string, compatOpts CompatOpts) (VersionData, error) {
versions, err := m.grafanaCompatiblePluginVersions(ctx, pluginID, compatOpts)
if err != nil {
return VersionData{}, err
}
@ -103,7 +103,7 @@ func (m *Manager) downloadURL(pluginID, version string) string {
}
// grafanaCompatiblePluginVersions will get version info from /api/plugins/$pluginID/versions
func (m *Manager) grafanaCompatiblePluginVersions(pluginID string, compatOpts CompatOpts) ([]Version, error) {
func (m *Manager) grafanaCompatiblePluginVersions(ctx context.Context, pluginID string, compatOpts CompatOpts) ([]Version, error) {
u, err := url.Parse(m.baseURL)
if err != nil {
return nil, err
@ -111,7 +111,7 @@ func (m *Manager) grafanaCompatiblePluginVersions(pluginID string, compatOpts Co
u.Path = path.Join(u.Path, pluginID, "versions")
body, err := m.client.SendReq(u, compatOpts)
body, err := m.client.SendReq(ctx, u, compatOpts)
if err != nil {
return nil, err
}

View File

@ -4,7 +4,7 @@ go 1.23.0
require (
github.com/grafana/dskit v0.0.0-20240805174438-dfa83b4ed2d3
github.com/grafana/grafana-plugin-sdk-go v0.244.0
github.com/grafana/grafana-plugin-sdk-go v0.245.0
github.com/json-iterator/go v1.1.12
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/prometheus/client_golang v1.20.0

View File

@ -98,8 +98,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grafana/dskit v0.0.0-20240805174438-dfa83b4ed2d3 h1:as4PmrFoYI1byS5JjsgPC7uSGTMh+SgS0ePv6hOyDGU=
github.com/grafana/dskit v0.0.0-20240805174438-dfa83b4ed2d3/go.mod h1:lcjGB6SuaZ2o44A9nD6p/tR4QXSPbzViRY520Gy6pTQ=
github.com/grafana/grafana-plugin-sdk-go v0.244.0 h1:ZZxHbiiF6QcsnlbPFyZGmzNDoTC1pLeHXUQYoskWt5c=
github.com/grafana/grafana-plugin-sdk-go v0.244.0/go.mod h1:H3FXrJMUlwocQ6UYj8Ds5I9EzRAVOcdRcgaRE3mXQqk=
github.com/grafana/grafana-plugin-sdk-go v0.245.0 h1:2KCKA86//O20ffL6WKzHGx5scBbdV7GyEFGnH8Hdv7M=
github.com/grafana/grafana-plugin-sdk-go v0.245.0/go.mod h1:1X8Kgo/SK91Qo1WBCKjPSKrfgjpQys1OkQsHhA78TLg=
github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8=
github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls=
github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg=

View File

@ -86,6 +86,8 @@ func New(
func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
fromAlert := req.Headers["FromAlert"] == "true"
logger := s.log.FromContext(ctx)
logger.Debug("Begin query execution", "fromAlert", fromAlert)
result := backend.QueryDataResponse{
Responses: backend.Responses{},
}
@ -104,7 +106,6 @@ func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest)
concurrentQueryCount, err := req.PluginContext.GrafanaConfig.ConcurrentQueryCount()
if err != nil {
logger := s.log.FromContext(ctx)
logger.Debug(fmt.Sprintf("Concurrent Query Count read/parse error: %v", err), "prometheusRunQueriesInParallel")
concurrentQueryCount = 10
}

View File

@ -6,6 +6,7 @@ import (
"fmt"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/prometheus/client_golang/prometheus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -45,6 +46,7 @@ type DataSourceAPIBuilder struct {
contextProvider PluginContextWrapper
accessControl accesscontrol.AccessControl
queryTypes *query.QueryTypeDefinitionList
log log.Logger
}
func RegisterAPIService(
@ -121,6 +123,7 @@ func NewDataSourceAPIBuilder(
datasources: datasources,
contextProvider: contextProvider,
accessControl: accessControl,
log: log.New("grafana-apiserver.datasource"),
}
if loadQueryTypes {
// In the future, this will somehow come from the plugin

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"strings"
"github.com/grafana/grafana-plugin-sdk-go/backend"
data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1"
@ -52,7 +53,6 @@ func (r *subQueryREST) Connect(ctx context.Context, name string, opts runtime.Ob
if err != nil {
return nil, err
}
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
dqr := data.QueryDataRequest{}
err := web.Bind(req, &dqr)
@ -73,9 +73,30 @@ func (r *subQueryREST) Connect(ctx context.Context, name string, opts runtime.Ob
ctx = backend.WithGrafanaConfig(ctx, pluginCtx.GrafanaConfig)
ctx = contextualMiddlewares(ctx)
// only forward expected headers, log unexpected ones
headers := make(map[string]string)
// headers are case insensitive, however some datasources still check for camel casing so we have to send them camel cased
expectedHeaders := map[string]string{
"fromalert": "FromAlert",
"content-type": "Content-Type",
"content-length": "Content-Length",
"user-agent": "User-Agent",
"accept": "Accept",
}
for k, v := range req.Header {
headerToSend, ok := expectedHeaders[strings.ToLower(k)]
if ok {
headers[headerToSend] = v[0]
} else {
r.builder.log.Warn("datasource received an unexpected header, ignoring it", "header", k)
}
}
rsp, err := r.builder.client.QueryData(ctx, &backend.QueryDataRequest{
Queries: queries,
PluginContext: pluginCtx,
Headers: headers,
})
if err != nil {
responder.Error(err)

View File

@ -0,0 +1,98 @@
package datasource
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
)
// TestSubQueryConnect verifies that the sub-query handler forwards only the
// expected HTTP headers to the plugin client and normalizes their casing
// (e.g. "fromAlert" becomes "FromAlert"); all mocks are no-op stubs except
// mockClient, which records the headers it was called with.
func TestSubQueryConnect(t *testing.T) {
	sqr := subQueryREST{
		builder: &DataSourceAPIBuilder{
			client: mockClient{
				lastCalledWithHeaders: &map[string]string{},
			},
			datasources:     mockDatasources{},
			contextProvider: mockContextProvider{},
			log:             log.NewNopLogger(),
		},
	}
	mr := mockResponder{}
	handler, err := sqr.Connect(context.Background(), "dsname", nil, mr)
	require.NoError(t, err)
	rr := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/some-path", nil)
	req.Header.Set("fromAlert", "true")
	req.Header.Set("Content-Type", "application/json")
	handler.ServeHTTP(rr, req)
	// test that headers are forwarded and cased appropriately
	require.Equal(t, map[string]string{
		"FromAlert":    "true",
		"Content-Type": "application/json",
	}, *sqr.builder.client.(mockClient).lastCalledWithHeaders)
}
// mockClient is a test double for the plugin client. It records the headers
// of the last QueryData call through a shared pointer so that value copies of
// the mock still expose what was sent.
type mockClient struct {
	lastCalledWithHeaders *map[string]string // filled in by QueryData
}

// QueryData captures req.Headers for later assertions and returns an error so
// the caller's error path is exercised.
func (m mockClient) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
	*m.lastCalledWithHeaders = req.Headers
	return nil, fmt.Errorf("mock error")
}

// CallResource is a no-op stub.
func (m mockClient) CallResource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error {
	return nil
}

// CheckHealth is a no-op stub that reports neither a result nor an error.
func (m mockClient) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
	return nil, nil
}
// mockResponder is a no-op responder stub used in tests; it discards both
// objects and errors.
type mockResponder struct {
}

// Object writes the provided object to the response. Invoking this method multiple times is undefined.
func (m mockResponder) Object(statusCode int, obj runtime.Object) {
}

// Error writes the provided error to the response. This method may only be invoked once.
func (m mockResponder) Error(err error) {
}
// mockDatasources is a no-op stub of the datasource accessor used by
// DataSourceAPIBuilder in tests; every method returns nil, nil.
type mockDatasources struct {
}

// Get gets a specific datasource (that the user in context can see)
func (m mockDatasources) Get(ctx context.Context, uid string) (*v0alpha1.DataSourceConnection, error) {
	return nil, nil
}

// List lists all data sources the user in context can see
func (m mockDatasources) List(ctx context.Context) (*v0alpha1.DataSourceConnectionList, error) {
	return nil, nil
}

// Return settings (decrypted!) for a specific plugin
// This will require "query" permission for the user in context
func (m mockDatasources) GetInstanceSettings(ctx context.Context, uid string) (*backend.DataSourceInstanceSettings, error) {
	return nil, nil
}
// mockContextProvider satisfies the plugin-context provider used by the
// builder in tests by returning an empty backend.PluginContext for any
// datasource settings.
type mockContextProvider struct {
}

func (m mockContextProvider) PluginContextForDataSource(ctx context.Context, datasourceSettings *backend.DataSourceInstanceSettings) (backend.PluginContext, error) {
	return backend.PluginContext{}, nil
}

View File

@ -9,14 +9,13 @@ import (
// The query runner interface
type DataSourceClientSupplier interface {
// Get a client for a given datasource
// NOTE: authorization headers are not yet added and the client may be shared across multiple users
GetDataSourceClient(ctx context.Context, ref data.DataSourceRef) (data.QueryDataClient, error)
GetDataSourceClient(ctx context.Context, ref data.DataSourceRef, headers map[string]string) (data.QueryDataClient, error)
}
type CommonDataSourceClientSupplier struct {
Client data.QueryDataClient
}
func (s *CommonDataSourceClientSupplier) GetDataSourceClient(ctx context.Context, ref data.DataSourceRef) (data.QueryDataClient, error) {
func (s *CommonDataSourceClientSupplier) GetDataSourceClient(_ context.Context, _ data.DataSourceRef, _ map[string]string) (data.QueryDataClient, error) {
return s.Client, nil
}

View File

@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend"
@ -48,7 +49,7 @@ func newQueryREST(builder *QueryAPIBuilder) *queryREST {
}
func (r *queryREST) New() runtime.Object {
// This is added as the "ResponseType" regarless what ProducesObject() says :)
// This is added as the "ResponseType" regardless what ProducesObject() says :)
return &query.QueryDataResponse{}
}
@ -134,6 +135,28 @@ func (r *queryREST) Connect(connectCtx context.Context, name string, _ runtime.O
return
}
// get headers from the original http req and add them to each sub request
// headers are case insensitive, however some datasources still check for camel casing so we have to send them camel cased
expectedHeaders := map[string]string{
"fromalert": "FromAlert",
"content-type": "Content-Type",
"content-length": "Content-Length",
"user-agent": "User-Agent",
"accept": "Accept",
}
for i := range req.Requests {
req.Requests[i].Headers = make(map[string]string)
for k, v := range httpreq.Header {
headerToSend, ok := expectedHeaders[strings.ToLower(k)]
if ok {
req.Requests[i].Headers[headerToSend] = v[0]
} else {
b.log.Warn(fmt.Sprintf("query service received an unexpected header, ignoring it: %s", k))
}
}
}
// Actually run the query
rsp, err := b.execute(ctx, req)
if err != nil {
@ -192,11 +215,14 @@ func (b *QueryAPIBuilder) handleQuerySingleDatasource(ctx context.Context, req d
return &backend.QueryDataResponse{}, nil
}
// Add user headers... here or in client.QueryData
client, err := b.client.GetDataSourceClient(ctx, v0alpha1.DataSourceRef{
Type: req.PluginId,
UID: req.UID,
})
client, err := b.client.GetDataSourceClient(
ctx,
v0alpha1.DataSourceRef{
Type: req.PluginId,
UID: req.UID,
},
req.Headers,
)
if err != nil {
return nil, err
}

View File

@ -0,0 +1,98 @@
package query
import (
"bytes"
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/grafana/grafana-plugin-sdk-go/backend"
data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1"
"github.com/grafana/grafana/pkg/expr"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
)
// TestQueryRestConnectHandler verifies that the query REST handler forwards
// only the allow-listed HTTP headers to the datasource client supplier and
// normalizes their casing ("fromAlert" -> "FromAlert"); the unexpected header
// must be dropped. mockClient records the headers it was handed.
func TestQueryRestConnectHandler(t *testing.T) {
	b := &QueryAPIBuilder{
		client: mockClient{
			lastCalledWithHeaders: &map[string]string{},
		},
		tracer: tracing.InitializeTracerForTest(),
		parser: newQueryParser(expr.NewExpressionQueryReader(featuremgmt.WithFeatures()),
			&legacyDataSourceRetriever{}, tracing.InitializeTracerForTest()),
		log: log.New("test"),
	}
	qr := newQueryREST(b)
	ctx := context.Background()
	mr := mockResponder{}
	handler, err := qr.Connect(ctx, "name", nil, mr)
	require.NoError(t, err)

	rr := httptest.NewRecorder()
	// Minimal query payload; the datasource itself is never reached because
	// mockClient.GetDataSourceClient errors out after recording the headers.
	body := runtime.RawExtension{
		Raw: []byte(`{
			"queries": [
				{
					"datasource": {
					"type": "prometheus",
					"uid": "demo-prometheus"
					},
					"expr": "sum(go_gc_duration_seconds)",
					"range": false,
					"instant": true
				}
			],
			"from": "now-1h",
			"to": "now"}`),
	}
	req := httptest.NewRequest(http.MethodGet, "/some-path", bytes.NewReader(body.Raw))
	req.Header.Set("fromAlert", "true")
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("some-unexpected-header", "some-value")
	handler.ServeHTTP(rr, req)

	require.Equal(t, map[string]string{
		"FromAlert":    "true",
		"Content-Type": "application/json",
	}, *b.client.(mockClient).lastCalledWithHeaders)
}
// mockResponder is a no-op responder stub used in tests; it discards both
// objects and errors.
type mockResponder struct {
}

// Object writes the provided object to the response. Invoking this method multiple times is undefined.
func (m mockResponder) Object(statusCode int, obj runtime.Object) {
}

// Error writes the provided error to the response. This method may only be invoked once.
func (m mockResponder) Error(err error) {
}
// mockClient is a test double for the datasource client supplier. It records
// the headers passed to GetDataSourceClient through a shared pointer so value
// copies of the mock still expose what was sent, and it always errors so the
// request stops right after the headers are captured.
type mockClient struct {
	lastCalledWithHeaders *map[string]string // filled in by GetDataSourceClient
}

// GetDataSourceClient captures the forwarded headers and returns an error so
// no real datasource client is ever created.
func (m mockClient) GetDataSourceClient(ctx context.Context, ref data.DataSourceRef, headers map[string]string) (data.QueryDataClient, error) {
	*m.lastCalledWithHeaders = headers
	return nil, fmt.Errorf("mock error")
}

// QueryData is a stub that always fails.
func (m mockClient) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
	return nil, fmt.Errorf("mock error")
}

// CallResource is a no-op stub.
func (m mockClient) CallResource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error {
	return nil
}

// CheckHealth is a no-op stub that reports neither a result nor an error.
func (m mockClient) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
	return nil, nil
}

View File

@ -71,7 +71,7 @@ func (r *findREST) Connect(ctx context.Context, name string, opts runtime.Object
results := &scope.FindScopeNodeChildrenResults{}
raw, err := r.scopeNodeStorage.List(ctx, &internalversion.ListOptions{
Limit: 1000,
Limit: 10000,
})
if err != nil {
@ -80,6 +80,7 @@ func (r *findREST) Connect(ctx context.Context, name string, opts runtime.Object
}
all, ok := raw.(*scope.ScopeNodeList)
if !ok {
responder.Error(fmt.Errorf("expected ScopeNodeList"))
return

View File

@ -64,7 +64,9 @@ func (f *findScopeDashboardsREST) Connect(ctx context.Context, name string, opts
}
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
raw, err := f.scopeDashboardStorage.List(ctx, &internalversion.ListOptions{})
raw, err := f.scopeDashboardStorage.List(ctx, &internalversion.ListOptions{
Limit: 10000,
})
if err != nil {
w.WriteHeader(500)
return

View File

@ -133,11 +133,12 @@ func (b *ScopeAPIBuilder) GetAPIGroupInfo(
}
storage[scopeResourceInfo.StoragePath()] = scopeStorage
scopeDashboardStorage, err := newScopeDashboardBindingStorage(scheme, optsGetter)
scopeDashboardStorage, scopedDashboardStatusStorage, err := newScopeDashboardBindingStorage(scheme, optsGetter)
if err != nil {
return nil, err
}
storage[scopeDashboardResourceInfo.StoragePath()] = scopeDashboardStorage
storage[scopeDashboardResourceInfo.StoragePath()+"/status"] = scopedDashboardStatusStorage
scopeNodeStorage, err := newScopeNodeStorage(scheme, optsGetter)
if err != nil {

View File

@ -44,7 +44,7 @@ func newScopeStorage(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGette
return &storage{Store: store}, nil
}
func newScopeDashboardBindingStorage(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGetter) (*storage, error) {
func newScopeDashboardBindingStorage(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGetter) (*storage, *grafanaregistry.StatusREST, error) {
resourceInfo := scope.ScopeDashboardBindingResourceInfo
strategy := grafanaregistry.NewStrategy(scheme, resourceInfo.GroupVersion())
@ -63,10 +63,12 @@ func newScopeDashboardBindingStorage(scheme *runtime.Scheme, optsGetter generic.
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
return nil, nil, err
}
return &storage{Store: store}, nil
statusStrategy := grafanaregistry.NewStatusStrategy(scheme, resourceInfo.GroupVersion())
statusREST := grafanaregistry.NewStatusREST(store, statusStrategy)
return &storage{Store: store}, statusREST, nil
}
func newScopeNodeStorage(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGetter) (*storage, error) {

View File

@ -406,7 +406,9 @@ func (s *Service) DeclareFixedRoles(registrations ...accesscontrol.RoleRegistrat
}
for i := range r.Role.Permissions {
s.permRegistry.RegisterPermission(r.Role.Permissions[i].Action, r.Role.Permissions[i].Scope)
if err := s.permRegistry.RegisterPermission(r.Role.Permissions[i].Action, r.Role.Permissions[i].Scope); err != nil {
return err
}
}
s.registrations.Append(r)
@ -464,7 +466,9 @@ func (s *Service) DeclarePluginRoles(ctx context.Context, ID, name string, regs
for i := range r.Role.Permissions {
// Register plugin actions and their possible scopes for permission validation
s.permRegistry.RegisterPluginScope(r.Role.Permissions[i].Scope)
s.permRegistry.RegisterPermission(r.Role.Permissions[i].Action, r.Role.Permissions[i].Scope)
if err := s.permRegistry.RegisterPermission(r.Role.Permissions[i].Action, r.Role.Permissions[i].Scope); err != nil {
return err
}
}
s.log.Debug("Registering plugin role", "role", r.Role.Name)

View File

@ -50,8 +50,7 @@ func (api *AccessControlAPI) getUserActions(c *contextmodel.ReqContext) response
defer span.End()
reloadCache := c.QueryBool("reloadcache")
permissions, err := api.Service.GetUserPermissions(ctx,
c.SignedInUser, ac.Options{ReloadCache: reloadCache})
permissions, err := api.Service.GetUserPermissions(ctx, c.SignedInUser, ac.Options{ReloadCache: reloadCache})
if err != nil {
return response.JSON(http.StatusInternalServerError, err)
}
@ -65,8 +64,7 @@ func (api *AccessControlAPI) getUserPermissions(c *contextmodel.ReqContext) resp
defer span.End()
reloadCache := c.QueryBool("reloadcache")
permissions, err := api.Service.GetUserPermissions(ctx,
c.SignedInUser, ac.Options{ReloadCache: reloadCache})
permissions, err := api.Service.GetUserPermissions(ctx, c.SignedInUser, ac.Options{ReloadCache: reloadCache})
if err != nil {
return response.JSON(http.StatusInternalServerError, err)
}

View File

@ -14,6 +14,8 @@ var (
ErrUnknownActionTplt = "unknown action: {{.Public.Action}}, was not found in the list of valid actions"
ErrBaseUnknownAction = errutil.BadRequest("permreg.unknown-action").MustTemplate(ErrUnknownActionTplt, errutil.WithPublic(ErrUnknownActionTplt))
ErrBaseUnknownKind = errutil.BadRequest("permreg.unknown-kind").MustTemplate("unknown kind: {{.Public.Kind}}")
)
func ErrInvalidScope(scope string, action string, validScopePrefixes PrefixSet) error {
@ -28,6 +30,10 @@ func ErrUnknownAction(action string) error {
return ErrBaseUnknownAction.Build(errutil.TemplateData{Public: map[string]any{"Action": action}})
}
func ErrUnknownKind(kind string) error {
return ErrBaseUnknownKind.Build(errutil.TemplateData{Public: map[string]any{"Kind": kind}})
}
func generateValidScopeFormats(acceptedScopePrefixes PrefixSet) []string {
if len(acceptedScopePrefixes) == 0 {
return []string{}
@ -48,7 +54,7 @@ func generateValidScopeFormats(acceptedScopePrefixes PrefixSet) []string {
type PermissionRegistry interface {
RegisterPluginScope(scope string)
RegisterPermission(action, scope string)
RegisterPermission(action, scope string) error
IsPermissionValid(action, scope string) error
GetScopePrefixes(action string) (PrefixSet, bool)
}
@ -87,6 +93,7 @@ func newPermissionRegistry() *permissionRegistry {
"global.users": "global.users:id:",
"roles": "roles:uid:",
"services": "services:",
"receivers": "receivers:uid:",
}
return &permissionRegistry{
actionScopePrefixes: make(map[string]PrefixSet, 200),
@ -101,42 +108,47 @@ func (pr *permissionRegistry) RegisterPluginScope(scope string) {
}
scopeParts := strings.Split(scope, ":")
kind := scopeParts[0]
// If the scope is already registered, return
if _, found := pr.kindScopePrefix[scopeParts[0]]; found {
if _, found := pr.kindScopePrefix[kind]; found {
return
}
// If the scope contains an attribute part, register the kind and attribute
if len(scopeParts) > 2 {
kind, attr := scopeParts[0], scopeParts[1]
attr := scopeParts[1]
pr.kindScopePrefix[kind] = kind + ":" + attr + ":"
pr.logger.Debug("registered scope prefix", "kind", kind, "scope_prefix", kind+":"+attr+":")
return
}
pr.logger.Debug("registered scope prefix", "kind", scopeParts[0], "scope_prefix", scopeParts[0]+":")
pr.kindScopePrefix[scopeParts[0]] = scopeParts[0] + ":"
pr.logger.Debug("registered scope prefix", "kind", kind, "scope_prefix", kind+":")
pr.kindScopePrefix[kind] = kind + ":"
}
func (pr *permissionRegistry) RegisterPermission(action, scope string) {
if _, ok := pr.actionScopePrefixes[action]; !ok {
pr.actionScopePrefixes[action] = PrefixSet{}
func (pr *permissionRegistry) RegisterPermission(action, scope string) error {
if _, ok := pr.actionScopePrefixes[action]; ok {
// action already registered
return nil
}
pr.actionScopePrefixes[action] = PrefixSet{}
if scope == "" {
// scopeless action
return
return nil
}
kind := strings.Split(scope, ":")[0]
scopePrefix, ok := pr.kindScopePrefix[kind]
if !ok {
pr.logger.Warn("unknown scope prefix", "scope", scope)
return
pr.logger.Error("unknown kind: please update `kindScopePrefix` with the correct scope prefix", "kind", kind)
return ErrUnknownKind(kind)
}
// Add a new entry in case the scope is not empty
pr.actionScopePrefixes[action][scopePrefix] = true
return nil
}
func (pr *permissionRegistry) IsPermissionValid(action, scope string) error {

View File

@ -51,7 +51,7 @@ func Test_permissionRegistry_RegisterPermission(t *testing.T) {
scope string
wantKind string
wantPrefixSet PrefixSet
wantSkip bool
wantErr bool
}{
{
name: "register folders read",
@ -67,16 +67,31 @@ func Test_permissionRegistry_RegisterPermission(t *testing.T) {
wantPrefixSet: PrefixSet{},
},
{
name: "register an action on an unknown kind",
action: "unknown:action",
scope: "unknown:uid:*",
wantPrefixSet: PrefixSet{},
name: "register an action on an unknown kind",
action: "unknown:action",
scope: "unknown:uid:*",
wantErr: true,
},
{
name: "register an action that is already registered",
action: "already:registered",
scope: "already:uid:*",
wantPrefixSet: PrefixSet{"already:uid:": true},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pr := newPermissionRegistry()
pr.RegisterPermission(tt.action, tt.scope)
// Pretend that an action is registered
pr.actionScopePrefixes["already:registered"] = PrefixSet{"already:uid:": true}
err := pr.RegisterPermission(tt.action, tt.scope)
if tt.wantErr {
require.Error(t, err)
return
}
got, ok := pr.actionScopePrefixes[tt.action]
require.True(t, ok)
for k, v := range got {
@ -88,8 +103,10 @@ func Test_permissionRegistry_RegisterPermission(t *testing.T) {
func Test_permissionRegistry_IsPermissionValid(t *testing.T) {
pr := newPermissionRegistry()
pr.RegisterPermission("folders:read", "folders:uid:")
pr.RegisterPermission("test-app.settings:read", "")
err := pr.RegisterPermission("folders:read", "folders:uid:")
require.NoError(t, err)
err = pr.RegisterPermission("test-app.settings:read", "")
require.NoError(t, err)
tests := []struct {
name string
@ -166,8 +183,10 @@ func Test_permissionRegistry_IsPermissionValid(t *testing.T) {
func Test_permissionRegistry_GetScopePrefixes(t *testing.T) {
pr := newPermissionRegistry()
pr.RegisterPermission("folders:read", "folders:uid:")
pr.RegisterPermission("test-app.settings:read", "")
err := pr.RegisterPermission("folders:read", "folders:uid:")
require.NoError(t, err)
err = pr.RegisterPermission("test-app.settings:read", "")
require.NoError(t, err)
tests := []struct {
name string

View File

@ -1,22 +1,38 @@
package test
import "github.com/grafana/grafana/pkg/services/accesscontrol/permreg"
import (
"testing"
func ProvidePermissionRegistry() permreg.PermissionRegistry {
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/services/accesscontrol/permreg"
)
func ProvidePermissionRegistry(t *testing.T) permreg.PermissionRegistry {
permReg := permreg.ProvidePermissionRegistry()
// Test core permissions
permReg.RegisterPermission("datasources:read", "datasources:uid:")
permReg.RegisterPermission("dashboards:read", "dashboards:uid:")
permReg.RegisterPermission("dashboards:read", "folders:uid:")
permReg.RegisterPermission("folders:read", "folders:uid:")
err := permReg.RegisterPermission("datasources:read", "datasources:uid:")
require.NoError(t, err)
err = permReg.RegisterPermission("dashboards:read", "dashboards:uid:")
require.NoError(t, err)
err = permReg.RegisterPermission("dashboards:read", "folders:uid:")
require.NoError(t, err)
err = permReg.RegisterPermission("folders:read", "folders:uid:")
require.NoError(t, err)
// Test plugins permissions
permReg.RegisterPermission("plugins.app:access", "plugins:id:")
err = permReg.RegisterPermission("plugins.app:access", "plugins:id:")
require.NoError(t, err)
// App
permReg.RegisterPermission("test-app:read", "")
permReg.RegisterPermission("test-app.settings:read", "")
permReg.RegisterPermission("test-app.projects:read", "")
err = permReg.RegisterPermission("test-app:read", "")
require.NoError(t, err)
err = permReg.RegisterPermission("test-app.settings:read", "")
require.NoError(t, err)
err = permReg.RegisterPermission("test-app.projects:read", "")
require.NoError(t, err)
// App 1
permReg.RegisterPermission("test-app1.catalog:read", "")
permReg.RegisterPermission("test-app1.announcements:read", "")
err = permReg.RegisterPermission("test-app1.catalog:read", "")
require.NoError(t, err)
err = permReg.RegisterPermission("test-app1.announcements:read", "")
require.NoError(t, err)
return permReg
}

View File

@ -138,7 +138,7 @@ type Service struct {
}
func (s *Service) GetPermissions(ctx context.Context, user identity.Requester, resourceID string) ([]accesscontrol.ResourcePermission, error) {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.GetPermissions")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.GetPermissions")
defer span.End()
var inheritedScopes []string
@ -209,7 +209,7 @@ func (s *Service) GetPermissions(ctx context.Context, user identity.Requester, r
}
func (s *Service) SetUserPermission(ctx context.Context, orgID int64, user accesscontrol.User, resourceID, permission string) (*accesscontrol.ResourcePermission, error) {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.SetUserPermission")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.SetUserPermission")
defer span.End()
actions, err := s.mapPermission(permission)
@ -235,7 +235,7 @@ func (s *Service) SetUserPermission(ctx context.Context, orgID int64, user acces
}
func (s *Service) SetTeamPermission(ctx context.Context, orgID, teamID int64, resourceID, permission string) (*accesscontrol.ResourcePermission, error) {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.SetTeamPermission")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.SetTeamPermission")
defer span.End()
actions, err := s.mapPermission(permission)
@ -261,7 +261,7 @@ func (s *Service) SetTeamPermission(ctx context.Context, orgID, teamID int64, re
}
func (s *Service) SetBuiltInRolePermission(ctx context.Context, orgID int64, builtInRole, resourceID, permission string) (*accesscontrol.ResourcePermission, error) {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.SetBuiltInRolePermission")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.SetBuiltInRolePermission")
defer span.End()
actions, err := s.mapPermission(permission)
@ -290,7 +290,7 @@ func (s *Service) SetPermissions(
ctx context.Context, orgID int64, resourceID string,
commands ...accesscontrol.SetResourcePermissionCommand,
) ([]accesscontrol.ResourcePermission, error) {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.SetPermissions")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.SetPermissions")
defer span.End()
if err := s.validateResource(ctx, orgID, resourceID); err != nil {
@ -370,7 +370,7 @@ func (s *Service) mapPermission(permission string) ([]string, error) {
}
func (s *Service) validateResource(ctx context.Context, orgID int64, resourceID string) error {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.validateResource")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.validateResource")
defer span.End()
if s.options.ResourceValidator != nil {
@ -380,7 +380,7 @@ func (s *Service) validateResource(ctx context.Context, orgID int64, resourceID
}
func (s *Service) validateUser(ctx context.Context, orgID, userID int64) error {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.validateUser")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.validateUser")
defer span.End()
if !s.options.Assignments.Users {
@ -397,7 +397,7 @@ func (s *Service) validateUser(ctx context.Context, orgID, userID int64) error {
}
func (s *Service) validateTeam(ctx context.Context, orgID, teamID int64) error {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.validateTeam")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.validateTeam")
defer span.End()
if !s.options.Assignments.Teams {
@ -416,7 +416,7 @@ func (s *Service) validateTeam(ctx context.Context, orgID, teamID int64) error {
}
func (s *Service) validateBuiltinRole(ctx context.Context, builtinRole string) error {
_, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.validateBuiltinRole")
_, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.validateBuiltinRole")
defer span.End()
if !s.options.Assignments.BuiltInRoles {
@ -580,7 +580,7 @@ func (a *ActionSetSvc) ExpandActionSetsWithFilter(permissions []accesscontrol.Pe
// RegisterActionSets allow the caller to expand the existing action sets with additional permissions
// This is intended to be used by plugins, and currently supports extending folder and dashboard action sets
func (a *ActionSetSvc) RegisterActionSets(ctx context.Context, pluginID string, registrations []plugins.ActionSet) error {
ctx, span := tracer.Start(ctx, "accesscontroll.resourcepermissions.RegisterActionSets")
ctx, span := tracer.Start(ctx, "accesscontrol.resourcepermissions.RegisterActionSets")
defer span.End()
if !a.features.IsEnabled(ctx, featuremgmt.FlagAccessActionSets) || !a.features.IsEnabled(ctx, featuremgmt.FlagAccessControlOnCall) {

View File

@ -17,6 +17,21 @@ type NamespaceMapper = claims.NamespaceFormatter
// GetNamespaceMapper returns a function that will convert orgIds into a consistent namespace
func GetNamespaceMapper(cfg *setting.Cfg) NamespaceMapper {
if cfg != nil && cfg.StackID != "" {
stackId, err := strconv.ParseInt(cfg.StackID, 10, 64)
if err != nil {
stackId = 0
}
// Temporarily force this as plural
cloudNamespace := fmt.Sprintf("stacks-%d", stackId)
// cloudNamespace := claims.CloudNamespaceFormatter(stackIdInt)
return func(_ int64) string { return cloudNamespace }
}
return claims.OrgNamespaceFormatter
}
// Temporary version that is only passed to th
func GetTemporarySingularNamespaceMapper(cfg *setting.Cfg) NamespaceMapper {
if cfg != nil && cfg.StackID != "" {
stackIdInt, err := strconv.ParseInt(cfg.StackID, 10, 64)
if err != nil {

View File

@ -32,8 +32,8 @@ func TestNamespaceMapper(t *testing.T) {
{
name: "with stackId",
cfg: "abc",
orgId: 123, // ignored
expected: "stack-0", // we parse to int and default to 0
orgId: 123, // ignored
expected: "stacks-0", // we parse to int and default to 0
},
}

View File

@ -8,7 +8,7 @@ import (
"github.com/go-jose/go-jose/v3/jwt"
authnlib "github.com/grafana/authlib/authn"
authnlibclaims "github.com/grafana/authlib/claims"
"github.com/grafana/authlib/claims"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sync/singleflight"
@ -39,7 +39,7 @@ func ProvideService(
cfg: cfg, logger: log.New("id-service"),
signer: signer, cache: cache,
metrics: newMetrics(reg),
nsMapper: request.GetNamespaceMapper(cfg),
nsMapper: request.GetTemporarySingularNamespaceMapper(cfg), // TODO replace with the plural one
}
authnService.RegisterPostAuthHook(s.hook, 140)
@ -85,7 +85,7 @@ func (s *Service) SignIdentity(ctx context.Context, id identity.Requester) (stri
s.logger.FromContext(ctx).Debug("Sign new id token", "id", id.GetID())
now := time.Now()
claims := &auth.IDClaims{
idClaims := &auth.IDClaims{
Claims: &jwt.Claims{
Issuer: s.cfg.AppURL,
Audience: getAudience(id.GetOrgID()),
@ -100,15 +100,15 @@ func (s *Service) SignIdentity(ctx context.Context, id identity.Requester) (stri
},
}
if id.IsIdentityType(authnlibclaims.TypeUser) {
claims.Rest.Email = id.GetEmail()
claims.Rest.EmailVerified = id.IsEmailVerified()
claims.Rest.AuthenticatedBy = id.GetAuthenticatedBy()
claims.Rest.Username = id.GetLogin()
claims.Rest.DisplayName = id.GetDisplayName()
if id.IsIdentityType(claims.TypeUser) {
idClaims.Rest.Email = id.GetEmail()
idClaims.Rest.EmailVerified = id.IsEmailVerified()
idClaims.Rest.AuthenticatedBy = id.GetAuthenticatedBy()
idClaims.Rest.Username = id.GetLogin()
idClaims.Rest.DisplayName = id.GetDisplayName()
}
token, err := s.signer.SignIDToken(ctx, claims)
token, err := s.signer.SignIDToken(ctx, idClaims)
if err != nil {
s.metrics.failedTokenSigningCounter.Inc()
return resultType{}, nil
@ -124,7 +124,7 @@ func (s *Service) SignIdentity(ctx context.Context, id identity.Requester) (stri
s.logger.FromContext(ctx).Error("Failed to add id token to cache", "error", err)
}
return resultType{token: token, idClaims: claims}, nil
return resultType{token: token, idClaims: idClaims}, nil
})
if err != nil {
@ -140,7 +140,7 @@ func (s *Service) RemoveIDToken(ctx context.Context, id identity.Requester) erro
func (s *Service) hook(ctx context.Context, identity *authn.Identity, _ *authn.Request) error {
// FIXME(kalleep): we should probably lazy load this
token, claims, err := s.SignIdentity(ctx, identity)
token, idClaims, err := s.SignIdentity(ctx, identity)
if err != nil {
if shouldLogErr(err) {
s.logger.FromContext(ctx).Error("Failed to sign id token", "err", err, "id", identity.GetID())
@ -150,7 +150,7 @@ func (s *Service) hook(ctx context.Context, identity *authn.Identity, _ *authn.R
}
identity.IDToken = token
identity.IDTokenClaims = claims
identity.IDTokenClaims = idClaims
return nil
}

View File

@ -31,6 +31,11 @@ var _ authn.HookClient = new(APIKey)
var _ authn.ContextAwareClient = new(APIKey)
var _ authn.IdentityResolverClient = new(APIKey)
const (
metaKeyID = "keyID"
metaKeySkipLastUsed = "keySkipLastUsed"
)
func ProvideAPIKey(apiKeyService apikey.Service) *APIKey {
return &APIKey{
log: log.New(authn.ClientAPIKey),
@ -64,6 +69,14 @@ func (s *APIKey) Authenticate(ctx context.Context, r *authn.Request) (*authn.Ide
return nil, err
}
// Set keyID so we can use it in last used hook
r.SetMeta(metaKeyID, strconv.FormatInt(key.ID, 10))
if !shouldUpdateLastUsedAt(key) {
// Hack to just have some value, we will check this key in the hook
// and if its not an empty string we will not update last used.
r.SetMeta(metaKeySkipLastUsed, "true")
}
// if the api key don't belong to a service account construct the identity and return it
if key.ServiceAccountId == nil || *key.ServiceAccountId < 1 {
return newAPIKeyIdentity(key), nil
@ -109,7 +122,6 @@ func (s *APIKey) getFromTokenLegacy(ctx context.Context, token string) (*apikey.
if err != nil {
return nil, err
}
// fetch key
keyQuery := apikey.GetByNameQuery{KeyName: decoded.Name, OrgID: decoded.OrgId}
key, err := s.apiKeyService.GetApiKeyByName(ctx, &keyQuery)
@ -170,52 +182,32 @@ func (s *APIKey) ResolveIdentity(ctx context.Context, orgID int64, typ claims.Id
}
func (s *APIKey) Hook(ctx context.Context, identity *authn.Identity, r *authn.Request) error {
id, exists := s.getAPIKeyID(ctx, identity, r)
if !exists {
if r.GetMeta(metaKeySkipLastUsed) != "" {
return nil
}
go func(apikeyID int64) {
go func(keyID string) {
defer func() {
if err := recover(); err != nil {
s.log.Error("Panic during user last seen sync", "err", err)
}
}()
if err := s.apiKeyService.UpdateAPIKeyLastUsedDate(context.Background(), apikeyID); err != nil {
s.log.Warn("Failed to update last use date for api key", "id", apikeyID)
id, err := strconv.ParseInt(keyID, 10, 64)
if err != nil {
s.log.Warn("Invalid api key id", "id", keyID, "err", err)
return
}
}(id)
if err := s.apiKeyService.UpdateAPIKeyLastUsedDate(context.Background(), id); err != nil {
s.log.Warn("Failed to update last used date for api key", "id", keyID, "err", err)
return
}
}(r.GetMeta(metaKeyID))
return nil
}
func (s *APIKey) getAPIKeyID(ctx context.Context, id *authn.Identity, r *authn.Request) (apiKeyID int64, exists bool) {
internalId, err := id.GetInternalID()
if err != nil {
s.log.Warn("Failed to parse ID from identifier", "err", err)
return -1, false
}
if id.IsIdentityType(claims.TypeAPIKey) {
return internalId, true
}
if id.IsIdentityType(claims.TypeServiceAccount) {
// When the identity is service account, the ID in from the namespace is the service account ID.
// We need to fetch the API key in this scenario, as we could use it to uniquely identify a service account token.
apiKey, err := s.getAPIKey(ctx, getTokenFromRequest(r))
if err != nil {
s.log.Warn("Failed to fetch the API Key from request")
return -1, false
}
return apiKey.ID, true
}
return -1, false
}
func looksLikeApiKey(token string) bool {
return token != ""
}
@ -276,3 +268,7 @@ func newServiceAccountIdentity(key *apikey.APIKey) *authn.Identity {
ClientParams: authn.ClientParams{FetchSyncedUser: true, SyncPermissions: true},
}
}
func shouldUpdateLastUsedAt(key *apikey.APIKey) bool {
return key.LastUsedAt == nil || time.Since(*key.LastUsedAt) > 5*time.Minute
}

View File

@ -189,108 +189,6 @@ func TestAPIKey_Test(t *testing.T) {
}
}
func TestAPIKey_GetAPIKeyIDFromIdentity(t *testing.T) {
type TestCase struct {
desc string
expectedKey *apikey.APIKey
expectedIdentity *authn.Identity
expectedError error
expectedKeyID int64
expectedExists bool
}
tests := []TestCase{
{
desc: "should return API Key ID for valid token that is connected to service account",
expectedKey: &apikey.APIKey{
ID: 1,
OrgID: 1,
Key: hash,
ServiceAccountId: intPtr(1),
},
expectedIdentity: &authn.Identity{
ID: "1",
Type: claims.TypeServiceAccount,
OrgID: 1,
Name: "test",
AuthenticatedBy: login.APIKeyAuthModule,
},
expectedKeyID: 1,
expectedExists: true,
},
{
desc: "should return API Key ID for valid token for API key",
expectedKey: &apikey.APIKey{
ID: 2,
OrgID: 1,
Key: hash,
},
expectedIdentity: &authn.Identity{
ID: "2",
Type: claims.TypeAPIKey,
OrgID: 1,
Name: "test",
AuthenticatedBy: login.APIKeyAuthModule,
},
expectedKeyID: 2,
expectedExists: true,
},
{
desc: "should not return any ID when the request is not made by API key or service account",
expectedKey: &apikey.APIKey{
ID: 2,
OrgID: 1,
Key: hash,
},
expectedIdentity: &authn.Identity{
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
Name: "test",
AuthenticatedBy: login.APIKeyAuthModule,
},
expectedKeyID: -1,
expectedExists: false,
},
{
desc: "should not return any ID when the can't fetch API Key",
expectedKey: &apikey.APIKey{
ID: 1,
OrgID: 1,
Key: hash,
},
expectedIdentity: &authn.Identity{
ID: "2",
Type: claims.TypeServiceAccount,
OrgID: 1,
Name: "test",
AuthenticatedBy: login.APIKeyAuthModule,
},
expectedError: fmt.Errorf("invalid token"),
expectedKeyID: -1,
expectedExists: false,
},
}
req := &authn.Request{HTTPRequest: &http.Request{
Header: map[string][]string{
"Authorization": {"Bearer " + secret},
},
}}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
c := ProvideAPIKey(&apikeytest.Service{
ExpectedError: tt.expectedError,
ExpectedAPIKey: tt.expectedKey,
})
id, exists := c.getAPIKeyID(context.Background(), tt.expectedIdentity, req)
assert.Equal(t, tt.expectedExists, exists)
assert.Equal(t, tt.expectedKeyID, id)
})
}
}
func TestAPIKey_ResolveIdentity(t *testing.T) {
type testCase struct {
desc string

View File

@ -74,13 +74,11 @@ type ExtendedJWT struct {
func (s *ExtendedJWT) Authenticate(ctx context.Context, r *authn.Request) (*authn.Identity, error) {
jwtToken := s.retrieveAuthenticationToken(r.HTTPRequest)
accessToken, err := s.accessTokenVerifier.Verify(ctx, jwtToken)
accessTokenClaims, err := s.accessTokenVerifier.Verify(ctx, jwtToken)
if err != nil {
return nil, errExtJWTInvalid.Errorf("failed to verify access token: %w", err)
}
accessTokenClaims := authlib.NewAccessClaims(*accessToken)
idToken := s.retrieveAuthorizationToken(r.HTTPRequest)
if idToken != "" {
idTokenClaims, err := s.idTokenVerifier.Verify(ctx, idToken)
@ -88,10 +86,10 @@ func (s *ExtendedJWT) Authenticate(ctx context.Context, r *authn.Request) (*auth
return nil, errExtJWTInvalid.Errorf("failed to verify id token: %w", err)
}
return s.authenticateAsUser(authlib.NewIdentityClaims(*idTokenClaims), accessTokenClaims)
return s.authenticateAsUser(*idTokenClaims, *accessTokenClaims)
}
return s.authenticateAsService(accessTokenClaims)
return s.authenticateAsService(*accessTokenClaims)
}
func (s *ExtendedJWT) IsEnabled() bool {
@ -99,73 +97,75 @@ func (s *ExtendedJWT) IsEnabled() bool {
}
func (s *ExtendedJWT) authenticateAsUser(
idTokenClaims claims.IdentityClaims,
accessTokenClaims claims.AccessClaims,
idTokenClaims authlib.Claims[authlib.IDTokenClaims],
accessTokenClaims authlib.Claims[authlib.AccessTokenClaims],
) (*authn.Identity, error) {
// Only allow id tokens signed for namespace configured for this instance.
if allowedNamespace := s.namespaceMapper(s.getDefaultOrgID()); !claims.NamespaceMatches(idTokenClaims, allowedNamespace) {
return nil, errExtJWTDisallowedNamespaceClaim.Errorf("unexpected id token namespace: %s", idTokenClaims.Namespace())
if allowedNamespace := s.namespaceMapper(s.getDefaultOrgID()); !claims.NamespaceMatches(authlib.NewIdentityClaims(idTokenClaims), allowedNamespace) {
return nil, errExtJWTDisallowedNamespaceClaim.Errorf("unexpected id token namespace: %s", idTokenClaims.Rest.Namespace)
}
// Allow access tokens with either the same namespace as the validated id token namespace or wildcard (`*`).
if !claims.NamespaceMatches(accessTokenClaims, idTokenClaims.Namespace()) {
return nil, errExtJWTMisMatchedNamespaceClaims.Errorf("unexpected access token namespace: %s", accessTokenClaims.Namespace())
if !claims.NamespaceMatches(authlib.NewAccessClaims(accessTokenClaims), idTokenClaims.Rest.Namespace) {
return nil, errExtJWTMisMatchedNamespaceClaims.Errorf("unexpected access token namespace: %s", accessTokenClaims.Rest.Namespace)
}
accessType, _, err := identity.ParseTypeAndID(accessTokenClaims.Subject())
accessType, _, err := identity.ParseTypeAndID(accessTokenClaims.Subject)
if err != nil {
return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", accessTokenClaims.Subject())
return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", accessTokenClaims.Subject)
}
if !claims.IsIdentityType(accessType, claims.TypeAccessPolicy) {
return nil, errExtJWTInvalid.Errorf("unexpected identity: %s", accessTokenClaims.Subject())
return nil, errExtJWTInvalid.Errorf("unexpected identity: %s", accessTokenClaims.Subject)
}
t, id, err := identity.ParseTypeAndID(idTokenClaims.Subject())
t, id, err := identity.ParseTypeAndID(idTokenClaims.Subject)
if err != nil {
return nil, errExtJWTInvalid.Errorf("failed to parse id token subject: %w", err)
}
if !claims.IsIdentityType(t, claims.TypeUser) {
return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", idTokenClaims.Subject())
return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", idTokenClaims.Subject)
}
// For use in service layer, allow higher privilege
allowedKubernetesNamespace := accessTokenClaims.Namespace()
allowedKubernetesNamespace := accessTokenClaims.Rest.Namespace
if len(s.cfg.StackID) > 0 {
// For single-tenant cloud use, choose the lower of the two (id token will always have the specific namespace)
allowedKubernetesNamespace = idTokenClaims.Namespace()
allowedKubernetesNamespace = idTokenClaims.Rest.Namespace
}
return &authn.Identity{
ID: id,
Type: t,
OrgID: s.getDefaultOrgID(),
AccessTokenClaims: &accessTokenClaims,
IDTokenClaims: &idTokenClaims,
AuthenticatedBy: login.ExtendedJWTModule,
AuthID: accessTokenClaims.Subject(),
AuthID: accessTokenClaims.Subject,
AllowedKubernetesNamespace: allowedKubernetesNamespace,
ClientParams: authn.ClientParams{
SyncPermissions: true,
FetchPermissionsParams: authn.FetchPermissionsParams{
ActionsLookup: accessTokenClaims.DelegatedPermissions(),
ActionsLookup: accessTokenClaims.Rest.DelegatedPermissions,
},
FetchSyncedUser: true,
}}, nil
}
func (s *ExtendedJWT) authenticateAsService(accessTokenClaims claims.AccessClaims) (*authn.Identity, error) {
func (s *ExtendedJWT) authenticateAsService(accessTokenClaims authlib.Claims[authlib.AccessTokenClaims]) (*authn.Identity, error) {
// Allow access tokens with that has a wildcard namespace or a namespace matching this instance.
if allowedNamespace := s.namespaceMapper(s.getDefaultOrgID()); !claims.NamespaceMatches(accessTokenClaims, allowedNamespace) {
return nil, errExtJWTDisallowedNamespaceClaim.Errorf("unexpected access token namespace: %s", accessTokenClaims.Namespace())
if allowedNamespace := s.namespaceMapper(s.getDefaultOrgID()); !claims.NamespaceMatches(authlib.NewAccessClaims(accessTokenClaims), allowedNamespace) {
return nil, errExtJWTDisallowedNamespaceClaim.Errorf("unexpected access token namespace: %s", accessTokenClaims.Rest.Namespace)
}
t, id, err := identity.ParseTypeAndID(accessTokenClaims.Subject())
t, id, err := identity.ParseTypeAndID(accessTokenClaims.Subject)
if err != nil {
return nil, fmt.Errorf("failed to parse access token subject: %w", err)
}
if !claims.IsIdentityType(t, claims.TypeAccessPolicy) {
return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", accessTokenClaims.Subject())
return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", accessTokenClaims.Subject)
}
return &authn.Identity{
@ -173,13 +173,15 @@ func (s *ExtendedJWT) authenticateAsService(accessTokenClaims claims.AccessClaim
UID: id,
Type: t,
OrgID: s.getDefaultOrgID(),
AccessTokenClaims: &accessTokenClaims,
IDTokenClaims: nil,
AuthenticatedBy: login.ExtendedJWTModule,
AuthID: accessTokenClaims.Subject(),
AllowedKubernetesNamespace: accessTokenClaims.Namespace(),
AuthID: accessTokenClaims.Subject,
AllowedKubernetesNamespace: accessTokenClaims.Rest.Namespace,
ClientParams: authn.ClientParams{
SyncPermissions: true,
FetchPermissionsParams: authn.FetchPermissionsParams{
Roles: accessTokenClaims.Permissions(),
Roles: accessTokenClaims.Rest.Permissions,
},
FetchSyncedUser: false,
},

View File

@ -230,6 +230,7 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
UID: "this-uid",
Type: claims.TypeAccessPolicy,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaims,
AllowedKubernetesNamespace: "default",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -247,6 +248,7 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
UID: "this-uid",
Type: claims.TypeAccessPolicy,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWildcard,
AllowedKubernetesNamespace: "*",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -264,6 +266,8 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaims,
IDTokenClaims: &validIDTokenClaims,
AllowedKubernetesNamespace: "default",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -285,6 +289,8 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWildcard,
IDTokenClaims: &validIDTokenClaims,
AllowedKubernetesNamespace: "*",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -311,6 +317,8 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWildcard,
IDTokenClaims: &validIDTokenClaimsWithStackSet,
AllowedKubernetesNamespace: "stacks-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -337,6 +345,7 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
UID: "this-uid",
Type: claims.TypeAccessPolicy,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWithStackSet,
AllowedKubernetesNamespace: "stacks-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -362,6 +371,7 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
UID: "this-uid",
Type: claims.TypeAccessPolicy,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWithDeprecatedStackClaimSet,
AllowedKubernetesNamespace: "stack-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -387,6 +397,8 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWithDeprecatedStackClaimSet,
IDTokenClaims: &validIDTokenClaimsWithDeprecatedStackClaimSet,
AllowedKubernetesNamespace: "stack-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
@ -413,6 +425,8 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWildcard,
IDTokenClaims: &validIDTokenClaimsWithStackSet,
AllowedKubernetesNamespace: "stacks-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",

View File

@ -74,10 +74,15 @@ type Identity struct {
// IDToken is a signed token representing the identity that can be forwarded to plugins and external services.
IDToken string
IDTokenClaims *authn.Claims[authn.IDTokenClaims]
AccessTokenClaims *authn.Claims[authn.AccessTokenClaims]
}
// Access implements claims.AuthInfo.
func (i *Identity) GetAccess() claims.AccessClaims {
if i.AccessTokenClaims != nil {
return authn.NewAccessClaims(*i.AccessTokenClaims)
}
return &identity.IDClaimsWrapper{Source: i}
}

View File

@ -122,6 +122,7 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler {
reqContext.IsSignedIn = !reqContext.SignedInUser.IsAnonymous
reqContext.AllowAnonymous = reqContext.SignedInUser.IsAnonymous
reqContext.IsRenderCall = id.IsAuthenticatedBy(login.RenderModule)
ctx = identity.WithRequester(ctx, id)
}
h.excludeSensitiveHeadersFromRequest(reqContext.Req)
@ -139,8 +140,7 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler {
// End the span to make next handlers not wrapped within middleware span
span.End()
next.ServeHTTP(w, r.WithContext(identity.WithRequester(ctx, id)))
next.ServeHTTP(w, r.WithContext(ctx))
})
}

View File

@ -8,6 +8,7 @@ import (
"github.com/grafana/grafana/pkg/infra/metrics"
ac "github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/folder"
"go.opentelemetry.io/otel"
)
const (
@ -38,12 +39,16 @@ var (
ScopeFoldersAll = ScopeFoldersProvider.GetResourceAllScope()
ScopeDashboardsProvider = ac.NewScopeProvider(ScopeDashboardsRoot)
ScopeDashboardsAll = ScopeDashboardsProvider.GetResourceAllScope()
tracer = otel.Tracer("github.com/grafana/grafana/pkg/services/dashboards")
)
// NewFolderNameScopeResolver provides an ScopeAttributeResolver that is able to convert a scope prefixed with "folders:name:" into an uid based scope.
func NewFolderNameScopeResolver(folderDB folder.FolderStore, folderSvc folder.Service) (string, ac.ScopeAttributeResolver) {
prefix := ScopeFoldersProvider.GetResourceScopeName("")
return prefix, ac.ScopeAttributeResolverFunc(func(ctx context.Context, orgID int64, scope string) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.NewFolderNameScopeResolver")
span.End()
if !strings.HasPrefix(scope, prefix) {
return nil, ac.ErrInvalidScope
}
@ -72,6 +77,9 @@ func NewFolderNameScopeResolver(folderDB folder.FolderStore, folderSvc folder.Se
func NewFolderIDScopeResolver(folderDB folder.FolderStore, folderSvc folder.Service) (string, ac.ScopeAttributeResolver) {
prefix := ScopeFoldersProvider.GetResourceScope("")
return prefix, ac.ScopeAttributeResolverFunc(func(ctx context.Context, orgID int64, scope string) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.NewFolderIDScopeResolver")
span.End()
if !strings.HasPrefix(scope, prefix) {
return nil, ac.ErrInvalidScope
}
@ -105,6 +113,9 @@ func NewFolderIDScopeResolver(folderDB folder.FolderStore, folderSvc folder.Serv
func NewFolderUIDScopeResolver(folderSvc folder.Service) (string, ac.ScopeAttributeResolver) {
prefix := ScopeFoldersProvider.GetResourceScopeUID("")
return prefix, ac.ScopeAttributeResolverFunc(func(ctx context.Context, orgID int64, scope string) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.NewFolderUIDScopeResolver")
span.End()
if !strings.HasPrefix(scope, prefix) {
return nil, ac.ErrInvalidScope
}
@ -127,6 +138,9 @@ func NewFolderUIDScopeResolver(folderSvc folder.Service) (string, ac.ScopeAttrib
func NewDashboardIDScopeResolver(folderDB folder.FolderStore, ds DashboardService, folderSvc folder.Service) (string, ac.ScopeAttributeResolver) {
prefix := ScopeDashboardsProvider.GetResourceScope("")
return prefix, ac.ScopeAttributeResolverFunc(func(ctx context.Context, orgID int64, scope string) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.NewDashboardIDScopeResolver")
span.End()
if !strings.HasPrefix(scope, prefix) {
return nil, ac.ErrInvalidScope
}
@ -150,6 +164,9 @@ func NewDashboardIDScopeResolver(folderDB folder.FolderStore, ds DashboardServic
func NewDashboardUIDScopeResolver(folderDB folder.FolderStore, ds DashboardService, folderSvc folder.Service) (string, ac.ScopeAttributeResolver) {
prefix := ScopeDashboardsProvider.GetResourceScopeUID("")
return prefix, ac.ScopeAttributeResolverFunc(func(ctx context.Context, orgID int64, scope string) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.NewDashboardUIDScopeResolver")
span.End()
if !strings.HasPrefix(scope, prefix) {
return nil, ac.ErrInvalidScope
}
@ -169,6 +186,9 @@ func NewDashboardUIDScopeResolver(folderDB folder.FolderStore, ds DashboardServi
}
func resolveDashboardScope(ctx context.Context, folderDB folder.FolderStore, orgID int64, dashboard *Dashboard, folderSvc folder.Service) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.resolveDashboardScope")
span.End()
var folderUID string
metrics.MFolderIDsServiceCount.WithLabelValues(metrics.Dashboard).Inc()
// nolint:staticcheck
@ -204,6 +224,9 @@ func resolveDashboardScope(ctx context.Context, folderDB folder.FolderStore, org
}
func GetInheritedScopes(ctx context.Context, orgID int64, folderUID string, folderSvc folder.Service) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.GetInheritedScopes")
span.End()
if folderUID == ac.GeneralFolderUID {
return nil, nil
}

View File

@ -25,8 +25,11 @@ import (
"github.com/grafana/grafana/pkg/services/tag"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
"go.opentelemetry.io/otel"
)
var tracer = otel.Tracer("github.com/grafana/grafana/pkg/services/dashboard/database")
type dashboardStore struct {
store db.ReplDB
cfg *setting.Cfg
@ -69,6 +72,9 @@ func (d *dashboardStore) emitEntityEvent() bool {
}
func (d *dashboardStore) ValidateDashboardBeforeSave(ctx context.Context, dashboard *dashboards.Dashboard, overwrite bool) (bool, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.ValidateDashboardBeforesave")
defer span.End()
isParentFolderChanged := false
err := d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
var err error
@ -93,6 +99,9 @@ func (d *dashboardStore) ValidateDashboardBeforeSave(ctx context.Context, dashbo
}
func (d *dashboardStore) GetProvisionedDataByDashboardID(ctx context.Context, dashboardID int64) (*dashboards.DashboardProvisioning, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetProvisionedDataByDashboardID")
defer span.End()
var data dashboards.DashboardProvisioning
err := d.store.DB().WithDbSession(ctx, func(sess *db.Session) error {
_, err := sess.Where("dashboard_id = ?", dashboardID).Get(&data)
@ -106,6 +115,9 @@ func (d *dashboardStore) GetProvisionedDataByDashboardID(ctx context.Context, da
}
func (d *dashboardStore) GetProvisionedDataByDashboardUID(ctx context.Context, orgID int64, dashboardUID string) (*dashboards.DashboardProvisioning, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetProvisionedDataByDashboardUID")
defer span.End()
var provisionedDashboard dashboards.DashboardProvisioning
err := d.store.DB().WithDbSession(ctx, func(sess *db.Session) error {
var dashboard dashboards.Dashboard
@ -129,6 +141,9 @@ func (d *dashboardStore) GetProvisionedDataByDashboardUID(ctx context.Context, o
}
func (d *dashboardStore) GetProvisionedDashboardData(ctx context.Context, name string) ([]*dashboards.DashboardProvisioning, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetProvisionedDashboardData")
defer span.End()
var result []*dashboards.DashboardProvisioning
err := d.store.DB().WithDbSession(ctx, func(sess *db.Session) error {
return sess.Where("name = ?", name).Find(&result)
@ -137,6 +152,9 @@ func (d *dashboardStore) GetProvisionedDashboardData(ctx context.Context, name s
}
func (d *dashboardStore) SaveProvisionedDashboard(ctx context.Context, cmd dashboards.SaveDashboardCommand, provisioning *dashboards.DashboardProvisioning) (*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.SaveProvisionedDashboard")
defer span.End()
var result *dashboards.Dashboard
var err error
err = d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
@ -155,6 +173,9 @@ func (d *dashboardStore) SaveProvisionedDashboard(ctx context.Context, cmd dashb
}
func (d *dashboardStore) SaveDashboard(ctx context.Context, cmd dashboards.SaveDashboardCommand) (*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.SaveDashboard")
defer span.End()
var result *dashboards.Dashboard
var err error
err = d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
@ -173,6 +194,9 @@ func (d *dashboardStore) SaveDashboard(ctx context.Context, cmd dashboards.SaveD
// UnprovisionDashboard removes row in dashboard_provisioning for the dashboard making it seem as if manually created.
// The dashboard will still have `created_by = -1` to see it was not created by any particular user.
func (d *dashboardStore) UnprovisionDashboard(ctx context.Context, id int64) error {
ctx, span := tracer.Start(ctx, "dashboards.database.UnprovisionDashboard")
defer span.End()
return d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
_, err := sess.Where("dashboard_id = ?", id).Delete(&dashboards.DashboardProvisioning{})
return err
@ -180,6 +204,9 @@ func (d *dashboardStore) UnprovisionDashboard(ctx context.Context, id int64) err
}
func (d *dashboardStore) DeleteOrphanedProvisionedDashboards(ctx context.Context, cmd *dashboards.DeleteOrphanedProvisionedDashboardsCommand) error {
ctx, span := tracer.Start(ctx, "dashboards.database.DeleteOrphanedProvisionedDashboards")
defer span.End()
return d.store.DB().WithDbSession(ctx, func(sess *db.Session) error {
var result []*dashboards.DashboardProvisioning
@ -205,6 +232,9 @@ func (d *dashboardStore) DeleteOrphanedProvisionedDashboards(ctx context.Context
}
func (d *dashboardStore) Count(ctx context.Context, scopeParams *quota.ScopeParameters) (*quota.Map, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.Count")
defer span.End()
u := &quota.Map{}
type result struct {
Count int64
@ -500,6 +530,9 @@ func saveProvisionedData(sess *db.Session, provisioning *dashboards.DashboardPro
}
func (d *dashboardStore) GetDashboardsByPluginID(ctx context.Context, query *dashboards.GetDashboardsByPluginIDQuery) ([]*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetDashboardsByPluginID")
defer span.End()
var dashboards = make([]*dashboards.Dashboard, 0)
err := d.store.DB().WithDbSession(ctx, func(dbSession *db.Session) error {
whereExpr := "org_id=? AND plugin_id=? AND is_folder=" + d.store.DB().GetDialect().BooleanStr(false)
@ -514,6 +547,9 @@ func (d *dashboardStore) GetDashboardsByPluginID(ctx context.Context, query *das
}
func (d *dashboardStore) GetSoftDeletedDashboard(ctx context.Context, orgID int64, uid string) (*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetSoftDeletedDashboard")
defer span.End()
if orgID == 0 || uid == "" {
return nil, dashboards.ErrDashboardIdentifierNotSet
}
@ -537,6 +573,9 @@ func (d *dashboardStore) GetSoftDeletedDashboard(ctx context.Context, orgID int6
}
func (d *dashboardStore) RestoreDashboard(ctx context.Context, orgID int64, dashboardUID string, folder *folder.Folder) error {
ctx, span := tracer.Start(ctx, "dashboards.database.RestoreDashboard")
defer span.End()
return d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
if folder == nil || folder.UID == "" {
_, err := sess.Exec("UPDATE dashboard SET deleted=NULL, folder_id=0, folder_uid=NULL WHERE org_id=? AND uid=?", orgID, dashboardUID)
@ -549,6 +588,9 @@ func (d *dashboardStore) RestoreDashboard(ctx context.Context, orgID int64, dash
}
func (d *dashboardStore) SoftDeleteDashboard(ctx context.Context, orgID int64, dashboardUID string) error {
ctx, span := tracer.Start(ctx, "dashboards.database.SoftDeleteDashboard")
defer span.End()
return d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
_, err := sess.Exec("UPDATE dashboard SET deleted=? WHERE org_id=? AND uid=?", time.Now(), orgID, dashboardUID)
return err
@ -556,6 +598,9 @@ func (d *dashboardStore) SoftDeleteDashboard(ctx context.Context, orgID int64, d
}
func (d *dashboardStore) SoftDeleteDashboardsInFolders(ctx context.Context, orgID int64, folderUids []string) error {
ctx, span := tracer.Start(ctx, "dashboards.database.SoftDeleteDashboardsInFolders")
defer span.End()
if len(folderUids) == 0 {
return nil
}
@ -580,6 +625,9 @@ func (d *dashboardStore) SoftDeleteDashboardsInFolders(ctx context.Context, orgI
}
func (d *dashboardStore) DeleteDashboard(ctx context.Context, cmd *dashboards.DeleteDashboardCommand) error {
ctx, span := tracer.Start(ctx, "dashboards.database.DeleteDashboard")
defer span.End()
return d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
return d.deleteDashboard(cmd, sess, d.emitEntityEvent())
})
@ -736,6 +784,9 @@ func createEntityEvent(dashboard *dashboards.Dashboard, eventType store.EntityEv
}
func (d *dashboardStore) GetDashboard(ctx context.Context, query *dashboards.GetDashboardQuery) (*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetDashboard")
defer span.End()
var queryResult *dashboards.Dashboard
err := d.store.ReadReplica().WithDbSession(ctx, func(sess *db.Session) error {
metrics.MFolderIDsServiceCount.WithLabelValues(metrics.Dashboard).Inc()
@ -778,6 +829,9 @@ func (d *dashboardStore) GetDashboard(ctx context.Context, query *dashboards.Get
}
func (d *dashboardStore) GetDashboardUIDByID(ctx context.Context, query *dashboards.GetDashboardRefByIDQuery) (*dashboards.DashboardRef, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetDashboardUIDByID")
defer span.End()
us := &dashboards.DashboardRef{}
err := d.store.DB().WithDbSession(ctx, func(sess *db.Session) error {
var rawSQL = `SELECT uid, slug from dashboard WHERE Id=?`
@ -796,6 +850,9 @@ func (d *dashboardStore) GetDashboardUIDByID(ctx context.Context, query *dashboa
}
func (d *dashboardStore) GetDashboards(ctx context.Context, query *dashboards.GetDashboardsQuery) ([]*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetDashboards")
defer span.End()
var dashboards = make([]*dashboards.Dashboard, 0)
err := d.store.ReadReplica().WithDbSession(ctx, func(sess *db.Session) error {
if len(query.DashboardIDs) == 0 && len(query.DashboardUIDs) == 0 {
@ -824,6 +881,9 @@ func (d *dashboardStore) GetDashboards(ctx context.Context, query *dashboards.Ge
}
func (d *dashboardStore) FindDashboards(ctx context.Context, query *dashboards.FindPersistedDashboardsQuery) ([]dashboards.DashboardSearchProjection, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.FindDashboards")
defer span.End()
recursiveQueriesAreSupported, err := d.store.ReadReplica().RecursiveQueriesAreSupported()
if err != nil {
return nil, err
@ -914,6 +974,9 @@ func (d *dashboardStore) FindDashboards(ctx context.Context, query *dashboards.F
}
func (d *dashboardStore) GetDashboardTags(ctx context.Context, query *dashboards.GetDashboardTagsQuery) ([]*dashboards.DashboardTagCloudItem, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetDashboardTags")
defer span.End()
queryResult := make([]*dashboards.DashboardTagCloudItem, 0)
err := d.store.DB().WithDbSession(ctx, func(dbSession *db.Session) error {
sql := `SELECT
@ -939,6 +1002,9 @@ func (d *dashboardStore) GetDashboardTags(ctx context.Context, query *dashboards
// given parent folder ID.
func (d *dashboardStore) CountDashboardsInFolders(
ctx context.Context, req *dashboards.CountDashboardsInFolderRequest) (int64, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.CountDashboardsInFolders")
defer span.End()
if len(req.FolderUIDs) == 0 {
return 0, nil
}
@ -967,6 +1033,9 @@ func (d *dashboardStore) CountDashboardsInFolders(
func (d *dashboardStore) DeleteDashboardsInFolders(
ctx context.Context, req *dashboards.DeleteDashboardsInFolderRequest) error {
ctx, span := tracer.Start(ctx, "dashboards.database.DeleteDashboardsInFolders")
defer span.End()
return d.store.DB().WithTransactionalDbSession(ctx, func(sess *db.Session) error {
// TODO delete all dashboards in the folder in a bulk query
for _, folderUID := range req.FolderUIDs {
@ -993,6 +1062,9 @@ func (d *dashboardStore) DeleteDashboardsInFolders(
}
func (d *dashboardStore) GetAllDashboards(ctx context.Context) ([]*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetAllDashboards")
defer span.End()
var dashboards = make([]*dashboards.Dashboard, 0)
err := d.store.ReadReplica().WithDbSession(ctx, func(session *db.Session) error {
err := session.Find(&dashboards)
@ -1005,6 +1077,9 @@ func (d *dashboardStore) GetAllDashboards(ctx context.Context) ([]*dashboards.Da
}
func (d *dashboardStore) GetSoftDeletedExpiredDashboards(ctx context.Context, duration time.Duration) ([]*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.database.GetSoftDeletedExpiredDashboards")
defer span.End()
var dashboards = make([]*dashboards.Dashboard, 0)
err := d.store.DB().WithDbSession(ctx, func(sess *db.Session) error {
err := sess.Where("deleted IS NOT NULL AND deleted < ?", time.Now().Add(-duration)).Find(&dashboards)

View File

@ -10,6 +10,7 @@ import (
"github.com/grafana/authlib/claims"
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"golang.org/x/exp/slices"
"github.com/grafana/grafana/pkg/apimachinery/identity"
@ -43,6 +44,7 @@ var (
_ dashboards.PluginService = (*DashboardServiceImpl)(nil)
daysInTrash = 24 * 30 * time.Hour
tracer = otel.Tracer("github.com/grafana/grafana/pkg/services/dashboards/service")
)
type DashboardServiceImpl struct {
@ -103,6 +105,9 @@ func (dr *DashboardServiceImpl) GetProvisionedDashboardDataByDashboardUID(ctx co
//nolint:gocyclo
func (dr *DashboardServiceImpl) BuildSaveDashboardCommand(ctx context.Context, dto *dashboards.SaveDashboardDTO,
validateProvisionedDashboard bool) (*dashboards.SaveDashboardCommand, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.BuildSaveDashboardcommand")
defer span.End()
dash := dto.Dashboard
dash.OrgID = dto.OrgID
@ -242,6 +247,9 @@ func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.
// getGuardianForSavePermissionCheck returns the guardian to be used for checking permission of dashboard
// It replaces deleted Dashboard.GetDashboardIdForSavePermissionCheck()
func getGuardianForSavePermissionCheck(ctx context.Context, d *dashboards.Dashboard, user identity.Requester) (guardian.DashboardGuardian, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.getGuardianForSavePermissionCheck")
defer span.End()
newDashboard := d.ID == 0
if newDashboard {
@ -290,6 +298,9 @@ func validateDashboardRefreshInterval(minRefreshInterval string, dash *dashboard
func (dr *DashboardServiceImpl) SaveProvisionedDashboard(ctx context.Context, dto *dashboards.SaveDashboardDTO,
provisioning *dashboards.DashboardProvisioning) (*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.SaveProvisionedDashboard")
defer span.End()
if err := validateDashboardRefreshInterval(dr.cfg.MinRefreshInterval, dto.Dashboard); err != nil {
dr.log.Warn("Changing refresh interval for provisioned dashboard to minimum refresh interval", "dashboardUid",
dto.Dashboard.UID, "dashboardTitle", dto.Dashboard.Title, "minRefreshInterval", dr.cfg.MinRefreshInterval)
@ -317,6 +328,9 @@ func (dr *DashboardServiceImpl) SaveProvisionedDashboard(ctx context.Context, dt
}
func (dr *DashboardServiceImpl) SaveFolderForProvisionedDashboards(ctx context.Context, dto *folder.CreateFolderCommand) (*folder.Folder, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.SaveFolderForProvisionedDashboards")
defer span.End()
dto.SignedInUser = accesscontrol.BackgroundUser("dashboard_provisioning", dto.OrgID, org.RoleAdmin, provisionerPermissions)
f, err := dr.folderService.Create(ctx, dto)
if err != nil {
@ -330,6 +344,9 @@ func (dr *DashboardServiceImpl) SaveFolderForProvisionedDashboards(ctx context.C
func (dr *DashboardServiceImpl) SaveDashboard(ctx context.Context, dto *dashboards.SaveDashboardDTO,
allowUiUpdate bool) (*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.SaveDashboard")
defer span.End()
if err := validateDashboardRefreshInterval(dr.cfg.MinRefreshInterval, dto.Dashboard); err != nil {
dr.log.Warn("Changing refresh interval for imported dashboard to minimum refresh interval",
"dashboardUid", dto.Dashboard.UID, "dashboardTitle", dto.Dashboard.Title, "minRefreshInterval",
@ -359,6 +376,9 @@ func (dr *DashboardServiceImpl) GetSoftDeletedDashboard(ctx context.Context, org
}
func (dr *DashboardServiceImpl) RestoreDashboard(ctx context.Context, dashboard *dashboards.Dashboard, user identity.Requester, optionalFolderUID string) error {
ctx, span := tracer.Start(ctx, "dashboards.service.RestoreDashboard")
defer span.End()
if !dr.features.IsEnabledGlobally(featuremgmt.FlagDashboardRestore) {
return fmt.Errorf("feature flag %s is not enabled", featuremgmt.FlagDashboardRestore)
}
@ -398,6 +418,9 @@ func (dr *DashboardServiceImpl) RestoreDashboard(ctx context.Context, dashboard
}
func (dr *DashboardServiceImpl) SoftDeleteDashboard(ctx context.Context, orgID int64, dashboardUID string) error {
ctx, span := tracer.Start(ctx, "dashboards.service.SoftDeleteDashboard")
defer span.End()
if !dr.features.IsEnabledGlobally(featuremgmt.FlagDashboardRestore) {
return fmt.Errorf("feature flag %s is not enabled", featuremgmt.FlagDashboardRestore)
}
@ -426,6 +449,9 @@ func (dr *DashboardServiceImpl) DeleteProvisionedDashboard(ctx context.Context,
}
func (dr *DashboardServiceImpl) deleteDashboard(ctx context.Context, dashboardId int64, orgId int64, validateProvisionedDashboard bool) error {
ctx, span := tracer.Start(ctx, "dashboards.service.deleteDashboard")
defer span.End()
if validateProvisionedDashboard {
provisionedData, err := dr.GetProvisionedDashboardDataByDashboardID(ctx, dashboardId)
if err != nil {
@ -442,6 +468,9 @@ func (dr *DashboardServiceImpl) deleteDashboard(ctx context.Context, dashboardId
func (dr *DashboardServiceImpl) ImportDashboard(ctx context.Context, dto *dashboards.SaveDashboardDTO) (
*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.ImportDashboard")
defer span.End()
if err := validateDashboardRefreshInterval(dr.cfg.MinRefreshInterval, dto.Dashboard); err != nil {
dr.log.Warn("Changing refresh interval for imported dashboard to minimum refresh interval",
"dashboardUid", dto.Dashboard.UID, "dashboardTitle", dto.Dashboard.Title,
@ -475,6 +504,9 @@ func (dr *DashboardServiceImpl) GetDashboardsByPluginID(ctx context.Context, que
}
func (dr *DashboardServiceImpl) setDefaultPermissions(ctx context.Context, dto *dashboards.SaveDashboardDTO, dash *dashboards.Dashboard, provisioned bool) {
ctx, span := tracer.Start(ctx, "dashboards.service.setDefaultPermissions")
defer span.End()
resource := "dashboard"
if dash.IsFolder {
resource = "folder"
@ -518,6 +550,9 @@ func (dr *DashboardServiceImpl) setDefaultPermissions(ctx context.Context, dto *
}
func (dr *DashboardServiceImpl) setDefaultFolderPermissions(ctx context.Context, cmd *folder.CreateFolderCommand, f *folder.Folder, provisioned bool) {
ctx, span := tracer.Start(ctx, "dashboards.service.setDefaultFolderPermissions")
defer span.End()
if !dr.cfg.RBAC.PermissionsOnCreation("folder") {
return
}
@ -565,6 +600,9 @@ func (dr *DashboardServiceImpl) GetDashboardsSharedWithUser(ctx context.Context,
}
func (dr *DashboardServiceImpl) getDashboardsSharedWithUser(ctx context.Context, user identity.Requester) ([]*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.getDashboardsSharedWithUser")
defer span.End()
permissions := user.GetPermissions()
dashboardPermissions := permissions[dashboards.ActionDashboardsRead]
sharedDashboards := make([]*dashboards.Dashboard, 0)
@ -594,6 +632,9 @@ func (dr *DashboardServiceImpl) getDashboardsSharedWithUser(ctx context.Context,
// filterUserSharedDashboards filter dashboards directly assigned to user, but not located in folders with view permissions
func (dr *DashboardServiceImpl) filterUserSharedDashboards(ctx context.Context, user identity.Requester, userDashboards []*dashboards.Dashboard) ([]*dashboards.Dashboard, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.filterUserSharedDashboards")
defer span.End()
filteredDashboards := make([]*dashboards.Dashboard, 0)
folderUIDs := make([]string, 0)
@ -632,6 +673,9 @@ func (dr *DashboardServiceImpl) filterUserSharedDashboards(ctx context.Context,
}
func (dr *DashboardServiceImpl) getUserSharedDashboardUIDs(ctx context.Context, user identity.Requester) ([]string, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.getUserSharedDashboardsUIDs")
defer span.End()
userDashboards, err := dr.getDashboardsSharedWithUser(ctx, user)
if err != nil {
return nil, err
@ -644,6 +688,9 @@ func (dr *DashboardServiceImpl) getUserSharedDashboardUIDs(ctx context.Context,
}
func (dr *DashboardServiceImpl) FindDashboards(ctx context.Context, query *dashboards.FindPersistedDashboardsQuery) ([]dashboards.DashboardSearchProjection, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.FindDashboards")
defer span.End()
if dr.features.IsEnabled(ctx, featuremgmt.FlagNestedFolders) && len(query.FolderUIDs) > 0 && slices.Contains(query.FolderUIDs, folder.SharedWithMeFolderUID) {
start := time.Now()
userDashboardUIDs, err := dr.getUserSharedDashboardUIDs(ctx, query.SignedInUser)
@ -665,6 +712,9 @@ func (dr *DashboardServiceImpl) FindDashboards(ctx context.Context, query *dashb
}
func (dr *DashboardServiceImpl) SearchDashboards(ctx context.Context, query *dashboards.FindPersistedDashboardsQuery) (model.HitList, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.SearchDashboards")
defer span.End()
res, err := dr.FindDashboards(ctx, query)
if err != nil {
return nil, err
@ -745,6 +795,9 @@ func (dr DashboardServiceImpl) CountInFolders(ctx context.Context, orgID int64,
}
func (dr *DashboardServiceImpl) DeleteInFolders(ctx context.Context, orgID int64, folderUIDs []string, u identity.Requester) error {
ctx, span := tracer.Start(ctx, "dashboards.service.DeleteInFolders")
defer span.End()
if dr.features.IsEnabledGlobally(featuremgmt.FlagDashboardRestore) {
return dr.dashboardStore.SoftDeleteDashboardsInFolders(ctx, orgID, folderUIDs)
}
@ -755,6 +808,9 @@ func (dr *DashboardServiceImpl) DeleteInFolders(ctx context.Context, orgID int64
func (dr *DashboardServiceImpl) Kind() string { return entity.StandardKindDashboard }
func (dr *DashboardServiceImpl) CleanUpDeletedDashboards(ctx context.Context) (int64, error) {
ctx, span := tracer.Start(ctx, "dashboards.service.CleanUpDeletedDashboards")
defer span.End()
var deletedDashboardsCount int64
deletedDashboards, err := dr.dashboardStore.GetSoftDeletedExpiredDashboards(ctx, daysInTrash)
if err != nil {

View File

@ -1397,6 +1397,34 @@ var (
Stage: FeatureStageExperimental,
Owner: grafanaObservabilityLogsSquad,
},
{
Name: "singleTopNav",
Description: "Unifies the top search bar and breadcrumb bar into one",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaFrontendPlatformSquad,
},
{
Name: "exploreLogsShardSplitting",
Description: "Used in Explore Logs to split queries into multiple queries based on the number of shards",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaObservabilityLogsSquad,
},
{
Name: "exploreLogsAggregatedMetrics",
Description: "Used in Explore Logs to query by aggregated metrics",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaObservabilityLogsSquad,
},
{
Name: "exploreLogsLimitedTimeRange",
Description: "Used in Explore Logs to limit the time range",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaObservabilityLogsSquad,
},
}
)

View File

@ -184,3 +184,7 @@ backgroundPluginInstaller,experimental,@grafana/plugins-platform-backend,false,t
dataplaneAggregator,experimental,@grafana/grafana-app-platform-squad,false,true,false
adhocFilterOneOf,experimental,@grafana/dashboards-squad,false,false,false
lokiSendDashboardPanelNames,experimental,@grafana/observability-logs,false,false,false
singleTopNav,experimental,@grafana/grafana-frontend-platform,false,false,true
exploreLogsShardSplitting,experimental,@grafana/observability-logs,false,false,true
exploreLogsAggregatedMetrics,experimental,@grafana/observability-logs,false,false,true
exploreLogsLimitedTimeRange,experimental,@grafana/observability-logs,false,false,true

1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
184 dataplaneAggregator experimental @grafana/grafana-app-platform-squad false true false
185 adhocFilterOneOf experimental @grafana/dashboards-squad false false false
186 lokiSendDashboardPanelNames experimental @grafana/observability-logs false false false
187 singleTopNav experimental @grafana/grafana-frontend-platform false false true
188 exploreLogsShardSplitting experimental @grafana/observability-logs false false true
189 exploreLogsAggregatedMetrics experimental @grafana/observability-logs false false true
190 exploreLogsLimitedTimeRange experimental @grafana/observability-logs false false true

View File

@ -746,4 +746,20 @@ const (
// FlagLokiSendDashboardPanelNames
// Send dashboard and panel names to Loki when querying
FlagLokiSendDashboardPanelNames = "lokiSendDashboardPanelNames"
// FlagSingleTopNav
// Unifies the top search bar and breadcrumb bar into one
FlagSingleTopNav = "singleTopNav"
// FlagExploreLogsShardSplitting
// Used in Explore Logs to split queries into multiple queries based on the number of shards
FlagExploreLogsShardSplitting = "exploreLogsShardSplitting"
// FlagExploreLogsAggregatedMetrics
// Used in Explore Logs to query by aggregated metrics
FlagExploreLogsAggregatedMetrics = "exploreLogsAggregatedMetrics"
// FlagExploreLogsLimitedTimeRange
// Used in Explore Logs to limit the time range
FlagExploreLogsLimitedTimeRange = "exploreLogsLimitedTimeRange"
)

View File

@ -1046,6 +1046,45 @@
"allowSelfServe": true
}
},
{
"metadata": {
"name": "exploreLogsAggregatedMetrics",
"resourceVersion": "1724938092041",
"creationTimestamp": "2024-08-29T13:28:12Z"
},
"spec": {
"description": "Used in Explore Logs to query by aggregated metrics",
"stage": "experimental",
"codeowner": "@grafana/observability-logs",
"frontend": true
}
},
{
"metadata": {
"name": "exploreLogsLimitedTimeRange",
"resourceVersion": "1724938092041",
"creationTimestamp": "2024-08-29T13:28:12Z"
},
"spec": {
"description": "Used in Explore Logs to limit the time range",
"stage": "experimental",
"codeowner": "@grafana/observability-logs",
"frontend": true
}
},
{
"metadata": {
"name": "exploreLogsShardSplitting",
"resourceVersion": "1724938092041",
"creationTimestamp": "2024-08-29T13:28:12Z"
},
"spec": {
"description": "Used in Explore Logs to split queries into multiple queries based on the number of shards",
"stage": "experimental",
"codeowner": "@grafana/observability-logs",
"frontend": true
}
},
{
"metadata": {
"name": "exploreMetrics",
@ -2501,6 +2540,19 @@
"codeowner": "@grafana/dashboards-squad"
}
},
{
"metadata": {
"name": "singleTopNav",
"resourceVersion": "1724861961030",
"creationTimestamp": "2024-08-28T16:19:21Z"
},
"spec": {
"description": "Unifies the top search bar and breadcrumb bar into one",
"stage": "experimental",
"codeowner": "@grafana/grafana-frontend-platform",
"frontend": true
}
},
{
"metadata": {
"name": "sqlDatasourceDatabaseSelection",

View File

@ -326,7 +326,7 @@ func TestRouteGetReceiversResponses(t *testing.T) {
})
t.Run("json body content is as expected", func(t *testing.T) {
expectedRedactedResponse := `{"name":"multiple integrations","grafana_managed_receiver_configs":[{"uid":"c2090fda-f824-4add-b545-5a4d5c2ef082","name":"multiple integrations","type":"prometheus-alertmanager","disableResolveMessage":true,"settings":{"basicAuthPassword":"[REDACTED]","basicAuthUser":"test","url":"http://localhost:9093"},"secureFields":{"basicAuthPassword":true}},{"uid":"c84539ec-f87e-4fc5-9a91-7a687d34bbd1","name":"multiple integrations","type":"discord","disableResolveMessage":false,"settings":{"avatar_url":"some avatar","url":"[REDACTED]","use_discord_username":true},"secureFields":{"url":true}}]}`
expectedRedactedResponse := `{"name":"multiple integrations","grafana_managed_receiver_configs":[{"uid":"c2090fda-f824-4add-b545-5a4d5c2ef082","name":"multiple integrations","type":"prometheus-alertmanager","disableResolveMessage":true,"settings":{"basicAuthUser":"test","url":"http://localhost:9093"},"secureFields":{"basicAuthPassword":true}},{"uid":"c84539ec-f87e-4fc5-9a91-7a687d34bbd1","name":"multiple integrations","type":"discord","disableResolveMessage":false,"settings":{"avatar_url":"some avatar","use_discord_username":true},"secureFields":{"url":true}}]}`
expectedDecryptedResponse := `{"name":"multiple integrations","grafana_managed_receiver_configs":[{"uid":"c2090fda-f824-4add-b545-5a4d5c2ef082","name":"multiple integrations","type":"prometheus-alertmanager","disableResolveMessage":true,"settings":{"basicAuthPassword":"testpass","basicAuthUser":"test","url":"http://localhost:9093"},"secureFields":{"basicAuthPassword":true}},{"uid":"c84539ec-f87e-4fc5-9a91-7a687d34bbd1","name":"multiple integrations","type":"discord","disableResolveMessage":false,"settings":{"avatar_url":"some avatar","url":"some url","use_discord_username":true},"secureFields":{"url":true}}]}`
t.Run("decrypt false", func(t *testing.T) {
env := createTestEnv(t, testContactPointConfig)

View File

@ -32,13 +32,17 @@ func IntegrationToPostableGrafanaReceiver(integration *models.Integration) (*api
SecureSettings: maps.Clone(integration.SecureSettings),
}
if len(integration.Settings) > 0 {
jsonBytes, err := json.Marshal(integration.Settings)
if err != nil {
return nil, err
}
postable.Settings = jsonBytes
// Alertmanager will fail validation with nil Settings , so ensure we always have at least an empty map.
settings := integration.Settings
if settings == nil {
settings = make(map[string]any)
}
jsonBytes, err := json.Marshal(settings)
if err != nil {
return nil, err
}
postable.Settings = jsonBytes
return postable, nil
}

View File

@ -7,8 +7,8 @@ var (
ErrBadAlertmanagerConfiguration = errutil.Internal("alerting.notification.configCorrupted").MustTemplate("Failed to unmarshal the Alertmanager configuration", errutil.WithPublic("Current Alertmanager configuration in the storage is corrupted. Reset the configuration or rollback to a recent valid one."))
ErrReceiverNotFound = errutil.NotFound("alerting.notifications.receivers.notFound", errutil.WithPublicMessage("Receiver not found"))
ErrReceiverExists = errutil.BadRequest("alerting.notifications.receivers.exists", errutil.WithPublicMessage("Receiver with this name already exists. Use a different name or update an existing one."))
ErrReceiverInvalid = errutil.Conflict("alerting.notifications.receivers.invalid").MustTemplate(
ErrReceiverExists = errutil.Conflict("alerting.notifications.receivers.exists", errutil.WithPublicMessage("Receiver with this name already exists. Use a different name or update an existing one."))
ErrReceiverInvalid = errutil.BadRequest("alerting.notifications.receivers.invalid").MustTemplate(
"Invalid receiver: '{{ .Public.Reason }}'",
errutil.WithPublic("Invalid receiver: '{{ .Public.Reason }}'"),
)

View File

@ -130,7 +130,17 @@ func (rs *ReceiverService) GetReceiver(ctx context.Context, q models.GetReceiver
return nil, err
}
rs.decryptOrRedactSecureSettings(ctx, rcv, q.Decrypt)
if q.Decrypt {
err := rcv.Decrypt(rs.decryptor(ctx))
if err != nil {
rs.log.Warn("Failed to decrypt secure settings", "name", rcv.Name, "error", err)
}
} else {
err := rcv.Encrypt(rs.encryptor(ctx))
if err != nil {
rs.log.Warn("Failed to encrypt secure settings", "name", rcv.Name, "error", err)
}
}
return rcv, nil
}
@ -167,8 +177,18 @@ func (rs *ReceiverService) GetReceivers(ctx context.Context, q models.GetReceive
return nil, err
}
for _, r := range filtered {
rs.decryptOrRedactSecureSettings(ctx, r, q.Decrypt)
for _, rcv := range filtered {
if q.Decrypt {
err := rcv.Decrypt(rs.decryptor(ctx))
if err != nil {
rs.log.Warn("Failed to decrypt secure settings", "name", rcv.Name, "error", err)
}
} else {
err := rcv.Encrypt(rs.encryptor(ctx))
if err != nil {
rs.log.Warn("Failed to encrypt secure settings", "name", rcv.Name, "error", err)
}
}
}
return limitOffset(filtered, q.Offset, q.Limit), nil
@ -260,7 +280,7 @@ func (rs *ReceiverService) DeleteReceiver(ctx context.Context, uid string, calle
return err
}
} else {
rs.log.Debug("ignoring optimistic concurrency check because version was not provided", "receiver", existing.Name, "operation", "delete")
rs.log.Debug("Ignoring optimistic concurrency check because version was not provided", "receiver", existing.Name, "operation", "delete")
}
if err := rs.provenanceValidator(existing.Provenance, models.Provenance(callerProvenance)); err != nil {
@ -460,17 +480,6 @@ func (rs *ReceiverService) deleteProvenances(ctx context.Context, orgID int64, i
return nil
}
func (rs *ReceiverService) decryptOrRedactSecureSettings(ctx context.Context, recv *models.Receiver, decrypt bool) {
if decrypt {
err := recv.Decrypt(rs.decryptor(ctx))
if err != nil {
rs.log.Warn("failed to decrypt secure settings", "name", recv.Name, "error", err)
}
} else {
recv.Redact(rs.redactor())
}
}
// decryptor returns a models.DecryptFn that decrypts a secure setting. If decryption fails, the fallback value is used.
func (rs *ReceiverService) decryptor(ctx context.Context) models.DecryptFn {
return func(value string) (string, error) {
@ -486,13 +495,6 @@ func (rs *ReceiverService) decryptor(ctx context.Context) models.DecryptFn {
}
}
// redactor returns a models.RedactFn that redacts a secure setting.
func (rs *ReceiverService) redactor() models.RedactFn {
return func(value string) string {
return definitions.RedactedValue
}
}
// encryptor creates an encrypt function that delegates to secrets.Service and returns the base64 encoded result.
func (rs *ReceiverService) encryptor(ctx context.Context) models.EncryptFn {
return func(payload string) (string, error) {

View File

@ -7,6 +7,7 @@ import (
"strings"
"testing"
"github.com/prometheus/alertmanager/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -178,8 +179,14 @@ func TestReceiverService_DecryptRedact(t *testing.T) {
require.NoError(t, err)
if tc.decrypt {
require.Equal(t, "secure url", res.Integrations[0].Settings["url"])
require.NotContains(t, res.Integrations[0].SecureSettings, "url")
} else {
require.Equal(t, definitions.RedactedValue, res.Integrations[0].Settings["url"])
require.NotContains(t, res.Integrations[0].Settings, "url")
// Ensure the encrypted value exists and is not redacted or decrypted.
require.NotEmpty(t, res.Integrations[0].SecureSettings["url"])
require.NotEqual(t, definitions.RedactedValue, res.Integrations[0].SecureSettings["url"])
require.NotEqual(t, "secure url", res.Integrations[0].SecureSettings["url"])
}
}
})
@ -248,7 +255,7 @@ func TestReceiverService_Delete(t *testing.T) {
deleteUID: baseReceiver.UID,
existing: util.Pointer(baseReceiver.Clone()),
storeSettings: map[models.AlertRuleKey][]models.NotificationSettings{
models.AlertRuleKey{OrgID: 1, UID: "rule1"}: {
{OrgID: 1, UID: "rule1"}: {
models.NotificationSettingsGen(models.NSMuts.WithReceiver(baseReceiver.Name))(),
},
},
@ -342,6 +349,7 @@ func TestReceiverService_Create(t *testing.T) {
slackIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), models.IntegrationMuts.WithValidConfig("slack"))()
emailIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), models.IntegrationMuts.WithValidConfig("email"))()
lineIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), models.IntegrationMuts.WithValidConfig("line"))()
baseReceiver := models.ReceiverGen(models.ReceiverMuts.WithName("test receiver"), models.ReceiverMuts.WithIntegrations(slackIntegration))()
for _, tc := range []struct {
@ -349,6 +357,7 @@ func TestReceiverService_Create(t *testing.T) {
user identity.Requester
receiver models.Receiver
expectedCreate models.Receiver
expectedStored *definitions.PostableApiReceiver
expectedErr error
expectedProvenances map[string]models.Provenance
}{
@ -414,6 +423,49 @@ func TestReceiverService_Create(t *testing.T) {
receiver: models.CopyReceiverWith(baseReceiver, models.ReceiverMuts.WithInvalidIntegration("slack")),
expectedErr: legacy_storage.ErrReceiverInvalid,
},
{
name: "create integration with no normal settings should not store nil settings",
user: writer,
receiver: models.CopyReceiverWith(baseReceiver, models.ReceiverMuts.WithIntegrations(
models.CopyIntegrationWith(lineIntegration,
models.IntegrationMuts.WithSettings(
map[string]any{ // Line is valid with only the single secure field "token", so Settings will be empty when saving.
"token": "secret",
},
)),
)),
expectedCreate: models.CopyReceiverWith(baseReceiver, models.ReceiverMuts.WithIntegrations(
models.CopyIntegrationWith(lineIntegration,
models.IntegrationMuts.WithSettings(
map[string]any{}, // Empty settings, not nil.
),
models.IntegrationMuts.WithSecureSettings(
map[string]string{
"token": "c2VjcmV0", // base64 encoded "secret".
},
),
),
)),
expectedStored: &definitions.PostableApiReceiver{
Receiver: config.Receiver{
Name: lineIntegration.Name,
},
PostableGrafanaReceivers: definitions.PostableGrafanaReceivers{
GrafanaManagedReceivers: []*definitions.PostableGrafanaReceiver{
{
UID: lineIntegration.UID,
Name: lineIntegration.Name,
Type: lineIntegration.Config.Type,
DisableResolveMessage: lineIntegration.DisableResolveMessage,
Settings: definitions.RawMessage(`{}`), // Empty settings, not nil.
SecureSettings: map[string]string{
"token": "c2VjcmV0", // base64 encoded "secret".
},
},
},
},
},
},
} {
t.Run(tc.name, func(t *testing.T) {
sut := createReceiverServiceSut(t, &secretsService)
@ -462,9 +514,19 @@ func TestReceiverService_Create(t *testing.T) {
decrypted.Version = tc.expectedCreate.Version // Version is calculated before decryption.
assert.Equal(t, decrypted, *stored)
provenances, err := sut.provisioningStore.GetProvenances(context.Background(), tc.user.GetOrgID(), (&definitions.EmbeddedContactPoint{}).ResourceType())
require.NoError(t, err)
assert.Equal(t, tc.expectedProvenances, provenances)
if tc.expectedProvenances != nil {
provenances, err := sut.provisioningStore.GetProvenances(context.Background(), tc.user.GetOrgID(), (&definitions.EmbeddedContactPoint{}).ResourceType())
require.NoError(t, err)
assert.Equal(t, tc.expectedProvenances, provenances)
}
if tc.expectedStored != nil {
revision, err := sut.cfgStore.Get(context.Background(), writer.GetOrgID())
require.NoError(t, err)
rcv, err := revision.GetReceiver(legacy_storage.NameToUid(tc.expectedStored.Name))
require.NoError(t, err)
assert.Equal(t, tc.expectedStored, rcv)
}
})
}
}

View File

@ -86,6 +86,12 @@ func (ecp *ContactPointService) GetContactPoints(ctx context.Context, q ContactP
contactPoints := make([]apimodels.EmbeddedContactPoint, 0, len(res))
for _, recv := range res {
for _, gr := range recv.Integrations {
if !q.Decrypt {
// Provisioning API redacts by default.
gr.Redact(func(value string) string {
return apimodels.RedactedValue
})
}
contactPoints = append(contactPoints, GrafanaIntegrationConfigToEmbeddedContactPoint(gr, recv.Provenance))
}
}

View File

@ -10,6 +10,7 @@ import (
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/plugins/repo"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
"github.com/grafana/grafana/pkg/setting"
@ -107,6 +108,7 @@ func (s *Service) installPlugins(ctx context.Context) error {
s.log.Info("Installing plugin", "pluginId", installPlugin.ID, "version", installPlugin.Version)
start := time.Now()
ctx = repo.WithRequestOrigin(ctx, "preinstall")
err := s.pluginInstaller.Add(ctx, installPlugin.ID, installPlugin.Version, compatOpts)
if err != nil {
var dupeErr plugins.DuplicateError

View File

@ -24,6 +24,8 @@ import (
var (
// ErrFolderNameMissing is returned when folder name is missing.
ErrFolderNameMissing = errors.New("folder name missing")
// ErrGetOrCreateFolder is returned when there is a failure to fetch or create a provisioning folder.
ErrGetOrCreateFolder = errors.New("failed to get or create provisioning folder")
)
// FileReader is responsible for reading dashboards from disk and
@ -147,7 +149,7 @@ func (fr *FileReader) storeDashboardsInFolder(ctx context.Context, filesFoundOnD
dashboardRefs map[string]*dashboards.DashboardProvisioning, usageTracker *usageTracker) error {
folderID, folderUID, err := fr.getOrCreateFolder(ctx, fr.Cfg, fr.dashboardProvisioningService, fr.Cfg.Folder)
if err != nil && !errors.Is(err, ErrFolderNameMissing) {
return err
return fmt.Errorf("%w with name %q: %w", ErrGetOrCreateFolder, fr.Cfg.Folder, err)
}
// save dashboards based on json files
@ -177,7 +179,7 @@ func (fr *FileReader) storeDashboardsInFoldersFromFileStructure(ctx context.Cont
folderID, folderUID, err := fr.getOrCreateFolder(ctx, fr.Cfg, fr.dashboardProvisioningService, folderName)
if err != nil && !errors.Is(err, ErrFolderNameMissing) {
return fmt.Errorf("can't provision folder %q from file system structure: %w", folderName, err)
return fmt.Errorf("%w with name %q from file system structure: %w", ErrGetOrCreateFolder, folderName, err)
}
provisioningMetadata, err := fr.saveDashboard(ctx, path, folderID, folderUID, fileInfo, dashboardRefs)

View File

@ -2,6 +2,7 @@ package provisioning
import (
"context"
"errors"
"fmt"
"path/filepath"
"sync"
@ -77,9 +78,8 @@ func ProvideService(
folderService: folderService,
}
err := s.setDashboardProvisioner()
if err != nil {
return nil, fmt.Errorf("%v: %w", "Failed to create provisioner", err)
if err := s.setDashboardProvisioner(); err != nil {
return nil, err
}
return s, nil
@ -106,30 +106,27 @@ type ProvisioningService interface {
GetAllowUIUpdatesFromConfig(name string) bool
}
// Add a public constructor for overriding service to be able to instantiate OSS as fallback
func NewProvisioningServiceImpl() *ProvisioningServiceImpl {
logger := log.New("provisioning")
return &ProvisioningServiceImpl{
log: logger,
newDashboardProvisioner: dashboards.New,
provisionDatasources: datasources.Provision,
provisionPlugins: plugins.Provision,
}
}
// Used for testing purposes
func newProvisioningServiceImpl(
newDashboardProvisioner dashboards.DashboardProvisionerFactory,
provisionDatasources func(context.Context, string, datasources.BaseDataSourceService, datasources.CorrelationsStore, org.Service) error,
provisionPlugins func(context.Context, string, pluginstore.Store, pluginsettings.Service, org.Service) error,
) *ProvisioningServiceImpl {
return &ProvisioningServiceImpl{
searchService searchV2.SearchService,
) (*ProvisioningServiceImpl, error) {
s := &ProvisioningServiceImpl{
log: log.New("provisioning"),
newDashboardProvisioner: newDashboardProvisioner,
provisionDatasources: provisionDatasources,
provisionPlugins: provisionPlugins,
Cfg: setting.NewCfg(),
searchService: searchService,
}
if err := s.setDashboardProvisioner(); err != nil {
return nil, err
}
return s, nil
}
type ProvisioningServiceImpl struct {
@ -185,7 +182,11 @@ func (ps *ProvisioningServiceImpl) Run(ctx context.Context) error {
err := ps.ProvisionDashboards(ctx)
if err != nil {
ps.log.Error("Failed to provision dashboard", "error", err)
return err
// Consider the allow list of errors for which running the provisioning service should not
// fail. For now this includes only dashboards.ErrGetOrCreateFolder.
if !errors.Is(err, dashboards.ErrGetOrCreateFolder) {
return err
}
}
if ps.dashboardProvisioner.HasDashboardSources() {
ps.searchService.TriggerReIndex()

Some files were not shown because too many files have changed in this diff Show More